arch/alpha/include/asm/agp_backend.h
15
u32 rq : 8;
arch/alpha/kernel/core_marvel.c
1088
agp->capability.bits.rq = 0xf;
arch/alpha/kernel/core_marvel.c
964
agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);
arch/alpha/kernel/core_titan.c
789
agp->capability.bits.rq = 7; /* 8 - 1 */
arch/alpha/kernel/core_titan.c
798
agp->mode.bits.rq = 7; /* RQ Depth? */
arch/mips/include/uapi/asm/inst.h
1033
__BITFIELD_FIELD(unsigned int rq : 5,
arch/mips/kernel/process.c
307
if (ip->loongson3_lswc2_format.rq == 31) {
arch/mips/loongson64/cop2-ex.c
114
value_next = regs->regs[insn.loongson3_lswc2_format.rq];
arch/mips/loongson64/cop2-ex.c
131
value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0);
arch/mips/loongson64/cop2-ex.c
83
regs->regs[insn.loongson3_lswc2_format.rq] = value_next;
arch/mips/loongson64/cop2-ex.c
99
set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next);
arch/powerpc/include/asm/spu.h
130
struct spu_runqueue *rq;
arch/powerpc/platforms/cell/spufs/context.c
50
INIT_LIST_HEAD(&ctx->rq);
arch/powerpc/platforms/cell/spufs/context.c
82
BUG_ON(!list_empty(&ctx->rq));
arch/powerpc/platforms/cell/spufs/file.c
2486
!list_empty(&ctx->rq) ? 'q' : ' ',
arch/powerpc/platforms/cell/spufs/sched.c
102
BUG_ON(!list_empty(&ctx->rq));
arch/powerpc/platforms/cell/spufs/sched.c
490
if (list_empty(&ctx->rq)) {
arch/powerpc/platforms/cell/spufs/sched.c
491
list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
arch/powerpc/platforms/cell/spufs/sched.c
509
if (!list_empty(&ctx->rq)) {
arch/powerpc/platforms/cell/spufs/sched.c
512
list_del_init(&ctx->rq);
arch/powerpc/platforms/cell/spufs/sched.c
827
struct list_head *rq = &spu_prio->runq[best];
arch/powerpc/platforms/cell/spufs/sched.c
829
list_for_each_entry(ctx, rq, rq) {
arch/powerpc/platforms/cell/spufs/spufs.h
108
struct list_head rq;
arch/um/drivers/ubd_kern.c
1291
struct request *req = bd->rq;
block/bfq-cgroup.c
343
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
block/bfq-cgroup.c
345
struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);
block/bfq-cgroup.c
350
blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
block/bfq-cgroup.c
351
blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
block/bfq-iosched.c
1001
struct request *rq;
block/bfq-iosched.c
1008
rq = rq_entry_fifo(bfqq->fifo.next);
block/bfq-iosched.c
1010
if (rq == last || blk_time_get_ns() < rq->fifo_time)
block/bfq-iosched.c
1013
bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
block/bfq-iosched.c
1014
return rq;
block/bfq-iosched.c
1045
static unsigned long bfq_serv_to_charge(struct request *rq,
block/bfq-iosched.c
1050
return blk_rq_sectors(rq);
block/bfq-iosched.c
1052
return blk_rq_sectors(rq) * bfq_async_charge_factor;
block/bfq-iosched.c
1823
struct request *rq,
block/bfq-iosched.c
1837
unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
block/bfq-iosched.c
1839
bfqq->bic || RQ_BIC(rq)->bfqq_data[act_idx].stably_merged;
block/bfq-iosched.c
2206
static void bfq_add_request(struct request *rq)
block/bfq-iosched.c
2208
struct bfq_queue *bfqq = RQ_BFQQ(rq);
block/bfq-iosched.c
2215
bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
block/bfq-iosched.c
2216
bfqq->queued[rq_is_sync(rq)]++;
block/bfq-iosched.c
2223
if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
block/bfq-iosched.c
2296
elv_rb_add(&bfqq->sort_list, rq);
block/bfq-iosched.c
2302
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
block/bfq-iosched.c
231
#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
block/bfq-iosched.c
2314
rq, &interactive);
block/bfq-iosched.c
2316
if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
block/bfq-iosched.c
232
(get_sdist(last_pos, rq) > \
block/bfq-iosched.c
235
blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
block/bfq-iosched.c
2374
static sector_t get_sdist(sector_t last_pos, struct request *rq)
block/bfq-iosched.c
2377
return abs(blk_rq_pos(rq) - last_pos);
block/bfq-iosched.c
2383
struct request *rq)
block/bfq-iosched.c
2385
struct bfq_queue *bfqq = RQ_BFQQ(rq);
block/bfq-iosched.c
2387
const int sync = rq_is_sync(rq);
block/bfq-iosched.c
2389
if (bfqq->next_rq == rq) {
block/bfq-iosched.c
2390
bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
block/bfq-iosched.c
2394
if (rq->queuelist.prev != &rq->queuelist)
block/bfq-iosched.c
2395
list_del_init(&rq->queuelist);
block/bfq-iosched.c
2402
elv_rb_del(&bfqq->sort_list, rq);
block/bfq-iosched.c
2404
elv_rqhash_del(q, rq);
block/bfq-iosched.c
2405
if (q->last_merge == rq)
block/bfq-iosched.c
2442
if (rq->cmd_flags & REQ_META)
block/bfq-iosched.c
2555
static void bfq_requests_merged(struct request_queue *q, struct request *rq,
block/bfq-iosched.c
2558
struct bfq_queue *bfqq = RQ_BFQQ(rq),
block/bfq-iosched.c
2574
!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
block/bfq-iosched.c
2575
next->fifo_time < rq->fifo_time) {
block/bfq-iosched.c
2576
list_del_init(&rq->queuelist);
block/bfq-iosched.c
2577
list_replace_init(&next->queuelist, &rq->queuelist);
block/bfq-iosched.c
2578
rq->fifo_time = next->fifo_time;
block/bfq-iosched.c
2582
bfqq->next_rq = rq;
block/bfq-iosched.c
3230
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
block/bfq-iosched.c
3240
if (is_sync && !rq_is_sync(rq))
block/bfq-iosched.c
3275
return bfqq == RQ_BFQQ(rq);
block/bfq-iosched.c
3430
struct request *rq)
block/bfq-iosched.c
3432
if (rq != NULL) { /* new rq dispatch now, reset accordingly */
block/bfq-iosched.c
3437
blk_rq_sectors(rq);
block/bfq-iosched.c
3447
static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
block/bfq-iosched.c
3553
bfq_reset_rate_computation(bfqd, rq);
block/bfq-iosched.c
3588
static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
block/bfq-iosched.c
3595
bfq_reset_rate_computation(bfqd, rq);
block/bfq-iosched.c
3620
&& !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
block/bfq-iosched.c
3623
bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
block/bfq-iosched.c
3628
max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
block/bfq-iosched.c
3630
bfqd->last_rq_max_size = blk_rq_sectors(rq);
block/bfq-iosched.c
3639
bfq_update_rate_reset(bfqd, rq);
block/bfq-iosched.c
3641
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
block/bfq-iosched.c
3642
if (RQ_BFQQ(rq) == bfqd->in_service_queue)
block/bfq-iosched.c
3650
static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
block/bfq-iosched.c
3652
struct bfq_queue *bfqq = RQ_BFQQ(rq);
block/bfq-iosched.c
3667
bfq_update_peak_rate(q->elevator->elevator_data, rq);
block/bfq-iosched.c
3669
bfq_remove_request(q, rq);
block/bfq-iosched.c
376
#define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0]))
block/bfq-iosched.c
377
#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
block/bfq-iosched.c
5103
struct request *rq = bfqq->next_rq;
block/bfq-iosched.c
5106
service_to_charge = bfq_serv_to_charge(rq, bfqq);
block/bfq-iosched.c
5112
bfqd->waited_rq = rq;
block/bfq-iosched.c
5115
bfq_dispatch_remove(bfqd->queue, rq);
block/bfq-iosched.c
5118
return rq;
block/bfq-iosched.c
5141
return rq;
block/bfq-iosched.c
5159
struct request *rq = NULL;
block/bfq-iosched.c
5163
rq = list_first_entry(&bfqd->dispatch, struct request,
block/bfq-iosched.c
5165
list_del_init(&rq->queuelist);
block/bfq-iosched.c
5167
bfqq = RQ_BFQQ(rq);
block/bfq-iosched.c
5232
rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
block/bfq-iosched.c
5234
if (rq) {
block/bfq-iosched.c
5239
rq->rq_flags |= RQF_STARTED;
block/bfq-iosched.c
5242
return rq;
block/bfq-iosched.c
5247
struct request *rq,
block/bfq-iosched.c
5251
struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
block/bfq-iosched.c
5286
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
block/bfq-iosched.c
5292
struct request *rq,
block/bfq-iosched.c
5300
struct request *rq;
block/bfq-iosched.c
5309
rq = __bfq_dispatch_request(hctx);
block/bfq-iosched.c
5316
bfq_update_dispatch_stats(hctx->queue, rq,
block/bfq-iosched.c
5320
return rq;
block/bfq-iosched.c
5913
struct request *rq)
block/bfq-iosched.c
5916
bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
block/bfq-iosched.c
6075
struct request *rq)
block/bfq-iosched.c
6077
if (rq->cmd_flags & REQ_META)
block/bfq-iosched.c
6080
bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
block/bfq-iosched.c
6083
bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
block/bfq-iosched.c
6084
blk_rq_sectors(rq) < 32;
block/bfq-iosched.c
6147
static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
block/bfq-iosched.c
6149
struct bfq_queue *bfqq = RQ_BFQQ(rq),
block/bfq-iosched.c
6150
*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
block/bfq-iosched.c
6151
RQ_BIC(rq));
block/bfq-iosched.c
6171
if (bic_to_bfqq(RQ_BIC(rq), true,
block/bfq-iosched.c
6172
bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
block/bfq-iosched.c
6174
bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
block/bfq-iosched.c
6183
rq->elv.priv[1] = new_bfqq;
block/bfq-iosched.c
6187
bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
block/bfq-iosched.c
6188
bfq_update_io_seektime(bfqd, bfqq, rq);
block/bfq-iosched.c
6191
bfq_add_request(rq);
block/bfq-iosched.c
6194
rq->fifo_time = blk_time_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
block/bfq-iosched.c
6195
list_add_tail(&rq->queuelist, &bfqq->fifo);
block/bfq-iosched.c
6197
bfq_rq_enqueued(bfqd, bfqq, rq);
block/bfq-iosched.c
6234
static struct bfq_queue *bfq_init_rq(struct request *rq);
block/bfq-iosched.c
6236
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
block/bfq-iosched.c
6247
if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
block/bfq-iosched.c
6248
bfqg_stats_update_legacy_io(q, rq);
block/bfq-iosched.c
6251
bfqq = bfq_init_rq(rq);
block/bfq-iosched.c
6252
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
block/bfq-iosched.c
6258
trace_block_rq_insert(rq);
block/bfq-iosched.c
6261
list_add(&rq->queuelist, &bfqd->dispatch);
block/bfq-iosched.c
6263
list_add_tail(&rq->queuelist, &bfqd->dispatch);
block/bfq-iosched.c
6265
idle_timer_disabled = __bfq_insert_request(bfqd, rq);
block/bfq-iosched.c
6271
bfqq = RQ_BFQQ(rq);
block/bfq-iosched.c
6273
if (rq_mergeable(rq)) {
block/bfq-iosched.c
6274
elv_rqhash_add(q, rq);
block/bfq-iosched.c
6276
q->last_merge = rq;
block/bfq-iosched.c
6285
cmd_flags = rq->cmd_flags;
block/bfq-iosched.c
6297
struct request *rq;
block/bfq-iosched.c
6299
rq = list_first_entry(list, struct request, queuelist);
block/bfq-iosched.c
6300
list_del_init(&rq->queuelist);
block/bfq-iosched.c
6301
bfq_insert_request(hctx, rq, flags);
block/bfq-iosched.c
6645
static void bfq_finish_requeue_request(struct request *rq)
block/bfq-iosched.c
6647
struct bfq_queue *bfqq = RQ_BFQQ(rq);
block/bfq-iosched.c
6656
if (!rq->elv.icq || !bfqq)
block/bfq-iosched.c
6661
if (rq->rq_flags & RQF_STARTED)
block/bfq-iosched.c
6663
rq->start_time_ns,
block/bfq-iosched.c
6664
rq->io_start_time_ns,
block/bfq-iosched.c
6665
rq->cmd_flags);
block/bfq-iosched.c
6668
if (likely(rq->rq_flags & RQF_STARTED)) {
block/bfq-iosched.c
6669
if (rq == bfqd->waited_rq)
block/bfq-iosched.c
6676
RQ_BIC(rq)->requests--;
block/bfq-iosched.c
6696
rq->elv.priv[0] = NULL;
block/bfq-iosched.c
6697
rq->elv.priv[1] = NULL;
block/bfq-iosched.c
6700
static void bfq_finish_request(struct request *rq)
block/bfq-iosched.c
6702
bfq_finish_requeue_request(rq);
block/bfq-iosched.c
6704
if (rq->elv.icq) {
block/bfq-iosched.c
6705
put_io_context(rq->elv.icq->ioc);
block/bfq-iosched.c
6706
rq->elv.icq = NULL;
block/bfq-iosched.c
6806
static void bfq_prepare_request(struct request *rq)
block/bfq-iosched.c
6808
rq->elv.icq = ioc_find_get_icq(rq->q);
block/bfq-iosched.c
6815
rq->elv.priv[0] = rq->elv.priv[1] = NULL;
block/bfq-iosched.c
6927
static struct bfq_queue *bfq_init_rq(struct request *rq)
block/bfq-iosched.c
6929
struct request_queue *q = rq->q;
block/bfq-iosched.c
6930
struct bio *bio = rq->bio;
block/bfq-iosched.c
6933
const int is_sync = rq_is_sync(rq);
block/bfq-iosched.c
6937
if (unlikely(!rq->elv.icq))
block/bfq-iosched.c
6947
if (RQ_BFQQ(rq))
block/bfq-iosched.c
6948
return RQ_BFQQ(rq);
block/bfq-iosched.c
6950
bic = icq_to_bic(rq->elv.icq);
block/bfq-iosched.c
6959
rq, bfqq, bfqq->ref);
block/bfq-iosched.c
6961
rq->elv.priv[0] = bic;
block/bfq-iosched.c
6962
rq->elv.priv[1] = bfqq;
block/bfq-iosched.h
1066
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
block/blk-cgroup.h
451
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
block/blk-cgroup.h
453
return rq->bio->bi_blkg == bio->bi_blkg &&
block/blk-cgroup.h
454
bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
block/blk-cgroup.h
496
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
block/blk-crypto-internal.h
106
static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
block/blk-crypto-internal.h
130
static inline void blk_crypto_rq_set_defaults(struct request *rq) { }
block/blk-crypto-internal.h
132
static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
block/blk-crypto-internal.h
137
static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
block/blk-crypto-internal.h
169
static inline void bio_crypt_do_front_merge(struct request *rq,
block/blk-crypto-internal.h
174
memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
block/blk-crypto-internal.h
175
sizeof(rq->crypt_ctx->bc_dun));
block/blk-crypto-internal.h
179
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
block/blk-crypto-internal.h
180
static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
block/blk-crypto-internal.h
182
if (blk_crypto_rq_is_encrypted(rq))
block/blk-crypto-internal.h
183
return __blk_crypto_rq_get_keyslot(rq);
block/blk-crypto-internal.h
187
void __blk_crypto_rq_put_keyslot(struct request *rq);
block/blk-crypto-internal.h
188
static inline void blk_crypto_rq_put_keyslot(struct request *rq)
block/blk-crypto-internal.h
190
if (blk_crypto_rq_has_keyslot(rq))
block/blk-crypto-internal.h
191
__blk_crypto_rq_put_keyslot(rq);
block/blk-crypto-internal.h
194
void __blk_crypto_free_request(struct request *rq);
block/blk-crypto-internal.h
195
static inline void blk_crypto_free_request(struct request *rq)
block/blk-crypto-internal.h
197
if (blk_crypto_rq_is_encrypted(rq))
block/blk-crypto-internal.h
198
__blk_crypto_free_request(rq);
block/blk-crypto-internal.h
201
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
block/blk-crypto-internal.h
213
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
block/blk-crypto-internal.h
217
return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
block/blk-crypto-internal.h
32
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
block/blk-crypto-internal.h
58
static inline void blk_crypto_rq_set_defaults(struct request *rq)
block/blk-crypto-internal.h
60
rq->crypt_ctx = NULL;
block/blk-crypto-internal.h
61
rq->crypt_keyslot = NULL;
block/blk-crypto-internal.h
64
static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
block/blk-crypto-internal.h
66
return rq->crypt_ctx;
block/blk-crypto-internal.h
69
static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
block/blk-crypto-internal.h
71
return rq->crypt_keyslot;
block/blk-crypto.c
203
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
block/blk-crypto.c
205
return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
block/blk-crypto.c
222
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
block/blk-crypto.c
224
return blk_crypto_get_keyslot(rq->q->crypto_profile,
block/blk-crypto.c
225
rq->crypt_ctx->bc_key,
block/blk-crypto.c
226
&rq->crypt_keyslot);
block/blk-crypto.c
229
void __blk_crypto_rq_put_keyslot(struct request *rq)
block/blk-crypto.c
231
blk_crypto_put_keyslot(rq->crypt_keyslot);
block/blk-crypto.c
232
rq->crypt_keyslot = NULL;
block/blk-crypto.c
235
void __blk_crypto_free_request(struct request *rq)
block/blk-crypto.c
238
if (WARN_ON_ONCE(rq->crypt_keyslot))
block/blk-crypto.c
239
__blk_crypto_rq_put_keyslot(rq);
block/blk-crypto.c
241
mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
block/blk-crypto.c
242
rq->crypt_ctx = NULL;
block/blk-crypto.c
281
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
block/blk-crypto.c
284
if (!rq->crypt_ctx) {
block/blk-crypto.c
285
rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
block/blk-crypto.c
286
if (!rq->crypt_ctx)
block/blk-crypto.c
289
*rq->crypt_ctx = *bio->bi_crypt_context;
block/blk-flush.c
103
static unsigned int blk_flush_cur_seq(struct request *rq)
block/blk-flush.c
105
return 1 << ffz(rq->flush.seq);
block/blk-flush.c
108
static void blk_flush_restore_request(struct request *rq)
block/blk-flush.c
115
rq->bio = rq->biotail;
block/blk-flush.c
116
if (rq->bio)
block/blk-flush.c
117
rq->__sector = rq->bio->bi_iter.bi_sector;
block/blk-flush.c
120
rq->rq_flags &= ~RQF_FLUSH_SEQ;
block/blk-flush.c
121
rq->end_io = rq->flush.saved_end_io;
block/blk-flush.c
124
static void blk_account_io_flush(struct request *rq)
block/blk-flush.c
126
struct block_device *part = rq->q->disk->part0;
block/blk-flush.c
131
blk_time_get_ns() - rq->start_time_ns);
block/blk-flush.c
148
static void blk_flush_complete_seq(struct request *rq,
block/blk-flush.c
152
struct request_queue *q = rq->q;
block/blk-flush.c
156
BUG_ON(rq->flush.seq & seq);
block/blk-flush.c
157
rq->flush.seq |= seq;
block/blk-flush.c
158
cmd_flags = rq->cmd_flags;
block/blk-flush.c
161
seq = blk_flush_cur_seq(rq);
block/blk-flush.c
171
list_add_tail(&rq->queuelist, pending);
block/blk-flush.c
177
list_move(&rq->queuelist, &q->requeue_list);
block/blk-flush.c
189
list_del_init(&rq->queuelist);
block/blk-flush.c
190
blk_flush_restore_request(rq);
block/blk-flush.c
191
blk_mq_end_request(rq, error);
block/blk-flush.c
207
struct request *rq, *n;
block/blk-flush.c
246
list_for_each_entry_safe(rq, n, running, queuelist) {
block/blk-flush.c
247
unsigned int seq = blk_flush_cur_seq(rq);
block/blk-flush.c
250
list_del_init(&rq->queuelist);
block/blk-flush.c
251
blk_flush_complete_seq(rq, fq, seq, error);
block/blk-flush.c
258
bool is_flush_rq(struct request *rq)
block/blk-flush.c
260
return rq->end_io == flush_end_io;
block/blk-flush.c
338
static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
block/blk-flush.c
342
struct request_queue *q = rq->q;
block/blk-flush.c
343
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-flush.c
344
struct blk_mq_ctx *ctx = rq->mq_ctx;
block/blk-flush.c
349
WARN_ON(rq->tag < 0);
block/blk-flush.c
350
blk_mq_put_driver_tag(rq);
block/blk-flush.c
363
INIT_LIST_HEAD(&rq->queuelist);
block/blk-flush.c
364
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
block/blk-flush.c
371
static void blk_rq_init_flush(struct request *rq)
block/blk-flush.c
373
rq->flush.seq = 0;
block/blk-flush.c
374
rq->rq_flags |= RQF_FLUSH_SEQ;
block/blk-flush.c
375
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
block/blk-flush.c
376
rq->end_io = mq_flush_data_end_io;
block/blk-flush.c
384
bool blk_insert_flush(struct request *rq)
block/blk-flush.c
386
struct request_queue *q = rq->q;
block/blk-flush.c
387
struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
block/blk-flush.c
392
WARN_ON_ONCE(rq->bio != rq->biotail);
block/blk-flush.c
394
if (blk_rq_sectors(rq))
block/blk-flush.c
401
if (rq->cmd_flags & REQ_PREFLUSH)
block/blk-flush.c
403
if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
block/blk-flush.c
411
rq->cmd_flags &= ~REQ_PREFLUSH;
block/blk-flush.c
413
rq->cmd_flags &= ~REQ_FUA;
block/blk-flush.c
420
rq->cmd_flags |= REQ_SYNC;
block/blk-flush.c
430
blk_mq_end_request(rq, 0);
block/blk-flush.c
444
blk_rq_init_flush(rq);
block/blk-flush.c
445
rq->flush.seq |= REQ_FSEQ_PREFLUSH;
block/blk-flush.c
455
blk_rq_init_flush(rq);
block/blk-flush.c
457
blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
block/blk-integrity.c
123
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
block/blk-integrity.c
129
iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
block/blk-integrity.c
130
ret = bio_integrity_map_user(rq->bio, &iter);
block/blk-integrity.c
134
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
block/blk-integrity.c
135
rq->cmd_flags |= REQ_INTEGRITY;
block/blk-iocost.c
2581
static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
block/blk-iocost.c
2584
unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
block/blk-iocost.c
2586
switch (req_op(rq)) {
block/blk-iocost.c
2598
static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
block/blk-iocost.c
2602
calc_size_vtime_cost_builtin(rq, ioc, &cost);
block/blk-iocost.c
2740
static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
block/blk-iocost.c
2764
if (blk_rq_pos(rq) < bio_end &&
block/blk-iocost.c
2765
blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
block/blk-iocost.c
2772
if (rq->bio && rq->bio->bi_iocost_cost &&
block/blk-iocost.c
2807
static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
block/blk-iocost.c
2814
if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
block/blk-iocost.c
2817
switch (req_op(rq)) {
block/blk-iocost.c
2830
on_q_ns = blk_time_get_ns() - rq->alloc_time_ns;
block/blk-iocost.c
2831
rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
block/blk-iocost.c
2832
size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
block/blk-map.c
150
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
block/blk-map.c
176
bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
block/blk-map.c
247
ret = blk_rq_append_bio(rq, bio);
block/blk-map.c
260
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
block/blk-map.c
270
bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
block/blk-map.c
280
ret = blk_rq_append_bio(rq, bio);
block/blk-map.c
311
static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len,
block/blk-map.c
317
bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
block/blk-map.c
365
static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
block/blk-map.c
368
enum req_op op = req_op(rq);
block/blk-map.c
383
bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
block/blk-map.c
426
int blk_rq_append_bio(struct request *rq, struct bio *bio)
block/blk-map.c
428
const struct queue_limits *lim = &rq->q->limits;
block/blk-map.c
442
if (rq->bio) {
block/blk-map.c
443
if (!ll_back_merge_fn(rq, bio, nr_segs))
block/blk-map.c
445
rq->phys_gap_bit = bio_seg_gap(rq->q, rq->biotail, bio,
block/blk-map.c
446
rq->phys_gap_bit);
block/blk-map.c
447
rq->biotail->bi_next = bio;
block/blk-map.c
448
rq->biotail = bio;
block/blk-map.c
449
rq->__data_len += bio->bi_iter.bi_size;
block/blk-map.c
45
static struct bio *blk_rq_map_bio_alloc(struct request *rq,
block/blk-map.c
454
rq->nr_phys_segments = nr_segs;
block/blk-map.c
455
rq->bio = rq->biotail = bio;
block/blk-map.c
456
rq->__data_len = bio->bi_iter.bi_size;
block/blk-map.c
457
rq->phys_gap_bit = bio->bi_bvec_gap_bit;
block/blk-map.c
463
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
block/blk-map.c
465
unsigned int max_bytes = rq->q->limits.max_hw_sectors << SECTOR_SHIFT;
block/blk-map.c
473
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
block/blk-map.c
478
ret = blk_rq_append_bio(rq, bio);
block/blk-map.c
48
struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
block/blk-map.c
499
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
block/blk-map.c
51
bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
block/blk-map.c
521
ret = blk_rq_map_user_bvec(rq, iter);
block/blk-map.c
533
ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
block/blk-map.c
535
ret = bio_map_user_iov(rq, &i, gfp_mask);
block/blk-map.c
542
bio = rq->bio;
block/blk-map.c
550
rq->bio = NULL;
block/blk-map.c
555
int blk_rq_map_user(struct request_queue *q, struct request *rq,
block/blk-map.c
560
int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
block/blk-map.c
565
return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
block/blk-map.c
652
int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
block/blk-map.c
659
if (len > (queue_max_hw_sectors(rq->q) << SECTOR_SHIFT))
block/blk-map.c
664
if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
block/blk-map.c
665
bio = bio_copy_kern(rq, kbuf, len, gfp_mask);
block/blk-map.c
667
bio = bio_map_kern(rq, kbuf, len, gfp_mask);
block/blk-map.c
672
ret = blk_rq_append_bio(rq, bio);
block/blk-merge.c
1039
struct request *rq,
block/blk-merge.c
1044
if (!blk_rq_merge_ok(rq, bio))
block/blk-merge.c
1047
switch (blk_try_merge(rq, bio)) {
block/blk-merge.c
1049
if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1050
return bio_attempt_back_merge(rq, bio, nr_segs);
block/blk-merge.c
1053
if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1054
return bio_attempt_front_merge(rq, bio, nr_segs);
block/blk-merge.c
1057
return bio_attempt_discard_merge(q, rq, bio);
block/blk-merge.c
1089
struct request *rq;
block/blk-merge.c
1094
rq = plug->mq_list.tail;
block/blk-merge.c
1095
if (rq->q == q)
block/blk-merge.c
1096
return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
block/blk-merge.c
1101
rq_list_for_each(&plug->mq_list, rq) {
block/blk-merge.c
1102
if (rq->q != q)
block/blk-merge.c
1104
if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
block/blk-merge.c
1119
struct request *rq;
block/blk-merge.c
1122
list_for_each_entry_reverse(rq, list, queuelist) {
block/blk-merge.c
1126
switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
block/blk-merge.c
1144
struct request *rq;
block/blk-merge.c
1146
switch (elv_merge(q, &rq, bio)) {
block/blk-merge.c
1148
if (!blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1150
if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
block/blk-merge.c
1152
*merged_request = attempt_back_merge(q, rq);
block/blk-merge.c
1154
elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
block/blk-merge.c
1157
if (!blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1159
if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
block/blk-merge.c
1161
*merged_request = attempt_front_merge(q, rq);
block/blk-merge.c
1163
elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
block/blk-merge.c
1166
return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
block/blk-merge.c
489
unsigned int blk_recalc_rq_segments(struct request *rq)
block/blk-merge.c
496
if (!rq->bio)
block/blk-merge.c
499
switch (bio_op(rq->bio)) {
block/blk-merge.c
502
if (queue_max_discard_segments(rq->q) > 1) {
block/blk-merge.c
503
struct bio *bio = rq->bio;
block/blk-merge.c
516
rq_for_each_bvec(bv, rq, iter)
block/blk-merge.c
517
bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
block/blk-merge.c
522
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
block/blk-merge.c
525
struct request_queue *q = rq->q;
block/blk-merge.c
528
bool is_atomic = rq->cmd_flags & REQ_ATOMIC;
block/blk-merge.c
530
if (blk_rq_is_passthrough(rq))
block/blk-merge.c
534
max_sectors = blk_queue_get_max_sectors(rq);
block/blk-merge.c
537
req_op(rq) == REQ_OP_DISCARD ||
block/blk-merge.c
538
req_op(rq) == REQ_OP_SECURE_ERASE)
block/blk-merge.c
673
static void blk_rq_set_mixed_merge(struct request *rq)
block/blk-merge.c
675
blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
block/blk-merge.c
678
if (rq->rq_flags & RQF_MIXED_MERGE)
block/blk-merge.c
686
for (bio = rq->bio; bio; bio = bio->bi_next) {
block/blk-merge.c
691
rq->rq_flags |= RQF_MIXED_MERGE;
block/blk-merge.c
743
static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
block/blk-merge.c
746
return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
block/blk-merge.c
749
static bool blk_atomic_write_mergeable_rqs(struct request *rq,
block/blk-merge.c
752
return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
block/blk-merge.c
866
struct request *rq)
block/blk-merge.c
868
struct request *next = elv_latter_request(q, rq);
block/blk-merge.c
871
return attempt_merge(q, rq, next);
block/blk-merge.c
877
struct request *rq)
block/blk-merge.c
879
struct request *prev = elv_former_request(q, rq);
block/blk-merge.c
882
return attempt_merge(q, prev, rq);
block/blk-merge.c
892
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
block/blk-merge.c
895
return attempt_merge(q, rq, next);
block/blk-merge.c
898
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
block/blk-merge.c
900
if (!rq_mergeable(rq) || !bio_mergeable(bio))
block/blk-merge.c
903
if (req_op(rq) != bio_op(bio))
block/blk-merge.c
906
if (!blk_cgroup_mergeable(rq, bio))
block/blk-merge.c
908
if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
block/blk-merge.c
910
if (!bio_crypt_rq_ctx_compatible(rq, bio))
block/blk-merge.c
912
if (rq->bio->bi_write_hint != bio->bi_write_hint)
block/blk-merge.c
914
if (rq->bio->bi_write_stream != bio->bi_write_stream)
block/blk-merge.c
916
if (rq->bio->bi_ioprio != bio->bi_ioprio)
block/blk-merge.c
918
if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
block/blk-merge.c
924
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
block/blk-merge.c
926
if (blk_discard_mergable(rq))
block/blk-merge.c
928
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
block/blk-merge.c
930
else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
block/blk-mq-debugfs.c
263
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
block/blk-mq-debugfs.c
265
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
block/blk-mq-debugfs.c
266
const enum req_op op = req_op(rq);
block/blk-mq-debugfs.c
272
seq_printf(m, "%p {.op=", rq);
block/blk-mq-debugfs.c
278
blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
block/blk-mq-debugfs.c
281
blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
block/blk-mq-debugfs.c
283
seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
block/blk-mq-debugfs.c
284
seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
block/blk-mq-debugfs.c
285
rq->internal_tag);
block/blk-mq-debugfs.c
287
mq_ops->show_rq(m, rq);
block/blk-mq-debugfs.c
340
static bool hctx_show_busy_rq(struct request *rq, void *data)
block/blk-mq-debugfs.c
344
if (rq->mq_hctx == params->hctx)
block/blk-mq-debugfs.c
345
__blk_mq_debugfs_rq_show(params->m, rq);
block/blk-mq-debugfs.h
20
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
block/blk-mq-dma.c
140
static inline void blk_rq_map_iter_init(struct request *rq,
block/blk-mq-dma.c
143
struct bio *bio = rq->bio;
block/blk-mq-dma.c
145
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
block/blk-mq-dma.c
147
.bvecs = &rq->special_vec,
block/blk-mq-dma.c
149
.bi_size = rq->special_vec.bv_len,
block/blk-mq-dma.c
287
int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
block/blk-mq-dma.c
294
blk_rq_map_iter_init(rq, &iter);
block/blk-mq-dma.c
295
while (blk_map_iter_next(rq, &iter, &vec)) {
block/blk-mq-dma.c
311
WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
block/blk-mq-dma.c
402
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
block/blk-mq-dma.c
404
struct request_queue *q = rq->q;
block/blk-mq-dma.c
406
struct bio *bio = rq->bio;
block/blk-mq-dma.c
417
while (blk_map_iter_next(rq, &iter, &vec)) {
block/blk-mq-dma.c
433
BUG_ON(segments > rq->nr_integrity_segments);
block/blk-mq-sched.c
101
struct request *rq;
block/blk-mq-sched.c
116
rq = e->type->ops.dispatch_request(hctx);
block/blk-mq-sched.c
117
if (!rq) {
block/blk-mq-sched.c
130
blk_mq_set_rq_budget_token(rq, budget_token);
block/blk-mq-sched.c
137
list_add_tail(&rq->queuelist, &rq_list);
block/blk-mq-sched.c
139
if (rq->mq_hctx != hctx)
block/blk-mq-sched.c
148
if (!blk_mq_get_driver_tag(rq))
block/blk-mq-sched.c
219
struct request *rq;
block/blk-mq-sched.c
236
rq = blk_mq_dequeue_from_ctx(hctx, ctx);
block/blk-mq-sched.c
237
if (!rq) {
block/blk-mq-sched.c
250
blk_mq_set_rq_budget_token(rq, budget_token);
block/blk-mq-sched.c
257
list_add(&rq->queuelist, &rq_list);
block/blk-mq-sched.c
260
ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
block/blk-mq-sched.c
262
} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, false));
block/blk-mq-sched.c
370
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
block/blk-mq-sched.c
373
return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
block/blk-mq-sched.c
60
struct request *rq;
block/blk-mq-sched.c
63
list_for_each_entry(rq, rq_list, queuelist) {
block/blk-mq-sched.c
64
if (rq->mq_hctx != hctx) {
block/blk-mq-sched.c
65
list_cut_before(&hctx_list, rq_list, &rq->queuelist);
block/blk-mq-sched.h
103
static inline void blk_mq_sched_requeue_request(struct request *rq)
block/blk-mq-sched.h
105
if (rq->rq_flags & RQF_USE_SCHED) {
block/blk-mq-sched.h
106
struct request_queue *q = rq->q;
block/blk-mq-sched.h
110
e->type->ops.requeue_request(rq);
block/blk-mq-sched.h
14
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
block/blk-mq-sched.h
81
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
block/blk-mq-sched.h
84
if (rq->rq_flags & RQF_USE_SCHED) {
block/blk-mq-sched.h
88
return e->type->ops.allow_merge(q, rq, bio);
block/blk-mq-sched.h
93
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
block/blk-mq-sched.h
95
if (rq->rq_flags & RQF_USE_SCHED) {
block/blk-mq-sched.h
96
struct elevator_queue *e = rq->q->elevator;
block/blk-mq-sched.h
99
e->type->ops.completed_request(rq, now);
block/blk-mq-tag.c
258
struct request *rq;
block/blk-mq-tag.c
260
rq = tags->rqs[bitnr];
block/blk-mq-tag.c
261
if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
block/blk-mq-tag.c
262
rq = NULL;
block/blk-mq-tag.c
263
return rq;
block/blk-mq-tag.c
273
struct request *rq;
block/blk-mq-tag.c
287
rq = blk_mq_find_and_get_req(tags, bitnr);
block/blk-mq-tag.c
288
if (!rq)
block/blk-mq-tag.c
291
if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
block/blk-mq-tag.c
292
ret = iter_data->fn(rq, iter_data->data);
block/blk-mq-tag.c
293
blk_mq_put_rq_ref(rq);
block/blk-mq-tag.c
342
struct request *rq;
block/blk-mq-tag.c
354
rq = tags->static_rqs[bitnr];
block/blk-mq-tag.c
356
rq = blk_mq_find_and_get_req(tags, bitnr);
block/blk-mq-tag.c
357
if (!rq)
block/blk-mq-tag.c
361
blk_mq_request_started(rq))
block/blk-mq-tag.c
362
ret = iter_data->fn(rq, iter_data->data);
block/blk-mq-tag.c
364
blk_mq_put_rq_ref(rq);
block/blk-mq-tag.c
453
static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
block/blk-mq-tag.c
457
if (blk_mq_request_completed(rq))
block/blk-mq-tag.c
644
u32 blk_mq_unique_tag(struct request *rq)
block/blk-mq-tag.c
646
return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
block/blk-mq-tag.c
647
(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
block/blk-mq.c
100
mi->inflight[rq_data_dir(rq)]++;
block/blk-mq.c
1150
static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
block/blk-mq.c
1152
if (rq->rq_flags & RQF_STATS)
block/blk-mq.c
1153
blk_stat_add(rq, now);
block/blk-mq.c
1155
blk_mq_sched_completed_request(rq, now);
block/blk-mq.c
1156
blk_account_io_done(rq, now);
block/blk-mq.c
1159
inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
block/blk-mq.c
1161
if (blk_mq_need_time_stamp(rq))
block/blk-mq.c
1162
__blk_mq_end_request_acct(rq, blk_time_get_ns());
block/blk-mq.c
1164
blk_mq_finish_request(rq);
block/blk-mq.c
1166
if (rq->end_io) {
block/blk-mq.c
1167
rq_qos_done(rq->q, rq);
block/blk-mq.c
1168
if (rq->end_io(rq, error, NULL) == RQ_END_IO_FREE)
block/blk-mq.c
1169
blk_mq_free_request(rq);
block/blk-mq.c
1171
blk_mq_free_request(rq);
block/blk-mq.c
1176
void blk_mq_end_request(struct request *rq, blk_status_t error)
block/blk-mq.c
1178
if (blk_update_request(rq, error, blk_rq_bytes(rq)))
block/blk-mq.c
1180
__blk_mq_end_request(rq, error);
block/blk-mq.c
1201
struct request *rq;
block/blk-mq.c
1207
while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
block/blk-mq.c
1208
prefetch(rq->bio);
block/blk-mq.c
1209
prefetch(rq->rq_next);
block/blk-mq.c
1211
blk_complete_request(rq);
block/blk-mq.c
1213
__blk_mq_end_request_acct(rq, now);
block/blk-mq.c
1215
blk_mq_finish_request(rq);
block/blk-mq.c
1217
rq_qos_done(rq->q, rq);
block/blk-mq.c
1223
if (rq->end_io && rq->end_io(rq, 0, iob) == RQ_END_IO_NONE)
block/blk-mq.c
1226
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
block/blk-mq.c
1227
if (!req_ref_put_and_test(rq))
block/blk-mq.c
1230
blk_crypto_free_request(rq);
block/blk-mq.c
1231
blk_pm_mark_last_busy(rq);
block/blk-mq.c
1233
if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
block/blk-mq.c
1237
cur_hctx = rq->mq_hctx;
block/blk-mq.c
1239
tags[nr_tags++] = rq->tag;
block/blk-mq.c
1250
struct request *rq, *next;
block/blk-mq.c
1252
llist_for_each_entry_safe(rq, next, entry, ipi_list)
block/blk-mq.c
1253
rq->q->mq_ops->complete(rq);
block/blk-mq.c
1272
static inline bool blk_mq_complete_need_ipi(struct request *rq)
block/blk-mq.c
1277
!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
block/blk-mq.c
1289
if (cpu == rq->mq_ctx->cpu ||
block/blk-mq.c
1290
(!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
block/blk-mq.c
1291
cpus_share_cache(cpu, rq->mq_ctx->cpu) &&
block/blk-mq.c
1292
cpus_equal_capacity(cpu, rq->mq_ctx->cpu)))
block/blk-mq.c
1296
return cpu_online(rq->mq_ctx->cpu);
block/blk-mq.c
1299
static void blk_mq_complete_send_ipi(struct request *rq)
block/blk-mq.c
1303
cpu = rq->mq_ctx->cpu;
block/blk-mq.c
1304
if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
block/blk-mq.c
1308
static void blk_mq_raise_softirq(struct request *rq)
block/blk-mq.c
1314
if (llist_add(&rq->ipi_list, list))
block/blk-mq.c
1319
bool blk_mq_complete_request_remote(struct request *rq)
block/blk-mq.c
1321
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
block/blk-mq.c
1328
if ((rq->mq_hctx->nr_ctx == 1 &&
block/blk-mq.c
1329
rq->mq_ctx->cpu == raw_smp_processor_id()) ||
block/blk-mq.c
1330
rq->cmd_flags & REQ_POLLED)
block/blk-mq.c
1333
if (blk_mq_complete_need_ipi(rq)) {
block/blk-mq.c
1334
blk_mq_complete_send_ipi(rq);
block/blk-mq.c
1338
if (rq->q->nr_hw_queues == 1) {
block/blk-mq.c
1339
blk_mq_raise_softirq(rq);
block/blk-mq.c
1353
void blk_mq_complete_request(struct request *rq)
block/blk-mq.c
1355
if (!blk_mq_complete_request_remote(rq))
block/blk-mq.c
1356
rq->q->mq_ops->complete(rq);
block/blk-mq.c
1368
void blk_mq_start_request(struct request *rq)
block/blk-mq.c
1370
struct request_queue *q = rq->q;
block/blk-mq.c
1372
trace_block_rq_issue(rq);
block/blk-mq.c
1375
!blk_rq_is_passthrough(rq)) {
block/blk-mq.c
1376
rq->io_start_time_ns = blk_time_get_ns();
block/blk-mq.c
1377
rq->stats_sectors = blk_rq_sectors(rq);
block/blk-mq.c
1378
rq->rq_flags |= RQF_STATS;
block/blk-mq.c
1379
rq_qos_issue(q, rq);
block/blk-mq.c
1382
WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
block/blk-mq.c
1384
blk_add_timer(rq);
block/blk-mq.c
1385
WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
block/blk-mq.c
1386
rq->mq_hctx->tags->rqs[rq->tag] = rq;
block/blk-mq.c
1388
if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
block/blk-mq.c
1389
blk_integrity_prepare(rq);
block/blk-mq.c
1391
if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
block/blk-mq.c
1392
WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
block/blk-mq.c
1408
static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
block/blk-mq.c
1413
trace_block_plug(rq->q);
block/blk-mq.c
1415
(!blk_queue_nomerges(rq->q) &&
block/blk-mq.c
1419
trace_block_plug(rq->q);
block/blk-mq.c
1422
if (!plug->multiple_queues && last && last->q != rq->q)
block/blk-mq.c
1428
if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
block/blk-mq.c
1430
rq_list_add_tail(&plug->mq_list, rq);
block/blk-mq.c
1446
void blk_execute_rq_nowait(struct request *rq, bool at_head)
block/blk-mq.c
1448
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-mq.c
1451
WARN_ON(!blk_rq_is_passthrough(rq));
block/blk-mq.c
1453
blk_account_io_start(rq);
block/blk-mq.c
1456
blk_add_rq_to_plug(current->plug, rq);
block/blk-mq.c
1460
blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
block/blk-mq.c
1470
static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret,
block/blk-mq.c
1473
struct blk_rq_wait *wait = rq->end_io_data;
block/blk-mq.c
1480
bool blk_rq_is_poll(struct request *rq)
block/blk-mq.c
1482
if (!rq->mq_hctx)
block/blk-mq.c
1484
if (rq->mq_hctx->type != HCTX_TYPE_POLL)
block/blk-mq.c
1490
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
block/blk-mq.c
1493
blk_hctx_poll(rq->q, rq->mq_hctx, NULL, BLK_POLL_ONESHOT);
block/blk-mq.c
1508
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
block/blk-mq.c
1510
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-mq.c
1516
WARN_ON(!blk_rq_is_passthrough(rq));
block/blk-mq.c
1518
rq->end_io_data = &wait;
block/blk-mq.c
1519
rq->end_io = blk_end_sync_rq;
block/blk-mq.c
1521
blk_account_io_start(rq);
block/blk-mq.c
1522
blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
block/blk-mq.c
1525
if (blk_rq_is_poll(rq))
block/blk-mq.c
1526
blk_rq_poll_completion(rq, &wait.done);
block/blk-mq.c
1534
static void __blk_mq_requeue_request(struct request *rq)
block/blk-mq.c
1536
struct request_queue *q = rq->q;
block/blk-mq.c
1538
blk_mq_put_driver_tag(rq);
block/blk-mq.c
1540
trace_block_rq_requeue(rq);
block/blk-mq.c
1541
rq_qos_requeue(q, rq);
block/blk-mq.c
1543
if (blk_mq_request_started(rq)) {
block/blk-mq.c
1544
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
block/blk-mq.c
1545
rq->rq_flags &= ~RQF_TIMED_OUT;
block/blk-mq.c
1549
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
block/blk-mq.c
1551
struct request_queue *q = rq->q;
block/blk-mq.c
1554
__blk_mq_requeue_request(rq);
block/blk-mq.c
1557
blk_mq_sched_requeue_request(rq);
block/blk-mq.c
1560
list_add_tail(&rq->queuelist, &q->requeue_list);
block/blk-mq.c
1574
struct request *rq;
block/blk-mq.c
1582
rq = list_entry(rq_list.next, struct request, queuelist);
block/blk-mq.c
1583
list_del_init(&rq->queuelist);
block/blk-mq.c
1590
if (rq->rq_flags & RQF_DONTPREP)
block/blk-mq.c
1591
blk_mq_request_bypass_insert(rq, 0);
block/blk-mq.c
1593
blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
block/blk-mq.c
1597
rq = list_entry(flush_list.next, struct request, queuelist);
block/blk-mq.c
1598
list_del_init(&rq->queuelist);
block/blk-mq.c
1599
blk_mq_insert_request(rq, 0);
block/blk-mq.c
1619
static bool blk_is_flush_data_rq(struct request *rq)
block/blk-mq.c
1621
return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
block/blk-mq.c
1624
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
block/blk-mq.c
1636
if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
block/blk-mq.c
1637
blk_is_flush_data_rq(rq) &&
block/blk-mq.c
1638
blk_mq_request_completed(rq))) {
block/blk-mq.c
1678
static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
block/blk-mq.c
1682
if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
block/blk-mq.c
1684
if (rq->rq_flags & RQF_TIMED_OUT)
block/blk-mq.c
1687
deadline = READ_ONCE(rq->deadline);
block/blk-mq.c
1698
void blk_mq_put_rq_ref(struct request *rq)
block/blk-mq.c
1700
if (is_flush_rq(rq)) {
block/blk-mq.c
1701
if (rq->end_io(rq, 0, NULL) == RQ_END_IO_FREE)
block/blk-mq.c
1702
blk_mq_free_request(rq);
block/blk-mq.c
1703
} else if (req_ref_put_and_test(rq)) {
block/blk-mq.c
1704
__blk_mq_free_request(rq);
block/blk-mq.c
1708
static bool blk_mq_check_expired(struct request *rq, void *priv)
block/blk-mq.c
1719
if (blk_mq_req_expired(rq, expired)) {
block/blk-mq.c
1726
static bool blk_mq_handle_expired(struct request *rq, void *priv)
block/blk-mq.c
1730
if (blk_mq_req_expired(rq, expired))
block/blk-mq.c
1731
blk_mq_rq_timed_out(rq);
block/blk-mq.c
1829
struct request *rq;
block/blk-mq.c
1842
dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
block/blk-mq.c
1843
list_del_init(&dispatch_data->rq->queuelist);
block/blk-mq.c
1849
return !dispatch_data->rq;
block/blk-mq.c
1858
.rq = NULL,
block/blk-mq.c
1864
return data.rq;
block/blk-mq.c
1867
bool __blk_mq_alloc_driver_tag(struct request *rq)
block/blk-mq.c
1869
struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
block/blk-mq.c
1870
unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
block/blk-mq.c
1873
blk_mq_tag_busy(rq->mq_hctx);
block/blk-mq.c
1875
if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
block/blk-mq.c
1876
bt = &rq->mq_hctx->tags->breserved_tags;
block/blk-mq.c
1879
if (!hctx_may_queue(rq->mq_hctx, bt))
block/blk-mq.c
1887
rq->tag = tag + tag_offset;
block/blk-mq.c
1888
blk_mq_inc_active_requests(rq->mq_hctx);
block/blk-mq.c
1920
struct request *rq)
block/blk-mq.c
1939
return blk_mq_get_driver_tag(rq);
block/blk-mq.c
1946
if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
block/blk-mq.c
1985
ret = blk_mq_get_driver_tag(rq);
block/blk-mq.c
2032
static void blk_mq_handle_dev_resource(struct request *rq,
block/blk-mq.c
2035
list_add(&rq->queuelist, list);
block/blk-mq.c
2036
__blk_mq_requeue_request(rq);
block/blk-mq.c
2045
static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
block/blk-mq.c
2048
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-mq.c
2052
budget_token = blk_mq_get_dispatch_budget(rq->q);
block/blk-mq.c
2054
blk_mq_put_driver_tag(rq);
block/blk-mq.c
2057
blk_mq_set_rq_budget_token(rq, budget_token);
block/blk-mq.c
2060
if (!blk_mq_get_driver_tag(rq)) {
block/blk-mq.c
2068
if (!blk_mq_mark_tag_wait(hctx, rq)) {
block/blk-mq.c
2074
blk_mq_put_dispatch_budget(rq->q, budget_token);
block/blk-mq.c
2086
struct request *rq;
block/blk-mq.c
2088
list_for_each_entry(rq, list, queuelist) {
block/blk-mq.c
2089
int budget_token = blk_mq_get_rq_budget_token(rq);
block/blk-mq.c
2121
struct request *rq;
block/blk-mq.c
2136
rq = list_first_entry(list, struct request, queuelist);
block/blk-mq.c
2138
WARN_ON_ONCE(hctx != rq->mq_hctx);
block/blk-mq.c
2139
prep = blk_mq_prep_dispatch_rq(rq, get_budget);
block/blk-mq.c
2143
list_del_init(&rq->queuelist);
block/blk-mq.c
2145
bd.rq = rq;
block/blk-mq.c
2157
blk_mq_handle_dev_resource(rq, list);
block/blk-mq.c
2160
blk_mq_end_request(rq, ret);
block/blk-mq.c
2574
static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
block/blk-mq.c
2576
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-mq.c
2580
list_add(&rq->queuelist, &hctx->dispatch);
block/blk-mq.c
2582
list_add_tail(&rq->queuelist, &hctx->dispatch);
block/blk-mq.c
2590
struct request *rq;
block/blk-mq.c
2608
list_for_each_entry(rq, list, queuelist) {
block/blk-mq.c
2609
BUG_ON(rq->mq_ctx != ctx);
block/blk-mq.c
2610
trace_block_rq_insert(rq);
block/blk-mq.c
2611
if (rq->cmd_flags & REQ_NOWAIT)
block/blk-mq.c
2623
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
block/blk-mq.c
2625
struct request_queue *q = rq->q;
block/blk-mq.c
2626
struct blk_mq_ctx *ctx = rq->mq_ctx;
block/blk-mq.c
2627
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-mq.c
2629
if (blk_rq_is_passthrough(rq)) {
block/blk-mq.c
2640
blk_mq_request_bypass_insert(rq, flags);
block/blk-mq.c
2641
} else if (req_op(rq) == REQ_OP_FLUSH) {
block/blk-mq.c
2663
blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
block/blk-mq.c
2667
WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
block/blk-mq.c
2669
list_add(&rq->queuelist, &list);
block/blk-mq.c
2672
trace_block_rq_insert(rq);
block/blk-mq.c
2676
list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
block/blk-mq.c
2678
list_add_tail(&rq->queuelist,
block/blk-mq.c
2685
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
block/blk-mq.c
2691
rq->cmd_flags |= REQ_FAILFAST_MASK;
block/blk-mq.c
2693
rq->bio = rq->biotail = bio;
block/blk-mq.c
2694
rq->__sector = bio->bi_iter.bi_sector;
block/blk-mq.c
2695
rq->__data_len = bio->bi_iter.bi_size;
block/blk-mq.c
2696
rq->phys_gap_bit = bio->bi_bvec_gap_bit;
block/blk-mq.c
2698
rq->nr_phys_segments = nr_segs;
block/blk-mq.c
2700
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
block/blk-mq.c
2704
err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
block/blk-mq.c
2707
blk_account_io_start(rq);
block/blk-mq.c
2711
struct request *rq, bool last)
block/blk-mq.c
2713
struct request_queue *q = rq->q;
block/blk-mq.c
2715
.rq = rq,
block/blk-mq.c
2733
__blk_mq_requeue_request(rq);
block/blk-mq.c
2743
static bool blk_mq_get_budget_and_tag(struct request *rq)
block/blk-mq.c
2747
budget_token = blk_mq_get_dispatch_budget(rq->q);
block/blk-mq.c
2750
blk_mq_set_rq_budget_token(rq, budget_token);
block/blk-mq.c
2751
if (!blk_mq_get_driver_tag(rq)) {
block/blk-mq.c
2752
blk_mq_put_dispatch_budget(rq->q, budget_token);
block/blk-mq.c
2769
struct request *rq)
block/blk-mq.c
2773
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
block/blk-mq.c
2774
blk_mq_insert_request(rq, 0);
block/blk-mq.c
2779
if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
block/blk-mq.c
2780
blk_mq_insert_request(rq, 0);
block/blk-mq.c
2781
blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
block/blk-mq.c
2785
ret = __blk_mq_issue_directly(hctx, rq, true);
block/blk-mq.c
2791
blk_mq_request_bypass_insert(rq, 0);
block/blk-mq.c
2795
blk_mq_end_request(rq, ret);
block/blk-mq.c
2800
static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
block/blk-mq.c
2802
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-mq.c
2804
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
block/blk-mq.c
2805
blk_mq_insert_request(rq, 0);
block/blk-mq.c
2810
if (!blk_mq_get_budget_and_tag(rq))
block/blk-mq.c
2812
return __blk_mq_issue_directly(hctx, rq, last);
block/blk-mq.c
2818
struct request *rq;
block/blk-mq.c
2822
while ((rq = rq_list_pop(rqs))) {
block/blk-mq.c
2825
if (hctx != rq->mq_hctx) {
block/blk-mq.c
2830
hctx = rq->mq_hctx;
block/blk-mq.c
2833
ret = blk_mq_request_issue_directly(rq, last);
block/blk-mq.c
2840
blk_mq_request_bypass_insert(rq, 0);
block/blk-mq.c
2844
blk_mq_end_request(rq, ret);
block/blk-mq.c
2864
struct request *rq = rq_list_pop(rqs);
block/blk-mq.c
2865
struct request_queue *this_q = rq->q;
block/blk-mq.c
2871
rq_list_add_tail(&matched_rqs, rq);
block/blk-mq.c
2872
while ((rq = *prev)) {
block/blk-mq.c
2873
if (rq->q == this_q) {
block/blk-mq.c
2875
*prev = rq->rq_next;
block/blk-mq.c
2876
rq_list_add_tail(&matched_rqs, rq);
block/blk-mq.c
2880
prev = &rq->rq_next;
block/blk-mq.c
2881
last = rq;
block/blk-mq.c
2921
struct request *rq = rq_list_pop(rqs);
block/blk-mq.c
2924
this_hctx = rq->mq_hctx;
block/blk-mq.c
2925
this_ctx = rq->mq_ctx;
block/blk-mq.c
2926
is_passthrough = blk_rq_is_passthrough(rq);
block/blk-mq.c
2927
} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
block/blk-mq.c
2928
is_passthrough != blk_rq_is_passthrough(rq)) {
block/blk-mq.c
2929
rq_list_add_tail(&requeue_list, rq);
block/blk-mq.c
2932
list_add_tail(&rq->queuelist, &list);
block/blk-mq.c
3008
struct request *rq = list_first_entry(list, struct request,
block/blk-mq.c
3011
list_del_init(&rq->queuelist);
block/blk-mq.c
3012
ret = blk_mq_request_issue_directly(rq, list_empty(list));
block/blk-mq.c
3019
blk_mq_request_bypass_insert(rq, 0);
block/blk-mq.c
3024
blk_mq_end_request(rq, ret);
block/blk-mq.c
3061
struct request *rq;
block/blk-mq.c
3071
rq = __blk_mq_alloc_requests(&data);
block/blk-mq.c
3072
if (unlikely(!rq))
block/blk-mq.c
3074
return rq;
block/blk-mq.c
3084
struct request *rq;
block/blk-mq.c
3088
rq = rq_list_peek(&plug->cached_rqs);
block/blk-mq.c
3089
if (!rq || rq->q != q)
block/blk-mq.c
3091
if (type != rq->mq_hctx->type &&
block/blk-mq.c
3092
(type != HCTX_TYPE_READ || rq->mq_hctx->type != HCTX_TYPE_DEFAULT))
block/blk-mq.c
3094
if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
block/blk-mq.c
3096
return rq;
block/blk-mq.c
3099
static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
block/blk-mq.c
3102
if (rq_list_pop(&plug->cached_rqs) != rq)
block/blk-mq.c
3110
rq_qos_throttle(rq->q, bio);
block/blk-mq.c
3112
blk_mq_rq_time_init(rq, blk_time_get_ns());
block/blk-mq.c
3113
rq->cmd_flags = bio->bi_opf;
block/blk-mq.c
3114
INIT_LIST_HEAD(&rq->queuelist);
block/blk-mq.c
3148
struct request *rq;
block/blk-mq.c
3154
rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
block/blk-mq.c
3164
if (rq)
block/blk-mq.c
3173
if (!rq) {
block/blk-mq.c
3211
if (rq) {
block/blk-mq.c
3212
blk_mq_use_cached_rq(rq, plug, bio);
block/blk-mq.c
3214
rq = blk_mq_get_new_requests(q, plug, bio);
block/blk-mq.c
3215
if (unlikely(!rq)) {
block/blk-mq.c
3224
rq_qos_track(q, rq, bio);
block/blk-mq.c
3226
blk_mq_bio_to_request(rq, bio, nr_segs);
block/blk-mq.c
3228
ret = blk_crypto_rq_get_keyslot(rq);
block/blk-mq.c
3232
blk_mq_free_request(rq);
block/blk-mq.c
3237
blk_zone_write_plug_init_request(rq);
block/blk-mq.c
3239
if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
block/blk-mq.c
3243
blk_add_rq_to_plug(plug, rq);
block/blk-mq.c
3247
hctx = rq->mq_hctx;
block/blk-mq.c
3248
if ((rq->rq_flags & RQF_USE_SCHED) ||
block/blk-mq.c
3250
blk_mq_insert_request(rq, 0);
block/blk-mq.c
3253
blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
block/blk-mq.c
3262
if (!rq)
block/blk-mq.c
3271
blk_status_t blk_insert_cloned_request(struct request *rq)
block/blk-mq.c
3273
struct request_queue *q = rq->q;
block/blk-mq.c
3274
unsigned int max_sectors = blk_queue_get_max_sectors(rq);
block/blk-mq.c
3275
unsigned int max_segments = blk_rq_get_max_segments(rq);
block/blk-mq.c
3278
if (blk_rq_sectors(rq) > max_sectors) {
block/blk-mq.c
3293
__func__, blk_rq_sectors(rq), max_sectors);
block/blk-mq.c
3301
rq->nr_phys_segments = blk_recalc_rq_segments(rq);
block/blk-mq.c
3302
if (rq->nr_phys_segments > max_segments) {
block/blk-mq.c
3304
__func__, rq->nr_phys_segments, max_segments);
block/blk-mq.c
3308
if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
block/blk-mq.c
3311
ret = blk_crypto_rq_get_keyslot(rq);
block/blk-mq.c
3315
blk_account_io_start(rq);
block/blk-mq.c
3323
ret = blk_mq_request_issue_directly(rq, true));
block/blk-mq.c
3325
blk_account_io_done(rq, blk_time_get_ns());
block/blk-mq.c
3337
void blk_rq_unprep_clone(struct request *rq)
block/blk-mq.c
3341
while ((bio = rq->bio) != NULL) {
block/blk-mq.c
3342
rq->bio = bio->bi_next;
block/blk-mq.c
3366
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
block/blk-mq.c
3377
struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src,
block/blk-mq.c
3387
if (rq->bio) {
block/blk-mq.c
3388
rq->biotail->bi_next = bio;
block/blk-mq.c
3389
rq->biotail = bio;
block/blk-mq.c
3391
rq->bio = rq->biotail = bio;
block/blk-mq.c
3396
rq->__sector = blk_rq_pos(rq_src);
block/blk-mq.c
3397
rq->__data_len = blk_rq_bytes(rq_src);
block/blk-mq.c
3399
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
block/blk-mq.c
3400
rq->special_vec = rq_src->special_vec;
block/blk-mq.c
3402
rq->nr_phys_segments = rq_src->nr_phys_segments;
block/blk-mq.c
3403
rq->nr_integrity_segments = rq_src->nr_integrity_segments;
block/blk-mq.c
3404
rq->phys_gap_bit = rq_src->phys_gap_bit;
block/blk-mq.c
3406
if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
block/blk-mq.c
3412
blk_rq_unprep_clone(rq);
block/blk-mq.c
3423
void blk_steal_bios(struct bio_list *list, struct request *rq)
block/blk-mq.c
3425
if (rq->bio) {
block/blk-mq.c
3427
list->tail->bi_next = rq->bio;
block/blk-mq.c
3429
list->head = rq->bio;
block/blk-mq.c
3430
list->tail = rq->biotail;
block/blk-mq.c
3432
rq->bio = NULL;
block/blk-mq.c
3433
rq->biotail = NULL;
block/blk-mq.c
3436
rq->__data_len = 0;
block/blk-mq.c
3464
struct request *rq = drv_tags->rqs[i];
block/blk-mq.c
3465
unsigned long rq_addr = (unsigned long)rq;
block/blk-mq.c
3468
WARN_ON_ONCE(req_ref_read(rq) != 0);
block/blk-mq.c
3469
cmpxchg(&drv_tags->rqs[i], rq, NULL);
block/blk-mq.c
3492
struct request *rq = tags->static_rqs[i];
block/blk-mq.c
3494
if (!rq)
block/blk-mq.c
3496
set->ops->exit_request(set, rq, hctx_idx);
block/blk-mq.c
3581
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
block/blk-mq.c
3587
ret = set->ops->init_request(set, rq, hctx_idx, node);
block/blk-mq.c
3592
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
block/blk-mq.c
3652
struct request *rq = p;
block/blk-mq.c
3654
tags->static_rqs[i] = rq;
block/blk-mq.c
3655
if (blk_mq_init_request(set, rq, hctx_idx, node)) {
block/blk-mq.c
3676
static bool blk_mq_has_request(struct request *rq, void *data)
block/blk-mq.c
3680
if (rq->mq_hctx != iter_data->hctx)
block/blk-mq.c
373
void blk_rq_init(struct request_queue *q, struct request *rq)
block/blk-mq.c
375
memset(rq, 0, sizeof(*rq));
block/blk-mq.c
377
INIT_LIST_HEAD(&rq->queuelist);
block/blk-mq.c
378
rq->q = q;
block/blk-mq.c
379
rq->__sector = (sector_t) -1;
block/blk-mq.c
380
rq->phys_gap_bit = 0;
block/blk-mq.c
381
INIT_HLIST_NODE(&rq->hash);
block/blk-mq.c
382
RB_CLEAR_NODE(&rq->rb_node);
block/blk-mq.c
383
rq->tag = BLK_MQ_NO_TAG;
block/blk-mq.c
384
rq->internal_tag = BLK_MQ_NO_TAG;
block/blk-mq.c
385
rq->start_time_ns = blk_time_get_ns();
block/blk-mq.c
386
blk_crypto_rq_set_defaults(rq);
block/blk-mq.c
391
static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
block/blk-mq.c
394
if (blk_queue_rq_alloc_time(rq->q))
block/blk-mq.c
395
rq->alloc_time_ns = alloc_time_ns;
block/blk-mq.c
397
rq->alloc_time_ns = 0;
block/blk-mq.c
416
struct request *rq = tags->static_rqs[tag];
block/blk-mq.c
418
rq->q = q;
block/blk-mq.c
419
rq->mq_ctx = ctx;
block/blk-mq.c
420
rq->mq_hctx = hctx;
block/blk-mq.c
421
rq->cmd_flags = data->cmd_flags;
block/blk-mq.c
425
rq->rq_flags = data->rq_flags;
block/blk-mq.c
428
rq->tag = BLK_MQ_NO_TAG;
block/blk-mq.c
429
rq->internal_tag = tag;
block/blk-mq.c
431
rq->tag = tag;
block/blk-mq.c
432
rq->internal_tag = BLK_MQ_NO_TAG;
block/blk-mq.c
434
rq->timeout = 0;
block/blk-mq.c
436
rq->part = NULL;
block/blk-mq.c
437
rq->io_start_time_ns = 0;
block/blk-mq.c
438
rq->stats_sectors = 0;
block/blk-mq.c
439
rq->nr_phys_segments = 0;
block/blk-mq.c
440
rq->nr_integrity_segments = 0;
block/blk-mq.c
441
rq->end_io = NULL;
block/blk-mq.c
442
rq->end_io_data = NULL;
block/blk-mq.c
444
blk_crypto_rq_set_defaults(rq);
block/blk-mq.c
445
INIT_LIST_HEAD(&rq->queuelist);
block/blk-mq.c
447
WRITE_ONCE(rq->deadline, 0);
block/blk-mq.c
448
req_ref_set(rq, 1);
block/blk-mq.c
450
if (rq->rq_flags & RQF_USE_SCHED) {
block/blk-mq.c
453
INIT_HLIST_NODE(&rq->hash);
block/blk-mq.c
454
RB_CLEAR_NODE(&rq->rb_node);
block/blk-mq.c
457
e->type->ops.prepare_request(rq);
block/blk-mq.c
460
return rq;
block/blk-mq.c
468
struct request *rq;
block/blk-mq.c
486
rq = blk_mq_rq_ctx_init(data, tags, tag);
block/blk-mq.c
487
rq_list_add_head(data->cached_rqs, rq);
block/blk-mq.c
49
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
block/blk-mq.c
50
static void blk_mq_request_bypass_insert(struct request *rq,
block/blk-mq.c
5253
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
block/blk-mq.c
5256
struct request_queue *q = rq->q;
block/blk-mq.c
5259
if (!blk_rq_is_poll(rq))
block/blk-mq.c
5264
ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
block/blk-mq.c
5271
unsigned int blk_mq_rq_cpu(struct request *rq)
block/blk-mq.c
5273
return rq->mq_ctx->cpu;
block/blk-mq.c
541
struct request *rq;
block/blk-mq.c
563
rq = __blk_mq_alloc_requests_batch(data);
block/blk-mq.c
564
if (rq) {
block/blk-mq.c
565
blk_mq_rq_time_init(rq, alloc_time_ns);
block/blk-mq.c
566
return rq;
block/blk-mq.c
592
rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
block/blk-mq.c
593
blk_mq_rq_time_init(rq, alloc_time_ns);
block/blk-mq.c
594
return rq;
block/blk-mq.c
613
struct request *rq;
block/blk-mq.c
620
rq = __blk_mq_alloc_requests(&data);
block/blk-mq.c
621
if (unlikely(!rq))
block/blk-mq.c
623
return rq;
block/blk-mq.c
631
struct request *rq;
block/blk-mq.c
639
rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
block/blk-mq.c
640
if (!rq)
block/blk-mq.c
643
rq = rq_list_peek(&plug->cached_rqs);
block/blk-mq.c
644
if (!rq || rq->q != q)
block/blk-mq.c
647
if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
block/blk-mq.c
649
if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
block/blk-mq.c
653
blk_mq_rq_time_init(rq, blk_time_get_ns());
block/blk-mq.c
656
rq->cmd_flags = opf;
block/blk-mq.c
657
INIT_LIST_HEAD(&rq->queuelist);
block/blk-mq.c
658
return rq;
block/blk-mq.c
664
struct request *rq;
block/blk-mq.c
666
rq = blk_mq_alloc_cached_request(q, opf, flags);
block/blk-mq.c
667
if (!rq) {
block/blk-mq.c
685
rq = __blk_mq_alloc_requests(&data);
block/blk-mq.c
686
if (!rq)
block/blk-mq.c
689
rq->__data_len = 0;
block/blk-mq.c
690
rq->phys_gap_bit = 0;
block/blk-mq.c
691
rq->__sector = (sector_t) -1;
block/blk-mq.c
692
rq->bio = rq->biotail = NULL;
block/blk-mq.c
693
return rq;
block/blk-mq.c
715
struct request *rq;
block/blk-mq.c
768
rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
block/blk-mq.c
769
blk_mq_rq_time_init(rq, alloc_time_ns);
block/blk-mq.c
770
rq->__data_len = 0;
block/blk-mq.c
771
rq->phys_gap_bit = 0;
block/blk-mq.c
772
rq->__sector = (sector_t) -1;
block/blk-mq.c
773
rq->bio = rq->biotail = NULL;
block/blk-mq.c
774
return rq;
block/blk-mq.c
782
static void blk_mq_finish_request(struct request *rq)
block/blk-mq.c
784
struct request_queue *q = rq->q;
block/blk-mq.c
786
blk_zone_finish_request(rq);
block/blk-mq.c
788
if (rq->rq_flags & RQF_USE_SCHED) {
block/blk-mq.c
789
q->elevator->type->ops.finish_request(rq);
block/blk-mq.c
795
rq->rq_flags &= ~RQF_USE_SCHED;
block/blk-mq.c
799
static void __blk_mq_free_request(struct request *rq)
block/blk-mq.c
801
struct request_queue *q = rq->q;
block/blk-mq.c
802
struct blk_mq_ctx *ctx = rq->mq_ctx;
block/blk-mq.c
803
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
block/blk-mq.c
804
const int sched_tag = rq->internal_tag;
block/blk-mq.c
806
blk_crypto_free_request(rq);
block/blk-mq.c
807
blk_pm_mark_last_busy(rq);
block/blk-mq.c
808
rq->mq_hctx = NULL;
block/blk-mq.c
810
if (rq->tag != BLK_MQ_NO_TAG) {
block/blk-mq.c
812
blk_mq_put_tag(hctx->tags, ctx, rq->tag);
block/blk-mq.c
820
void blk_mq_free_request(struct request *rq)
block/blk-mq.c
822
struct request_queue *q = rq->q;
block/blk-mq.c
824
blk_mq_finish_request(rq);
block/blk-mq.c
826
rq_qos_done(q, rq);
block/blk-mq.c
828
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
block/blk-mq.c
829
if (req_ref_put_and_test(rq))
block/blk-mq.c
830
__blk_mq_free_request(rq);
block/blk-mq.c
836
struct request *rq;
block/blk-mq.c
838
while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
block/blk-mq.c
839
blk_mq_free_request(rq);
block/blk-mq.c
842
void blk_dump_rq_flags(struct request *rq, char *msg)
block/blk-mq.c
845
rq->q->disk ? rq->q->disk->disk_name : "?",
block/blk-mq.c
846
(__force unsigned long long) rq->cmd_flags);
block/blk-mq.c
849
(unsigned long long)blk_rq_pos(rq),
block/blk-mq.c
850
blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
block/blk-mq.c
852
rq->bio, rq->biotail, blk_rq_bytes(rq));
block/blk-mq.c
93
static bool blk_mq_check_in_driver(struct request *rq, void *priv)
block/blk-mq.c
97
if (rq->rq_flags & RQF_IO_STAT &&
block/blk-mq.c
98
(!bdev_is_partition(mi->part) || rq->part == mi->part) &&
block/blk-mq.c
99
blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
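For orientation, the blk-mq.c entries above cover request allocation, issue and freeing; a minimal sketch (not from the tree, the helper name and 30-second timeout are assumptions) of how a driver typically obtains one of these requests, runs it synchronously, and releases it:

	#include <linux/blk-mq.h>

	/* Hypothetical helper: issue one synchronous passthrough request on q. */
	static int example_issue_drv_in(struct request_queue *q)
	{
		struct request *rq;
		blk_status_t status;

		rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		rq->timeout = 30 * HZ;			/* assumed timeout */
		status = blk_execute_rq(rq, false);	/* waits for completion */
		blk_mq_free_request(rq);

		return blk_status_to_errno(status);
	}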
block/blk-mq.h
278
static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
block/blk-mq.h
283
if (rq->q->mq_ops->set_rq_budget_token)
block/blk-mq.h
284
rq->q->mq_ops->set_rq_budget_token(rq, token);
block/blk-mq.h
287
static inline int blk_mq_get_rq_budget_token(struct request *rq)
block/blk-mq.h
289
if (rq->q->mq_ops->get_rq_budget_token)
block/blk-mq.h
290
return rq->q->mq_ops->get_rq_budget_token(rq);
block/blk-mq.h
355
struct request *rq)
block/blk-mq.h
358
blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
block/blk-mq.h
359
rq->tag = BLK_MQ_NO_TAG;
block/blk-mq.h
362
static inline void blk_mq_put_driver_tag(struct request *rq)
block/blk-mq.h
364
if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
block/blk-mq.h
367
__blk_mq_put_driver_tag(rq->mq_hctx, rq);
block/blk-mq.h
370
bool __blk_mq_alloc_driver_tag(struct request *rq);
block/blk-mq.h
372
static inline bool blk_mq_get_driver_tag(struct request *rq)
block/blk-mq.h
374
if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
block/blk-mq.h
392
struct request *rq = list_entry_rq(list->next);
block/blk-mq.h
394
list_del_init(&rq->queuelist);
block/blk-mq.h
395
blk_mq_free_request(rq);
block/blk-mq.h
58
void blk_mq_put_rq_ref(struct request *rq);
block/blk-pm.h
19
static inline void blk_pm_mark_last_busy(struct request *rq)
block/blk-pm.h
21
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
block/blk-pm.h
22
pm_runtime_mark_last_busy(rq->q->dev);
block/blk-pm.h
30
static inline void blk_pm_mark_last_busy(struct request *rq)
block/blk-rq-qos.c
35
void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
block/blk-rq-qos.c
39
rqos->ops->done(rqos, rq);
block/blk-rq-qos.c
44
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
block/blk-rq-qos.c
48
rqos->ops->issue(rqos, rq);
block/blk-rq-qos.c
53
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
block/blk-rq-qos.c
57
rqos->ops->requeue(rqos, rq);
block/blk-rq-qos.c
71
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
block/blk-rq-qos.c
75
rqos->ops->track(rqos, rq, bio);
block/blk-rq-qos.c
80
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
block/blk-rq-qos.c
84
rqos->ops->merge(rqos, rq, bio);
block/blk-rq-qos.h
104
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
block/blk-rq-qos.h
105
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
block/blk-rq-qos.h
106
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
block/blk-rq-qos.h
108
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
block/blk-rq-qos.h
109
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
block/blk-rq-qos.h
119
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
block/blk-rq-qos.h
122
q->rq_qos && !blk_rq_is_passthrough(rq))
block/blk-rq-qos.h
123
__rq_qos_done(q->rq_qos, rq);
block/blk-rq-qos.h
126
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
block/blk-rq-qos.h
129
__rq_qos_issue(q->rq_qos, rq);
block/blk-rq-qos.h
132
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
block/blk-rq-qos.h
135
__rq_qos_requeue(q->rq_qos, rq);
block/blk-rq-qos.h
168
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
block/blk-rq-qos.h
172
__rq_qos_track(q->rq_qos, rq, bio);
block/blk-rq-qos.h
175
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
block/blk-rq-qos.h
180
__rq_qos_merge(q->rq_qos, rq, bio);
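The blk-rq-qos entries above all follow the same inline-wrapper shape: the fast path checks q->rq_qos and only then takes the out-of-line call into the policy chain. A sketch of that pattern (the wrapper name here is illustrative):

	/* Sketch of the rq_qos wrapper pattern: skip the indirect call
	 * entirely when no policy is attached to the queue. */
	static inline void rq_qos_example_issue(struct request_queue *q,
						struct request *rq)
	{
		if (q->rq_qos)
			__rq_qos_issue(q->rq_qos, rq);
	}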
block/blk-stat.c
50
void blk_stat_add(struct request *rq, u64 now)
block/blk-stat.c
52
struct request_queue *q = rq->q;
block/blk-stat.c
58
value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
block/blk-stat.c
66
bucket = cb->bucket_fn(rq);
block/blk-stat.h
71
void blk_stat_add(struct request *rq, u64 now);
block/blk-wbt.c
103
static inline void wbt_clear_state(struct request *rq)
block/blk-wbt.c
105
rq->wbt_flags = 0;
block/blk-wbt.c
108
static inline enum wbt_flags wbt_flags(struct request *rq)
block/blk-wbt.c
110
return rq->wbt_flags;
block/blk-wbt.c
113
static inline bool wbt_is_tracked(struct request *rq)
block/blk-wbt.c
115
return rq->wbt_flags & WBT_TRACKED;
block/blk-wbt.c
118
static inline bool wbt_is_read(struct request *rq)
block/blk-wbt.c
120
return rq->wbt_flags & WBT_READ;
block/blk-wbt.c
248
static void wbt_done(struct rq_qos *rqos, struct request *rq)
block/blk-wbt.c
252
if (!wbt_is_tracked(rq)) {
block/blk-wbt.c
253
if (wbt_is_read(rq)) {
block/blk-wbt.c
254
if (rwb->sync_cookie == rq) {
block/blk-wbt.c
262
WARN_ON_ONCE(rq == rwb->sync_cookie);
block/blk-wbt.c
263
__wbt_done(rqos, wbt_flags(rq));
block/blk-wbt.c
265
wbt_clear_state(rq);
block/blk-wbt.c
664
static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
block/blk-wbt.c
667
rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
block/blk-wbt.c
670
static void wbt_issue(struct rq_qos *rqos, struct request *rq)
block/blk-wbt.c
684
if (wbt_is_read(rq) && !rwb->sync_issue) {
block/blk-wbt.c
685
rwb->sync_cookie = rq;
block/blk-wbt.c
686
rwb->sync_issue = rq->io_start_time_ns;
block/blk-wbt.c
690
static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
block/blk-wbt.c
695
if (rq == rwb->sync_cookie) {
block/blk-wbt.c
701
static int wbt_data_dir(const struct request *rq)
block/blk-wbt.c
703
const enum req_op op = req_op(rq);
block/blk-zoned.c
1644
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio)
block/blk-zoned.c
1654
bio->bi_iter.bi_sector = rq->__sector;
block/blk-zoned.c
1655
trace_blk_zone_append_update_request_bio(rq);
block/blk.h
161
static inline bool rq_mergeable(struct request *rq)
block/blk.h
163
if (blk_rq_is_passthrough(rq))
block/blk.h
166
if (req_op(rq) == REQ_OP_FLUSH)
block/blk.h
169
if (req_op(rq) == REQ_OP_WRITE_ZEROES)
block/blk.h
172
if (req_op(rq) == REQ_OP_ZONE_APPEND)
block/blk.h
175
if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
block/blk.h
177
if (rq->rq_flags & RQF_NOMERGE_FLAGS)
block/blk.h
199
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
block/blk.h
201
if (req_op(rq) == REQ_OP_DISCARD)
block/blk.h
202
return queue_max_discard_segments(rq->q);
block/blk.h
203
return queue_max_segments(rq->q);
block/blk.h
206
static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
block/blk.h
208
struct request_queue *q = rq->q;
block/blk.h
209
enum req_op op = req_op(rq);
block/blk.h
222
if (rq->cmd_flags & REQ_ATOMIC)
block/blk.h
277
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
block/blk.h
282
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
block/blk.h
335
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
block/blk.h
337
bool blk_insert_flush(struct request *rq);
block/blk.h
452
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
block/blk.h
454
unsigned int blk_recalc_rq_segments(struct request *rq);
block/blk.h
455
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
block/blk.h
456
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
block/blk.h
492
static inline bool blk_req_bio_is_zone_append(struct request *rq,
block/blk.h
495
return req_op(rq) == REQ_OP_ZONE_APPEND ||
block/blk.h
499
void blk_zone_write_plug_init_request(struct request *rq);
block/blk.h
500
void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio);
block/blk.h
526
void blk_zone_write_plug_finish_request(struct request *rq);
block/blk.h
527
static inline void blk_zone_finish_request(struct request *rq)
block/blk.h
529
if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
block/blk.h
530
blk_zone_write_plug_finish_request(rq);
block/blk.h
555
static inline void blk_zone_write_plug_init_request(struct request *rq)
block/blk.h
558
static inline void blk_zone_append_update_request_bio(struct request *rq,
block/blk.h
565
static inline void blk_zone_finish_request(struct request *rq)
block/blk.h
704
void blk_integrity_prepare(struct request *rq);
block/blk.h
705
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
block/bsg-lib.c
148
blk_mq_free_request(rq);
block/bsg-lib.c
159
struct request *rq = blk_mq_rq_from_pdu(job);
block/bsg-lib.c
166
blk_mq_end_request(rq, BLK_STS_OK);
block/bsg-lib.c
192
struct request *rq = blk_mq_rq_from_pdu(job);
block/bsg-lib.c
196
if (likely(!blk_should_fake_timeout(rq->q)))
block/bsg-lib.c
197
blk_mq_complete_request(rq);
block/bsg-lib.c
205
static void bsg_complete(struct request *rq)
block/bsg-lib.c
207
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
block/bsg-lib.c
277
struct request *req = bd->rq;
block/bsg-lib.c
32
struct request *rq;
block/bsg-lib.c
335
static enum blk_eh_timer_return bsg_timeout(struct request *rq)
block/bsg-lib.c
338
container_of(rq->q->tag_set, struct bsg_set, tag_set);
block/bsg-lib.c
342
return bset->timeout_fn(rq);
block/bsg-lib.c
43
rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
block/bsg-lib.c
45
if (IS_ERR(rq))
block/bsg-lib.c
46
return PTR_ERR(rq);
block/bsg-lib.c
47
rq->timeout = timeout;
block/bsg-lib.c
49
job = blk_mq_rq_to_pdu(rq);
block/bsg-lib.c
64
job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
block/bsg-lib.c
70
ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
block/bsg-lib.c
84
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
block/bsg-lib.c
87
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
block/bsg-lib.c
94
bio = rq->bio;
block/bsg-lib.c
95
blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
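The bsg-lib entries rely on the per-request payload (PDU) that blk-mq allocates alongside each request; a small sketch of the round trip between the two views (struct bsg_job is the PDU type used by bsg-lib, the helper itself is illustrative):

	/* Sketch: convert between a request and the driver PDU behind it. */
	static void example_pdu_roundtrip(struct request *rq)
	{
		struct bsg_job *job = blk_mq_rq_to_pdu(rq);	/* request -> PDU */
		struct request *back = blk_mq_rq_from_pdu(job);	/* PDU -> request */

		WARN_ON(back != rq);	/* both views describe the same allocation */
	}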
block/elevator.c
165
static inline void __elv_rqhash_del(struct request *rq)
block/elevator.c
167
hash_del(&rq->hash);
block/elevator.c
168
rq->rq_flags &= ~RQF_HASHED;
block/elevator.c
171
void elv_rqhash_del(struct request_queue *q, struct request *rq)
block/elevator.c
173
if (ELV_ON_HASH(rq))
block/elevator.c
174
__elv_rqhash_del(rq);
block/elevator.c
178
void elv_rqhash_add(struct request_queue *q, struct request *rq)
block/elevator.c
182
BUG_ON(ELV_ON_HASH(rq));
block/elevator.c
183
hash_add(e->hash, &rq->hash, rq_hash_key(rq));
block/elevator.c
184
rq->rq_flags |= RQF_HASHED;
block/elevator.c
188
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
block/elevator.c
190
__elv_rqhash_del(rq);
block/elevator.c
191
elv_rqhash_add(q, rq);
block/elevator.c
198
struct request *rq;
block/elevator.c
200
hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
block/elevator.c
201
BUG_ON(!ELV_ON_HASH(rq));
block/elevator.c
203
if (unlikely(!rq_mergeable(rq))) {
block/elevator.c
204
__elv_rqhash_del(rq);
block/elevator.c
208
if (rq_hash_key(rq) == offset)
block/elevator.c
209
return rq;
block/elevator.c
219
void elv_rb_add(struct rb_root *root, struct request *rq)
block/elevator.c
229
if (blk_rq_pos(rq) < blk_rq_pos(__rq))
block/elevator.c
231
else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
block/elevator.c
235
rb_link_node(&rq->rb_node, parent, p);
block/elevator.c
236
rb_insert_color(&rq->rb_node, root);
block/elevator.c
240
void elv_rb_del(struct rb_root *root, struct request *rq)
block/elevator.c
242
BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
block/elevator.c
243
rb_erase(&rq->rb_node, root);
block/elevator.c
244
RB_CLEAR_NODE(&rq->rb_node);
block/elevator.c
251
struct request *rq;
block/elevator.c
254
rq = rb_entry(n, struct request, rb_node);
block/elevator.c
256
if (sector < blk_rq_pos(rq))
block/elevator.c
258
else if (sector > blk_rq_pos(rq))
block/elevator.c
261
return rq;
block/elevator.c
324
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
block/elevator.c
336
if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
block/elevator.c
337
list_add(&rq->queuelist, free);
block/elevator.c
349
__rq = elv_rqhash_find(q, blk_rq_pos(rq));
block/elevator.c
350
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
block/elevator.c
353
list_add(&rq->queuelist, free);
block/elevator.c
356
rq = __rq;
block/elevator.c
362
void elv_merged_request(struct request_queue *q, struct request *rq,
block/elevator.c
368
e->type->ops.request_merged(q, rq, type);
block/elevator.c
371
elv_rqhash_reposition(q, rq);
block/elevator.c
373
q->last_merge = rq;
block/elevator.c
376
void elv_merge_requests(struct request_queue *q, struct request *rq,
block/elevator.c
382
e->type->ops.requests_merged(q, rq, next);
block/elevator.c
384
elv_rqhash_reposition(q, rq);
block/elevator.c
385
q->last_merge = rq;
block/elevator.c
388
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
block/elevator.c
393
return e->type->ops.next_request(q, rq);
block/elevator.c
398
struct request *elv_former_request(struct request_queue *q, struct request *rq)
block/elevator.c
403
return e->type->ops.former_request(q, rq);
block/elevator.c
54
#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
block/elevator.c
60
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
block/elevator.c
62
struct request_queue *q = rq->q;
block/elevator.c
66
return e->type->ops.allow_merge(q, rq, bio);
block/elevator.c
74
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
block/elevator.c
76
if (!blk_rq_merge_ok(rq, bio))
block/elevator.c
79
if (!elv_iosched_allow_bio_merge(rq, bio))
block/elevator.c
865
struct request *rq)
block/elevator.c
867
struct rb_node *rbprev = rb_prev(&rq->rb_node);
block/elevator.c
877
struct request *rq)
block/elevator.c
879
struct rb_node *rbnext = rb_next(&rq->rb_node);
block/elevator.h
138
void elv_rqhash_del(struct request_queue *q, struct request *rq);
block/elevator.h
139
void elv_rqhash_add(struct request_queue *q, struct request *rq);
block/elevator.h
140
void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
block/elevator.h
216
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
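The elevator entries show the two generic per-request indexes an I/O scheduler can keep: a hash keyed by end sector (elv_rqhash_*) for back-merges and a red-black tree keyed by start sector (elv_rb_*) for position lookups. A sketch of the rb-tree side, under an assumed function name:

	/* Sketch: index a request by start sector and look it up again.
	 * sort_list would normally live in the scheduler's per-queue data. */
	static struct request *example_rb_lookup(struct rb_root *sort_list,
						 struct request *rq)
	{
		elv_rb_add(sort_list, rq);			/* keyed by blk_rq_pos(rq) */
		return elv_rb_find(sort_list, blk_rq_pos(rq));	/* exact-match lookup */
	}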
block/kyber-iosched.c
523
static int rq_get_domain_token(struct request *rq)
block/kyber-iosched.c
525
return (long)rq->elv.priv[0];
block/kyber-iosched.c
528
static void rq_set_domain_token(struct request *rq, int token)
block/kyber-iosched.c
530
rq->elv.priv[0] = (void *)(long)token;
block/kyber-iosched.c
534
struct request *rq)
block/kyber-iosched.c
539
nr = rq_get_domain_token(rq);
block/kyber-iosched.c
541
sched_domain = kyber_sched_domain(rq->cmd_flags);
block/kyber-iosched.c
543
rq->mq_ctx->cpu);
block/kyber-iosched.c
571
static void kyber_prepare_request(struct request *rq)
block/kyber-iosched.c
573
rq_set_domain_token(rq, -1);
block/kyber-iosched.c
581
struct request *rq, *next;
block/kyber-iosched.c
583
list_for_each_entry_safe(rq, next, rq_list, queuelist) {
block/kyber-iosched.c
584
unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
block/kyber-iosched.c
585
struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
block/kyber-iosched.c
589
trace_block_rq_insert(rq);
block/kyber-iosched.c
591
list_move(&rq->queuelist, head);
block/kyber-iosched.c
593
list_move_tail(&rq->queuelist, head);
block/kyber-iosched.c
595
rq->mq_ctx->index_hw[hctx->type]);
block/kyber-iosched.c
600
static void kyber_finish_request(struct request *rq)
block/kyber-iosched.c
602
struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
block/kyber-iosched.c
604
rq_clear_domain_token(kqd, rq);
block/kyber-iosched.c
625
static void kyber_completed_request(struct request *rq, u64 now)
block/kyber-iosched.c
627
struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
block/kyber-iosched.c
632
sched_domain = kyber_sched_domain(rq->cmd_flags);
block/kyber-iosched.c
639
target, now - rq->start_time_ns);
block/kyber-iosched.c
641
now - rq->io_start_time_ns);
block/kyber-iosched.c
745
struct request *rq;
block/kyber-iosched.c
758
rq = list_first_entry_or_null(rqs, struct request, queuelist);
block/kyber-iosched.c
759
if (rq) {
block/kyber-iosched.c
763
rq_set_domain_token(rq, nr);
block/kyber-iosched.c
764
list_del_init(&rq->queuelist);
block/kyber-iosched.c
765
return rq;
block/kyber-iosched.c
774
rq = list_first_entry(rqs, struct request, queuelist);
block/kyber-iosched.c
776
rq_set_domain_token(rq, nr);
block/kyber-iosched.c
777
list_del_init(&rq->queuelist);
block/kyber-iosched.c
778
return rq;
block/kyber-iosched.c
793
struct request *rq;
block/kyber-iosched.c
803
rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
block/kyber-iosched.c
804
if (rq)
block/kyber-iosched.c
824
rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
block/kyber-iosched.c
825
if (rq)
block/kyber-iosched.c
829
rq = NULL;
block/kyber-iosched.c
832
return rq;
block/mq-deadline.c
115
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
block/mq-deadline.c
117
return &per_prio->sort_list[rq_data_dir(rq)];
block/mq-deadline.c
124
static u8 dd_rq_ioclass(struct request *rq)
block/mq-deadline.c
126
return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
block/mq-deadline.c
136
struct request *rq, *res = NULL;
block/mq-deadline.c
139
rq = rb_entry_rq(node);
block/mq-deadline.c
140
if (blk_rq_pos(rq) >= pos) {
block/mq-deadline.c
141
res = rq;
block/mq-deadline.c
151
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
block/mq-deadline.c
153
struct rb_root *root = deadline_rb_root(per_prio, rq);
block/mq-deadline.c
155
elv_rb_add(root, rq);
block/mq-deadline.c
159
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
block/mq-deadline.c
161
elv_rb_del(deadline_rb_root(per_prio, rq), rq);
block/mq-deadline.c
169
struct request *rq)
block/mq-deadline.c
171
list_del_init(&rq->queuelist);
block/mq-deadline.c
176
if (!RB_EMPTY_NODE(&rq->rb_node))
block/mq-deadline.c
177
deadline_del_rq_rb(per_prio, rq);
block/mq-deadline.c
179
elv_rqhash_del(q, rq);
block/mq-deadline.c
180
if (q->last_merge == rq)
block/mq-deadline.c
238
struct request *rq)
block/mq-deadline.c
243
deadline_remove_request(rq->q, per_prio, rq);
block/mq-deadline.c
263
struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
block/mq-deadline.c
265
return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
block/mq-deadline.c
298
static bool started_after(struct deadline_data *dd, struct request *rq,
block/mq-deadline.c
301
unsigned long start_time = (unsigned long)rq->fifo_time;
block/mq-deadline.c
303
start_time -= dd->fifo_expire[rq_data_dir(rq)];
block/mq-deadline.c
310
struct request *rq)
block/mq-deadline.c
312
u8 ioprio_class = dd_rq_ioclass(rq);
block/mq-deadline.c
315
dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
block/mq-deadline.c
317
rq->rq_flags |= RQF_STARTED;
block/mq-deadline.c
318
return rq;
block/mq-deadline.c
329
struct request *rq, *next_rq;
block/mq-deadline.c
337
rq = deadline_next_request(dd, per_prio, dd->last_dir);
block/mq-deadline.c
338
if (rq && dd->batching < dd->fifo_batch) {
block/mq-deadline.c
340
data_dir = rq_data_dir(rq);
block/mq-deadline.c
389
rq = deadline_fifo_request(dd, per_prio, data_dir);
block/mq-deadline.c
395
rq = next_rq;
block/mq-deadline.c
398
if (!rq)
block/mq-deadline.c
405
if (started_after(dd, rq, latest_start))
block/mq-deadline.c
412
deadline_move_request(dd, per_prio, rq);
block/mq-deadline.c
413
return dd_start_request(dd, data_dir, rq);
block/mq-deadline.c
423
struct request *rq;
block/mq-deadline.c
435
rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
block/mq-deadline.c
437
if (rq)
block/mq-deadline.c
438
return rq;
block/mq-deadline.c
456
struct request *rq;
block/mq-deadline.c
462
rq = list_first_entry(&dd->dispatch, struct request, queuelist);
block/mq-deadline.c
463
list_del_init(&rq->queuelist);
block/mq-deadline.c
464
dd_start_request(dd, rq_data_dir(rq), rq);
block/mq-deadline.c
468
rq = dd_dispatch_prio_aged_requests(dd, now);
block/mq-deadline.c
469
if (rq)
block/mq-deadline.c
477
rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
block/mq-deadline.c
478
if (rq || dd_queued(dd, prio))
block/mq-deadline.c
485
return rq;
block/mq-deadline.c
571
static int dd_request_merge(struct request_queue *q, struct request **rq,
block/mq-deadline.c
589
*rq = __rq;
block/mq-deadline.c
623
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
block/mq-deadline.c
628
const enum dd_data_dir data_dir = rq_data_dir(rq);
block/mq-deadline.c
629
u16 ioprio = req_get_ioprio(rq);
block/mq-deadline.c
638
if (!rq->elv.priv[0])
block/mq-deadline.c
640
rq->elv.priv[0] = per_prio;
block/mq-deadline.c
642
if (blk_mq_sched_try_insert_merge(q, rq, free))
block/mq-deadline.c
645
trace_block_rq_insert(rq);
block/mq-deadline.c
648
list_add(&rq->queuelist, &dd->dispatch);
block/mq-deadline.c
649
rq->fifo_time = jiffies;
block/mq-deadline.c
651
deadline_add_rq_rb(per_prio, rq);
block/mq-deadline.c
653
if (rq_mergeable(rq)) {
block/mq-deadline.c
654
elv_rqhash_add(q, rq);
block/mq-deadline.c
656
q->last_merge = rq;
block/mq-deadline.c
662
rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
block/mq-deadline.c
663
list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
block/mq-deadline.c
680
struct request *rq;
block/mq-deadline.c
682
rq = list_first_entry(list, struct request, queuelist);
block/mq-deadline.c
683
list_del_init(&rq->queuelist);
block/mq-deadline.c
684
dd_insert_request(hctx, rq, flags, &free);
block/mq-deadline.c
692
static void dd_prepare_request(struct request *rq)
block/mq-deadline.c
694
rq->elv.priv[0] = NULL;
block/mq-deadline.c
700
static void dd_finish_request(struct request *rq)
block/mq-deadline.c
702
struct dd_per_prio *per_prio = rq->elv.priv[0];
block/mq-deadline.c
843
struct request *rq; \
block/mq-deadline.c
845
rq = deadline_from_pos(per_prio, data_dir, \
block/mq-deadline.c
847
if (rq) \
block/mq-deadline.c
848
__blk_mq_debugfs_rq_show(m, rq); \
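mq-deadline stamps rq->fifo_time at insertion time (jiffies plus the per-direction expiry) and later tests the FIFO head to decide whether a request has aged out; a sketch of that check, using the file's own per-priority structure but an assumed function name:

	/* Sketch: has the oldest request in this direction passed its deadline? */
	static bool example_fifo_expired(struct dd_per_prio *per_prio, int data_dir)
	{
		struct request *rq;

		if (list_empty(&per_prio->fifo_list[data_dir]))
			return false;
		rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
		return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
	}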
block/t10-pi.c
125
static void t10_pi_type1_prepare(struct request *rq)
block/t10-pi.c
127
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
129
u32 ref_tag = t10_pi_ref_tag(rq);
block/t10-pi.c
133
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
176
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
block/t10-pi.c
178
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
181
u32 ref_tag = t10_pi_ref_tag(rq);
block/t10-pi.c
185
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
299
static void ext_pi_type1_prepare(struct request *rq)
block/t10-pi.c
301
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
303
u64 ref_tag = ext_pi_ref_tag(rq);
block/t10-pi.c
307
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
339
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
block/t10-pi.c
341
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
344
u64 ref_tag = ext_pi_ref_tag(rq);
block/t10-pi.c
348
__rq_for_each_bio(bio, rq) {
block/t10-pi.c
449
void blk_integrity_prepare(struct request *rq)
block/t10-pi.c
451
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
457
ext_pi_type1_prepare(rq);
block/t10-pi.c
459
t10_pi_type1_prepare(rq);
block/t10-pi.c
462
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
block/t10-pi.c
464
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
470
ext_pi_type1_complete(rq, nr_bytes);
block/t10-pi.c
472
t10_pi_type1_complete(rq, nr_bytes);
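The t10-pi prepare/complete hooks walk every bio in the request to remap protection-information reference tags; the iteration itself is the generic bio walk, sketched here with the remap body elided and an assumed function name:

	/* Sketch: visit every bio attached to a request, as the PI hooks do. */
	static void example_walk_bios(struct request *rq)
	{
		struct bio *bio;

		__rq_for_each_bio(bio, rq) {
			/* ... remap the PI reference tag carried by this bio ... */
		}
	}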
drivers/ata/libata-scsi.c
1056
bool ata_scsi_dma_need_drain(struct request *rq)
drivers/ata/libata-scsi.c
1058
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/ata/libata-scsi.c
1517
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/ata/libata-scsi.c
1520
if (!blk_rq_is_passthrough(rq))
drivers/ata/libata-scsi.c
1523
req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
drivers/ata/libata-scsi.c
1552
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/ata/libata-scsi.c
1553
int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
drivers/block/amiflop.c
1460
struct request *rq)
drivers/block/amiflop.c
1466
for (cnt = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
drivers/block/amiflop.c
1469
blk_rq_pos(rq), cnt,
drivers/block/amiflop.c
1470
(rq_data_dir(rq) == READ) ? "read" : "write");
drivers/block/amiflop.c
1472
block = blk_rq_pos(rq) + cnt;
drivers/block/amiflop.c
1475
data = bio_data(rq->bio) + 512 * cnt;
drivers/block/amiflop.c
1484
if (rq_data_dir(rq) == READ) {
drivers/block/amiflop.c
1508
struct request *rq = bd->rq;
drivers/block/amiflop.c
1509
struct amiga_floppy_struct *floppy = rq->q->disk->private_data;
drivers/block/amiflop.c
1515
blk_mq_start_request(rq);
drivers/block/amiflop.c
1518
err = amiflop_rw_cur_segment(floppy, rq);
drivers/block/amiflop.c
1519
} while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
drivers/block/amiflop.c
1520
blk_mq_end_request(rq, err);
drivers/block/aoe/aoe.h
112
struct request *rq;
drivers/block/aoe/aoe.h
185
struct request *rq;
drivers/block/aoe/aoeblk.c
261
blk_mq_start_request(bd->rq);
drivers/block/aoe/aoeblk.c
265
list_add_tail(&bd->rq->queuelist, &d->rq_list);
drivers/block/aoe/aoecmd.c
1041
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
drivers/block/aoe/aoecmd.c
1049
if (rq == d->ip.rq)
drivers/block/aoe/aoecmd.c
1050
d->ip.rq = NULL;
drivers/block/aoe/aoecmd.c
1052
bio = rq->bio;
drivers/block/aoe/aoecmd.c
1056
} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
drivers/block/aoe/aoecmd.c
1058
__blk_mq_end_request(rq, err);
drivers/block/aoe/aoecmd.c
1068
struct request *rq = buf->rq;
drivers/block/aoe/aoecmd.c
1069
struct aoe_req *req = blk_mq_rq_to_pdu(rq);
drivers/block/aoe/aoecmd.c
1075
aoe_end_request(d, rq, 0);
drivers/block/aoe/aoecmd.c
834
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
drivers/block/aoe/aoecmd.c
837
buf->rq = rq;
drivers/block/aoe/aoecmd.c
845
struct request *rq;
drivers/block/aoe/aoecmd.c
856
rq = d->ip.rq;
drivers/block/aoe/aoecmd.c
857
if (rq == NULL) {
drivers/block/aoe/aoecmd.c
858
rq = list_first_entry_or_null(&d->rq_list, struct request,
drivers/block/aoe/aoecmd.c
860
if (rq == NULL)
drivers/block/aoe/aoecmd.c
862
list_del_init(&rq->queuelist);
drivers/block/aoe/aoecmd.c
863
blk_mq_start_request(rq);
drivers/block/aoe/aoecmd.c
864
d->ip.rq = rq;
drivers/block/aoe/aoecmd.c
865
d->ip.nxbio = rq->bio;
drivers/block/aoe/aoecmd.c
867
req = blk_mq_rq_to_pdu(rq);
drivers/block/aoe/aoecmd.c
869
__rq_for_each_bio(bio, rq)
drivers/block/aoe/aoecmd.c
878
bufinit(buf, rq, bio);
drivers/block/aoe/aoecmd.c
882
d->ip.rq = NULL;
drivers/block/aoe/aoedev.c
162
struct request *rq;
drivers/block/aoe/aoedev.c
167
rq = d->ip.rq;
drivers/block/aoe/aoedev.c
168
if (rq == NULL)
drivers/block/aoe/aoedev.c
171
req = blk_mq_rq_to_pdu(rq);
drivers/block/aoe/aoedev.c
179
aoe_end_request(d, rq, 0);
drivers/block/aoe/aoedev.c
201
struct request *rq, *rqnext;
drivers/block/aoe/aoedev.c
231
list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
drivers/block/aoe/aoedev.c
232
list_del_init(&rq->queuelist);
drivers/block/aoe/aoedev.c
233
blk_mq_start_request(rq);
drivers/block/aoe/aoedev.c
234
blk_mq_end_request(rq, BLK_STS_IOERR);
drivers/block/ataflop.c
1507
struct atari_floppy_struct *floppy = bd->rq->q->disk->private_data;
drivers/block/ataflop.c
1512
drive, type, blk_rq_cur_sectors(bd->rq),
drivers/block/ataflop.c
1513
blk_rq_sectors(bd->rq), bd->last));
drivers/block/ataflop.c
1524
fd_request = bd->rq;
drivers/block/ataflop.c
1544
set_capacity(bd->rq->q->disk, UDT->blocks);
drivers/block/ataflop.c
1564
set_capacity(bd->rq->q->disk, UDT->blocks);
drivers/block/floppy.c
2853
blk_mq_start_request(bd->rq);
drivers/block/floppy.c
2873
list_add_tail(&bd->rq->queuelist, &floppy_reqs);
drivers/block/loop.c
1854
struct request *rq = bd->rq;
drivers/block/loop.c
1855
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/loop.c
1856
struct loop_device *lo = rq->q->queuedata;
drivers/block/loop.c
1858
blk_mq_start_request(rq);
drivers/block/loop.c
1863
switch (req_op(rq)) {
drivers/block/loop.c
1878
if (rq->bio) {
drivers/block/loop.c
1879
cmd->blkcg_css = bio_blkcg_css(rq->bio);
drivers/block/loop.c
1898
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/loop.c
1899
const bool write = op_is_write(req_op(rq));
drivers/block/loop.c
1900
struct loop_device *lo = rq->q->queuedata;
drivers/block/loop.c
1910
if (rq->cmd_flags & REQ_NOWAIT)
drivers/block/loop.c
1911
rq->cmd_flags &= ~REQ_NOWAIT;
drivers/block/loop.c
1925
ret = do_req_filebacked(lo, rq);
drivers/block/loop.c
1941
if (likely(!blk_should_fake_timeout(rq->q)))
drivers/block/loop.c
1942
blk_mq_complete_request(rq);
drivers/block/loop.c
246
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
drivers/block/loop.c
261
ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
drivers/block/loop.c
275
static int lo_req_flush(struct loop_device *lo, struct request *rq)
drivers/block/loop.c
284
static void lo_complete_rq(struct request *rq)
drivers/block/loop.c
286
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/loop.c
289
if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
drivers/block/loop.c
290
req_op(rq) != REQ_OP_READ) {
drivers/block/loop.c
301
blk_update_request(rq, BLK_STS_OK, cmd->ret);
drivers/block/loop.c
303
blk_mq_requeue_request(rq, true);
drivers/block/loop.c
305
struct bio *bio = rq->bio;
drivers/block/loop.c
314
blk_mq_end_request(rq, ret);
drivers/block/loop.c
320
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/loop.c
326
if (req_op(rq) == REQ_OP_WRITE)
drivers/block/loop.c
328
if (likely(!blk_should_fake_timeout(rq->q)))
drivers/block/loop.c
329
blk_mq_complete_request(rq);
drivers/block/loop.c
346
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/loop.c
347
struct bio *bio = rq->bio;
drivers/block/loop.c
354
nr_bvec = blk_rq_nr_bvec(rq);
drivers/block/loop.c
356
if (rq->bio != rq->biotail) {
drivers/block/loop.c
369
rq_for_each_bvec(tmp, rq, rq_iter) {
drivers/block/loop.c
386
iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
drivers/block/loop.c
391
cmd->iocb.ki_ioprio = req_get_ioprio(rq);
drivers/block/loop.c
413
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
drivers/block/loop.c
415
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/loop.c
416
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
drivers/block/loop.c
418
switch (req_op(rq)) {
drivers/block/loop.c
420
return lo_req_flush(lo, rq);
drivers/block/loop.c
426
return lo_fallocate(lo, rq, pos,
drivers/block/loop.c
427
(rq->cmd_flags & REQ_NOUNMAP) ?
drivers/block/loop.c
431
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
drivers/block/mtip32xx/mtip32xx.c
1003
rq->timeout = timeout;
drivers/block/mtip32xx/mtip32xx.c
1006
blk_execute_rq(rq, true);
drivers/block/mtip32xx/mtip32xx.c
1037
blk_mq_free_request(rq);
drivers/block/mtip32xx/mtip32xx.c
2046
static int mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
drivers/block/mtip32xx/mtip32xx.c
2051
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
drivers/block/mtip32xx/mtip32xx.c
2054
int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
drivers/block/mtip32xx/mtip32xx.c
2055
u64 start = blk_rq_pos(rq);
drivers/block/mtip32xx/mtip32xx.c
2056
unsigned int nsect = blk_rq_sectors(rq);
drivers/block/mtip32xx/mtip32xx.c
2060
command->scatter_ents = blk_rq_map_sg(rq, command->sg);
drivers/block/mtip32xx/mtip32xx.c
2092
fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5));
drivers/block/mtip32xx/mtip32xx.c
2116
set_bit(rq->tag, port->cmds_to_issue);
drivers/block/mtip32xx/mtip32xx.c
2122
mtip_issue_ncq_command(port, rq->tag);
drivers/block/mtip32xx/mtip32xx.c
2419
static void mtip_softirq_done_fn(struct request *rq)
drivers/block/mtip32xx/mtip32xx.c
2421
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/mtip32xx/mtip32xx.c
2422
struct driver_data *dd = rq->q->queuedata;
drivers/block/mtip32xx/mtip32xx.c
2431
blk_mq_end_request(rq, cmd->status);
drivers/block/mtip32xx/mtip32xx.c
3224
static inline bool is_stopped(struct driver_data *dd, struct request *rq)
drivers/block/mtip32xx/mtip32xx.c
3234
rq_data_dir(rq))
drivers/block/mtip32xx/mtip32xx.c
3245
struct request *rq)
drivers/block/mtip32xx/mtip32xx.c
3248
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/mtip32xx/mtip32xx.c
3250
if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
drivers/block/mtip32xx/mtip32xx.c
3257
if (blk_rq_sectors(rq) <= 64) {
drivers/block/mtip32xx/mtip32xx.c
3258
if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
drivers/block/mtip32xx/mtip32xx.c
3269
struct request *rq)
drivers/block/mtip32xx/mtip32xx.c
3272
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/mtip32xx/mtip32xx.c
3275
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
drivers/block/mtip32xx/mtip32xx.c
3300
blk_mq_start_request(rq);
drivers/block/mtip32xx/mtip32xx.c
3301
mtip_issue_non_ncq_command(dd->port, rq->tag);
drivers/block/mtip32xx/mtip32xx.c
3309
struct request *rq = bd->rq;
drivers/block/mtip32xx/mtip32xx.c
3310
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/mtip32xx/mtip32xx.c
3312
if (blk_rq_is_passthrough(rq))
drivers/block/mtip32xx/mtip32xx.c
3313
return mtip_issue_reserved_cmd(hctx, rq);
drivers/block/mtip32xx/mtip32xx.c
3315
if (unlikely(mtip_check_unal_depth(hctx, rq)))
drivers/block/mtip32xx/mtip32xx.c
3318
if (is_se_active(dd) || is_stopped(dd, rq))
drivers/block/mtip32xx/mtip32xx.c
3321
blk_mq_start_request(rq);
drivers/block/mtip32xx/mtip32xx.c
3323
if (mtip_hw_submit_io(dd, rq, cmd, hctx))
drivers/block/mtip32xx/mtip32xx.c
3329
static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
drivers/block/mtip32xx/mtip32xx.c
3333
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/mtip32xx/mtip32xx.c
3342
static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
drivers/block/mtip32xx/mtip32xx.c
3346
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/mtip32xx/mtip32xx.c
956
struct request *rq;
drivers/block/mtip32xx/mtip32xx.c
974
rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
drivers/block/mtip32xx/mtip32xx.c
975
if (IS_ERR(rq)) {
drivers/block/mtip32xx/mtip32xx.c
991
blk_mq_free_request(rq);
drivers/block/mtip32xx/mtip32xx.c
999
int_cmd = blk_mq_rq_to_pdu(rq);
drivers/block/nbd.c
1006
rq = blk_mq_rq_from_pdu(cmd);
drivers/block/nbd.c
1007
if (likely(!blk_should_fake_timeout(rq->q))) {
drivers/block/nbd.c
1015
blk_mq_complete_request(rq);
drivers/block/nbd.c
1187
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
drivers/block/nbd.c
1890
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
drivers/block/nbd.c
1893
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/nbd.c
980
struct request *rq;
drivers/block/null_blk/main.c
1285
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/null_blk/main.c
1289
loff_t pos = blk_rq_pos(rq) << SECTOR_SHIFT;
drivers/block/null_blk/main.c
1296
rq_for_each_segment(bvec, rq, iter) {
drivers/block/null_blk/main.c
1301
op_is_write(req_op(rq)), pos,
drivers/block/null_blk/main.c
1302
rq->cmd_flags & REQ_FUA);
drivers/block/null_blk/main.c
1320
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/null_blk/main.c
1325
if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
drivers/block/null_blk/main.c
1385
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/null_blk/main.c
1389
if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) {
drivers/block/null_blk/main.c
1390
__rq_for_each_bio(bio, rq)
drivers/block/null_blk/main.c
1397
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/null_blk/main.c
1412
blk_mq_complete_request(rq);
drivers/block/null_blk/main.c
1415
blk_mq_end_request(rq, cmd->error);
drivers/block/null_blk/main.c
1495
static bool should_timeout_request(struct request *rq)
drivers/block/null_blk/main.c
1497
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/null_blk/main.c
1503
static bool should_requeue_request(struct request *rq)
drivers/block/null_blk/main.c
1505
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/null_blk/main.c
1518
static bool should_timeout_request(struct request *rq)
drivers/block/null_blk/main.c
1523
static bool should_requeue_request(struct request *rq)
drivers/block/null_blk/main.c
1593
struct request *rq;
drivers/block/null_blk/main.c
1597
list_for_each_entry(rq, &list, queuelist)
drivers/block/null_blk/main.c
1598
blk_mq_set_request_complete(rq);
drivers/block/null_blk/main.c
1619
static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
drivers/block/null_blk/main.c
1621
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
drivers/block/null_blk/main.c
1622
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/null_blk/main.c
1629
if (blk_mq_request_completed(rq)) {
drivers/block/null_blk/main.c
1633
list_del_init(&rq->queuelist);
drivers/block/null_blk/main.c
1637
pr_info("rq %p timed out\n", rq);
drivers/block/null_blk/main.c
1648
blk_mq_complete_request(rq);
drivers/block/null_blk/main.c
1655
struct request *rq = bd->rq;
drivers/block/null_blk/main.c
1656
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/null_blk/main.c
1658
sector_t nr_sectors = blk_rq_sectors(rq);
drivers/block/null_blk/main.c
1659
sector_t sector = blk_rq_pos(rq);
drivers/block/null_blk/main.c
1670
cmd->fake_timeout = should_timeout_request(rq) ||
drivers/block/null_blk/main.c
1671
blk_should_fake_timeout(rq->q);
drivers/block/null_blk/main.c
1673
if (should_requeue_request(rq)) {
drivers/block/null_blk/main.c
1681
blk_mq_requeue_request(rq, true);
drivers/block/null_blk/main.c
1692
blk_mq_start_request(rq);
drivers/block/null_blk/main.c
1696
list_add_tail(&rq->queuelist, &nq->poll_list);
drivers/block/null_blk/main.c
1703
null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
drivers/block/null_blk/main.c
1714
struct request *rq = rq_list_pop(rqlist);
drivers/block/null_blk/main.c
1716
bd.rq = rq;
drivers/block/null_blk/main.c
1717
ret = null_queue_rq(rq->mq_hctx, &bd);
drivers/block/null_blk/main.c
1719
rq_list_add_tail(&requeue_list, rq);
drivers/block/null_blk/main.c
859
static void null_complete_rq(struct request *rq)
drivers/block/null_blk/main.c
861
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/null_blk/main.c
863
blk_mq_end_request(rq, cmd->error);
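Most of the driver entries above sit inside a ->queue_rq() implementation; for context, a minimal sketch of that handler shape (the empty work body is illustrative, and real drivers such as null_blk usually complete the request asynchronously via blk_mq_complete_request()):

	/* Sketch of a trivial ->queue_rq() handler. */
	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct request *rq = bd->rq;

		blk_mq_start_request(rq);
		/* ... perform the transfer described by rq ... */
		blk_mq_end_request(rq, BLK_STS_OK);
		return BLK_STS_OK;
	}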
drivers/block/ps3disk.c
192
blk_mq_start_request(bd->rq);
drivers/block/ps3disk.c
195
ret = ps3disk_do_request(dev, bd->rq);
drivers/block/rbd.c
3501
struct request *rq = blk_mq_rq_from_pdu(img_req);
drivers/block/rbd.c
3502
u64 off = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
drivers/block/rbd.c
3503
u64 len = blk_rq_bytes(rq);
drivers/block/rbd.c
3625
struct request *rq = blk_mq_rq_from_pdu(img_req);
drivers/block/rbd.c
3628
blk_mq_end_request(rq, errno_to_blk_status(result));
drivers/block/rbd.c
4743
struct request *rq = blk_mq_rq_from_pdu(img_request);
drivers/block/rbd.c
4744
u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
drivers/block/rbd.c
4745
u64 length = blk_rq_bytes(rq);
drivers/block/rbd.c
4755
blk_mq_start_request(rq);
drivers/block/rbd.c
4768
rq->bio);
drivers/block/rbd.c
4780
blk_mq_end_request(rq, errno_to_blk_status(result));
drivers/block/rbd.c
4787
struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
drivers/block/rbd.c
4790
switch (req_op(bd->rq)) {
drivers/block/rbd.c
4804
rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
drivers/block/rnbd/rnbd-clt.c
1003
iu->rq = rq;
drivers/block/rnbd/rnbd-clt.c
1005
msg.sector = cpu_to_le64(blk_rq_pos(rq));
drivers/block/rnbd/rnbd-clt.c
1006
msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
drivers/block/rnbd/rnbd-clt.c
1007
msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
drivers/block/rnbd/rnbd-clt.c
1008
msg.prio = cpu_to_le16(req_get_ioprio(rq));
drivers/block/rnbd/rnbd-clt.c
1014
if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
drivers/block/rnbd/rnbd-clt.c
1015
sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl);
drivers/block/rnbd/rnbd-clt.c
1032
err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
drivers/block/rnbd/rnbd-clt.c
1116
struct request *rq = bd->rq;
drivers/block/rnbd/rnbd-clt.c
1117
struct rnbd_clt_dev *dev = rq->q->disk->private_data;
drivers/block/rnbd/rnbd-clt.c
1118
struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
drivers/block/rnbd/rnbd-clt.c
1137
blk_rq_nr_phys_segments(rq) ? : 1,
drivers/block/rnbd/rnbd-clt.c
1147
blk_mq_start_request(rq);
drivers/block/rnbd/rnbd-clt.c
1148
err = rnbd_client_xfer_request(dev, rq, iu);
drivers/block/rnbd/rnbd-clt.c
368
static void rnbd_softirq_done_fn(struct request *rq)
drivers/block/rnbd/rnbd-clt.c
370
struct rnbd_clt_dev *dev = rq->q->disk->private_data;
drivers/block/rnbd/rnbd-clt.c
374
iu = blk_mq_rq_to_pdu(rq);
drivers/block/rnbd/rnbd-clt.c
377
blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
drivers/block/rnbd/rnbd-clt.c
384
struct request *rq = iu->rq;
drivers/block/rnbd/rnbd-clt.c
385
int rw = rq_data_dir(rq);
drivers/block/rnbd/rnbd-clt.c
389
blk_mq_complete_request(rq);
drivers/block/rnbd/rnbd-clt.c
991
struct request *rq,
drivers/block/rnbd/rnbd-clt.h
52
struct request *rq; /* for block io */
drivers/block/rnbd/rnbd-proto.h
274
static inline u32 rq_to_rnbd_flags(struct request *rq)
drivers/block/rnbd/rnbd-proto.h
278
switch (req_op(rq)) {
drivers/block/rnbd/rnbd-proto.h
294
if (rq->cmd_flags & REQ_NOUNMAP)
drivers/block/rnbd/rnbd-proto.h
302
(__force u32)req_op(rq),
drivers/block/rnbd/rnbd-proto.h
303
(__force unsigned long long)rq->cmd_flags);
drivers/block/rnbd/rnbd-proto.h
307
if (op_is_sync(rq->cmd_flags))
drivers/block/rnbd/rnbd-proto.h
310
if (op_is_flush(rq->cmd_flags))
drivers/block/rnbd/rnbd-proto.h
313
if (rq->cmd_flags & REQ_PREFLUSH)
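rq_to_rnbd_flags() is one of several places in these drivers that translate the block-layer operation into a driver or wire format; the dispatch is a plain switch on req_op(), sketched here with hypothetical flag constants:

	/* Sketch: map the request operation onto hypothetical wire flags. */
	static u32 example_rq_to_wire_flags(struct request *rq)
	{
		switch (req_op(rq)) {
		case REQ_OP_READ:
			return EXAMPLE_OP_READ;		/* hypothetical constant */
		case REQ_OP_WRITE:
			return EXAMPLE_OP_WRITE;	/* hypothetical constant */
		case REQ_OP_DISCARD:
			return EXAMPLE_OP_DISCARD;	/* hypothetical constant */
		default:
			return 0;
		}
	}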
drivers/block/sunvdc.c
545
blk_mq_start_request(bd->rq);
drivers/block/sunvdc.c
563
if (__send_request(bd->rq) < 0) {
drivers/block/swim.c
531
struct request *req = bd->rq;
drivers/block/swim3.c
311
struct request *req = bd->rq;
drivers/block/ublk_drv.c
1137
static inline bool ublk_rq_has_data(const struct request *rq)
drivers/block/ublk_drv.c
1139
return bio_has_data(rq->bio);
drivers/block/ublk_drv.c
1579
struct request *rq)
drivers/block/ublk_drv.c
1583
blk_mq_requeue_request(rq, false);
drivers/block/ublk_drv.c
1585
ublk_end_request(rq, BLK_STS_IOERR);
drivers/block/ublk_drv.c
1983
static void ublk_batch_queue_cmd(struct ublk_queue *ubq, struct request *rq, bool last)
drivers/block/ublk_drv.c
1985
unsigned short tag = rq->tag;
drivers/block/ublk_drv.c
1998
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
drivers/block/ublk_drv.c
2000
struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
drivers/block/ublk_drv.c
2003
pdu->req = rq;
drivers/block/ublk_drv.c
2011
struct request *rq = pdu->req_list;
drivers/block/ublk_drv.c
2015
next = rq->rq_next;
drivers/block/ublk_drv.c
2016
rq->rq_next = NULL;
drivers/block/ublk_drv.c
2017
ublk_dispatch_req(rq->mq_hctx->driver_data, rq);
drivers/block/ublk_drv.c
2018
rq = next;
drivers/block/ublk_drv.c
2019
} while (rq);
drivers/block/ublk_drv.c
2032
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
drivers/block/ublk_drv.c
2034
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
drivers/block/ublk_drv.c
2054
static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
drivers/block/ublk_drv.c
2079
res = ublk_setup_iod(ubq, rq);
drivers/block/ublk_drv.c
2083
blk_mq_start_request(rq);
drivers/block/ublk_drv.c
2093
struct request *rq,
drivers/block/ublk_drv.c
2098
res = ublk_prep_req(ubq, rq, false);
drivers/block/ublk_drv.c
2111
__ublk_abort_rq(ubq, rq);
drivers/block/ublk_drv.c
2123
struct request *rq = bd->rq;
drivers/block/ublk_drv.c
2127
res = __ublk_queue_rq_common(ubq, rq, &should_queue);
drivers/block/ublk_drv.c
2131
ublk_queue_cmd(ubq, rq);
drivers/block/ublk_drv.c
2139
struct request *rq = bd->rq;
drivers/block/ublk_drv.c
2143
res = __ublk_queue_rq_common(ubq, rq, &should_queue);
drivers/block/ublk_drv.c
2147
ublk_batch_queue_cmd(ubq, rq, bd->last);
drivers/block/ublk_drv.c
2204
struct request *rq;
drivers/block/ublk_drv.c
2208
rq_list_for_each(l, rq) {
drivers/block/ublk_drv.c
2209
tags[cnt++] = (unsigned short)rq->tag;
drivers/block/ublk_drv.c
2829
static bool ublk_check_inflight_rq(struct request *rq, void *data)
drivers/block/ublk_drv.c
2833
if (blk_mq_request_started(rq)) {
drivers/block/ublk_drv.c
3064
struct request *rq = priv;
drivers/block/ublk_drv.c
3065
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
drivers/block/ublk_drv.c
3066
struct ublk_io *io = &ubq->ios[rq->tag];
drivers/block/ublk_drv.c
3075
ublk_put_req_ref(io, rq);
drivers/block/ublk_drv.c
5034
static bool ublk_count_busy_req(struct request *rq, void *data)
drivers/block/ublk_drv.c
5038
if (!blk_mq_request_started(rq) && rq->mq_hctx->driver_data == idle->ubq)
drivers/block/virtio_blk.c
430
struct request *req = bd->rq;
drivers/block/xen-blkfront.c
1085
BUG_ON(info->rq != NULL);
drivers/block/xen-blkfront.c
1164
info->rq = gd->queue;
drivers/block/xen-blkfront.c
1187
blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
drivers/block/xen-blkfront.c
122
static inline struct blkif_req *blkif_req(struct request *rq)
drivers/block/xen-blkfront.c
124
return blk_mq_rq_to_pdu(rq);
drivers/block/xen-blkfront.c
1310
if (info->rq)
drivers/block/xen-blkfront.c
1311
blk_mq_stop_hw_queues(info->rq);
drivers/block/xen-blkfront.c
1603
struct request_queue *rq = info->rq;
drivers/block/xen-blkfront.c
1610
blk_queue_disable_discard(rq);
drivers/block/xen-blkfront.c
1611
blk_queue_disable_secure_erase(rq);
drivers/block/xen-blkfront.c
2015
lim = queue_limits_start_update(info->rq);
drivers/block/xen-blkfront.c
2018
rc = queue_limits_commit_update(info->rq, &lim);
drivers/block/xen-blkfront.c
2045
blk_mq_start_stopped_hw_queues(info->rq, true);
drivers/block/xen-blkfront.c
2046
blk_mq_kick_requeue_list(info->rq);
drivers/block/xen-blkfront.c
211
struct request_queue *rq;
drivers/block/xen-blkfront.c
2132
if (info->rq && info->gd) {
drivers/block/xen-blkfront.c
2133
blk_mq_stop_hw_queues(info->rq);
drivers/block/xen-blkfront.c
2399
info->rq = NULL;
drivers/block/xen-blkfront.c
902
blk_mq_start_request(qd->rq);
drivers/block/xen-blkfront.c
915
if (unlikely(req_op(qd->rq) == REQ_OP_FLUSH && !info->feature_flush))
drivers/block/xen-blkfront.c
920
if (blkif_queue_request(qd->rq, rinfo))
drivers/block/xen-blkfront.c
933
blk_mq_end_request(qd->rq, BLK_STS_OK);
drivers/block/xen-blkfront.c
937
static void blkif_complete_rq(struct request *rq)
drivers/block/xen-blkfront.c
939
blk_mq_end_request(rq, blkif_req(rq)->error);
drivers/block/z2ram.c
71
struct request *req = bd->rq;
drivers/block/zloop.c
149
static unsigned int rq_zone_no(struct request *rq)
drivers/block/zloop.c
151
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
153
return blk_rq_pos(rq) >> zlo->zone_shift;
drivers/block/zloop.c
363
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/zloop.c
369
if (likely(!blk_should_fake_timeout(rq->q)))
drivers/block/zloop.c
370
blk_mq_complete_request(rq);
drivers/block/zloop.c
383
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/zloop.c
384
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
385
unsigned int zone_no = rq_zone_no(rq);
drivers/block/zloop.c
386
sector_t sector = blk_rq_pos(rq);
drivers/block/zloop.c
387
sector_t nr_sectors = blk_rq_sectors(rq);
drivers/block/zloop.c
388
bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
drivers/block/zloop.c
389
bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
drivers/block/zloop.c
490
nr_bvec = blk_rq_nr_bvec(rq);
drivers/block/zloop.c
492
if (rq->bio != rq->biotail) {
drivers/block/zloop.c
508
rq_for_each_bvec(tmp, rq, rq_iter) {
drivers/block/zloop.c
512
iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq));
drivers/block/zloop.c
520
__bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter),
drivers/block/zloop.c
521
nr_bvec, blk_rq_bytes(rq));
drivers/block/zloop.c
522
iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
drivers/block/zloop.c
562
struct request *rq = blk_mq_rq_from_pdu(cmd);
drivers/block/zloop.c
563
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
566
if (rq->cmd_flags & REQ_NOWAIT)
drivers/block/zloop.c
567
rq->cmd_flags &= ~REQ_NOWAIT;
drivers/block/zloop.c
569
switch (req_op(rq)) {
drivers/block/zloop.c
583
cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
drivers/block/zloop.c
589
cmd->ret = zloop_finish_zone(zlo, rq_zone_no(rq));
drivers/block/zloop.c
592
cmd->ret = zloop_open_zone(zlo, rq_zone_no(rq));
drivers/block/zloop.c
595
cmd->ret = zloop_close_zone(zlo, rq_zone_no(rq));
drivers/block/zloop.c
599
pr_err("Unsupported operation %d\n", req_op(rq));
drivers/block/zloop.c
604
blk_mq_complete_request(rq);
drivers/block/zloop.c
617
static void zloop_complete_rq(struct request *rq)
drivers/block/zloop.c
619
struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/zloop.c
620
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
625
switch (req_op(rq)) {
drivers/block/zloop.c
631
if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
drivers/block/zloop.c
635
__rq_for_each_bio(bio, rq)
drivers/block/zloop.c
644
req_op(rq) == REQ_OP_WRITE ? "" : "append ",
drivers/block/zloop.c
647
if (cmd->ret >= 0 && cmd->ret != blk_rq_bytes(rq)) {
drivers/block/zloop.c
649
zone_no, cmd->ret, blk_rq_bytes(rq));
drivers/block/zloop.c
662
if (req_op(rq) == REQ_OP_ZONE_APPEND)
drivers/block/zloop.c
663
rq->__sector = cmd->sector;
drivers/block/zloop.c
672
blk_mq_end_request(rq, sts);
drivers/block/zloop.c
675
static bool zloop_set_zone_append_sector(struct request *rq)
drivers/block/zloop.c
677
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
678
unsigned int zone_no = rq_zone_no(rq);
drivers/block/zloop.c
681
sector_t nr_sectors = blk_rq_sectors(rq);
drivers/block/zloop.c
692
rq->__sector = zone->wp;
drivers/block/zloop.c
693
zone->wp += blk_rq_sectors(rq);
drivers/block/zloop.c
707
struct request *rq = bd->rq;
drivers/block/zloop.c
708
struct zloop_cmd *cmd = blk_mq_rq_to_pdu(rq);
drivers/block/zloop.c
709
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
719
if (zlo->ordered_zone_append && req_op(rq) == REQ_OP_ZONE_APPEND) {
drivers/block/zloop.c
720
if (!zloop_set_zone_append_sector(rq))
drivers/block/zloop.c
724
blk_mq_start_request(rq);
drivers/cdrom/gdrom.c
637
blk_mq_start_request(bd->rq);
drivers/cdrom/gdrom.c
639
switch (req_op(bd->rq)) {
drivers/cdrom/gdrom.c
641
return gdrom_readdisk_dma(bd->rq);
drivers/char/agp/isoch.c
127
target.rq = (tstatus >> 24) & 0xff;
drivers/char/agp/isoch.c
215
master[cdev].rq = master[cdev].n;
drivers/char/agp/isoch.c
217
master[cdev].rq *= (1 << (master[cdev].y - 1));
drivers/char/agp/isoch.c
219
tot_rq += master[cdev].rq;
drivers/char/agp/isoch.c
226
rq_async = target.rq - rq_isoch;
drivers/char/agp/isoch.c
251
master[cdev].rq += (cdev == ndevs - 1)
drivers/char/agp/isoch.c
263
mcmd |= master[cdev].rq << 24;
drivers/char/agp/isoch.c
76
u32 rq;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1111
struct drm_gpu_scheduler *sched = entity->rq->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1242
sched = p->gang_leader->base.entity->rq->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
91
if (entity->rq == NULL)
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
392
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
491
struct drm_sched_rq *rq = sched->sched_rq[i];
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
492
spin_lock(&rq->lock);
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
493
list_for_each_entry(s_entity, &rq->entities, list) {
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
502
spin_unlock(&rq->lock);
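The amdgpu_job.c lines 491-502 above walk the DRM scheduler run queues. A hedged sketch of that walk, assuming the drm_gpu_scheduler layout in which sched_rq is an array of run-queue pointers sized by a num_rqs field and each run queue guards its entity list with rq->lock (the demo_ name and the loop bound are assumptions, not amdgpu code):

#include <drm/gpu_scheduler.h>

/* Sketch only: visit every entity currently queued on a scheduler. */
static void demo_walk_runqueues(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	unsigned int i;

	for (i = 0; i < sched->num_rqs; i++) {		/* num_rqs: assumed bound */
		struct drm_sched_rq *rq = sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			/* e.g. flag the entity's outstanding jobs as stopped */
		}
		spin_unlock(&rq->lock);
	}
}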
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
110
return to_amdgpu_ring(job->base.entity->rq->sched);
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
156
__entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
159
to_amdgpu_ring(job->base.entity->rq->sched));
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
113
ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
474
sched = entity->entity.rq->sched;
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
476
ring = to_amdgpu_ring(entity->entity.rq->sched);
drivers/gpu/drm/i915/display/intel_overlay.c
239
struct i915_request *rq;
drivers/gpu/drm/i915/display/intel_overlay.c
244
rq = i915_request_create(overlay->context);
drivers/gpu/drm/i915/display/intel_overlay.c
245
if (IS_ERR(rq))
drivers/gpu/drm/i915/display/intel_overlay.c
246
return rq;
drivers/gpu/drm/i915/display/intel_overlay.c
248
err = i915_active_add_request(&overlay->last_flip, rq);
drivers/gpu/drm/i915/display/intel_overlay.c
250
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
254
return rq;
drivers/gpu/drm/i915/display/intel_overlay.c
261
struct i915_request *rq;
drivers/gpu/drm/i915/display/intel_overlay.c
266
rq = alloc_request(overlay, NULL);
drivers/gpu/drm/i915/display/intel_overlay.c
267
if (IS_ERR(rq))
drivers/gpu/drm/i915/display/intel_overlay.c
268
return PTR_ERR(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
270
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/display/intel_overlay.c
272
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
285
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/display/intel_overlay.c
287
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
324
struct i915_request *rq;
drivers/gpu/drm/i915/display/intel_overlay.c
338
rq = alloc_request(overlay, NULL);
drivers/gpu/drm/i915/display/intel_overlay.c
339
if (IS_ERR(rq))
drivers/gpu/drm/i915/display/intel_overlay.c
340
return PTR_ERR(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
342
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/display/intel_overlay.c
344
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
350
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/display/intel_overlay.c
353
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
406
struct i915_request *rq;
drivers/gpu/drm/i915/display/intel_overlay.c
419
rq = alloc_request(overlay, intel_overlay_off_tail);
drivers/gpu/drm/i915/display/intel_overlay.c
420
if (IS_ERR(rq))
drivers/gpu/drm/i915/display/intel_overlay.c
421
return PTR_ERR(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
423
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/display/intel_overlay.c
425
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
439
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/display/intel_overlay.c
442
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
464
struct i915_request *rq;
drivers/gpu/drm/i915/display/intel_overlay.c
479
rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
drivers/gpu/drm/i915/display/intel_overlay.c
480
if (IS_ERR(rq))
drivers/gpu/drm/i915/display/intel_overlay.c
481
return PTR_ERR(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
483
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/display/intel_overlay.c
485
i915_request_add(rq);
drivers/gpu/drm/i915/display/intel_overlay.c
491
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/display/intel_overlay.c
493
i915_request_add(rq);
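Nearly all of the intel_overlay.c hits above are one recurring i915 idiom: create an i915_request on a context, reserve ring space with intel_ring_begin(), write the command dwords, close the write with intel_ring_advance(), and submit with i915_request_add(). A condensed sketch of that idiom which only emits MI_NOOPs, assuming the driver-internal headers are available (demo_emit_noops is an illustrative name, not a driver function):

#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "i915_request.h"

static int demo_emit_noops(struct intel_context *ce)
{
	struct i915_request *rq;
	u32 *cs;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);		/* reserve four dwords in the ring */
	if (IS_ERR(cs)) {
		i915_request_add(rq);		/* the request must still be submitted */
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);		/* commit exactly what was reserved */

	i915_request_add(rq);			/* submit; the request now runs to completion */
	return 0;
}

Note how the error path after a failed intel_ring_begin() still calls i915_request_add(): once created, a request has to be submitted (or skipped) so it can be retired, which is the same pattern the overlay entries above follow.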
drivers/gpu/drm/i915/gem/i915_gem_busy.c
42
const struct i915_request *rq;
drivers/gpu/drm/i915/gem/i915_gem_busy.c
75
rq = to_request(current_fence);
drivers/gpu/drm/i915/gem/i915_gem_busy.c
76
if (!i915_request_completed(rq))
drivers/gpu/drm/i915/gem/i915_gem_busy.c
77
return flag(rq->engine->uabi_class);
drivers/gpu/drm/i915/gem/i915_gem_busy.c
86
rq = to_request(fence);
drivers/gpu/drm/i915/gem/i915_gem_busy.c
87
if (i915_request_completed(rq))
drivers/gpu/drm/i915/gem/i915_gem_busy.c
91
BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
drivers/gpu/drm/i915/gem/i915_gem_busy.c
92
return flag(rq->engine->uabi_class);
drivers/gpu/drm/i915/gem/i915_gem_context.c
1357
struct i915_request *rq;
drivers/gpu/drm/i915/gem/i915_gem_context.c
1371
list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
drivers/gpu/drm/i915/gem/i915_gem_context.c
1375
if (!i915_request_get_rcu(rq))
drivers/gpu/drm/i915/gem/i915_gem_context.c
1380
if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
drivers/gpu/drm/i915/gem/i915_gem_context.c
1381
found = i915_request_active_engine(rq, &engine);
drivers/gpu/drm/i915/gem/i915_gem_context.c
1383
i915_request_put(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2030
struct i915_request *rq = eb->requests[j];
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2032
if (!rq)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2035
rq->capture_list = eb->capture_lists[j];
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2208
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2213
if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2214
drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2218
cs = intel_ring_begin(rq, 4 * 2 + 2);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2228
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2372
struct i915_request *rq,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2378
if (intel_context_nopreempt(rq->context))
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2379
__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2382
err = i915_reset_gen7_sol_offsets(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2393
if (rq->context->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2394
err = rq->context->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2399
err = rq->context->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2408
GEM_BUG_ON(intel_context_is_parallel(rq->context));
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2410
err = rq->context->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2471
struct i915_request *rq;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2487
list_for_each_entry(rq, &tl->requests, link) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2488
if (rq->ring != ring)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2491
if (__intel_ring_space(rq->postfix,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2495
if (&rq->link == &tl->requests)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2498
return i915_request_get(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2505
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2521
rq = eb_throttle(eb, ce);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2524
if (rq) {
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2528
if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2530
i915_request_put(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
2546
i915_request_put(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3003
struct i915_request *rq)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3012
err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3063
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3065
list_for_each_entry_safe(rq, rn, &tl->requests, link)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3066
if (rq == end || !i915_request_retire(rq))
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3070
static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3073
struct intel_timeline * const tl = i915_request_timeline(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3078
lockdep_unpin_lock(&tl->mutex, rq->cookie);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3080
trace_i915_request_add(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3082
prev = __i915_request_commit(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3089
i915_request_set_error_once(rq, -ENOENT);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3090
__i915_request_skip(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3096
__i915_request_skip(rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3098
&rq->fence.flags);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3102
&rq->fence.flags);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3105
__i915_request_queue(rq, &attr);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3125
struct i915_request *rq = eb->requests[i];
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3127
if (!rq)
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3129
err |= eb_request_add(eb, rq, err, i == 0);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3233
eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq,
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3243
err = i915_request_await_dma_fence(rq, fence);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3251
err = i915_request_await_execution(rq, in_fence);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3253
err = i915_request_await_dma_fence(rq, in_fence);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3259
err = await_fence_array(eb, rq);
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3269
out_fence = sync_file_create(&rq->fence);
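The eb_fences_add()/await_fence_array() hits above show how execbuf orders a new request behind user-supplied fences before submission: every incoming dma_fence becomes a dependency through i915_request_await_dma_fence() (or i915_request_await_execution() for submit fences), and the request's own fence can then be exported with sync_file_create(). A trimmed sketch of the await loop, with the fences array and count assumed to come from the caller:

/* Sketch: make rq depend on each supplied fence before it is submitted. */
static int demo_await_fences(struct i915_request *rq,
			     struct dma_fence **fences, unsigned int count)
{
	unsigned int n;
	int err;

	for (n = 0; n < count; n++) {
		if (!fences[n])
			continue;

		err = i915_request_await_dma_fence(rq, fences[n]);
		if (err < 0)
			return err;
	}

	return 0;
}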
drivers/gpu/drm/i915/gem/i915_gem_throttle.c
63
struct i915_request *rq, *target = NULL;
drivers/gpu/drm/i915/gem/i915_gem_throttle.c
69
list_for_each_entry_reverse(rq,
drivers/gpu/drm/i915/gem/i915_gem_throttle.c
72
if (i915_request_completed(rq))
drivers/gpu/drm/i915/gem/i915_gem_throttle.c
75
if (time_after(rq->emitted_jiffies,
drivers/gpu/drm/i915/gem/i915_gem_throttle.c
79
target = i915_request_get(rq);
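The i915_gem_throttle.c hits sketch the throttle walk: scan the timeline's requests newest-first, stop at the first one that has already completed, skip requests emitted more recently than the throttle window, take a reference on the first old-enough request, and wait on it. Roughly, with the locking around the list walk omitted (demo_throttle and the window parameter are illustrative, not the driver's names):

/* Sketch: wait on the oldest still-outstanding request emitted before
 * the throttle window. */
static long demo_throttle(struct intel_timeline *tl, unsigned long window)
{
	struct i915_request *rq, *target = NULL;
	unsigned long recent = jiffies - window;
	long ret = 0;

	list_for_each_entry_reverse(rq, &tl->requests, link) {
		if (i915_request_completed(rq))
			break;			/* everything older has finished too */
		if (time_after(rq->emitted_jiffies, recent))
			continue;		/* emitted too recently to throttle against */
		target = i915_request_get(rq);
		break;
	}

	if (target) {
		ret = i915_request_wait(target, I915_WAIT_INTERRUPTIBLE,
					MAX_SCHEDULE_TIMEOUT);
		i915_request_put(target);
	}

	return ret < 0 ? ret : 0;
}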
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
197
struct i915_request *rq;
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
220
0, &rq);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
237
&rq);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
244
if (ret && rq) {
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
245
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
246
i915_request_put(rq);
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
249
return ret ? ERR_PTR(ret) : &rq->fence;
drivers/gpu/drm/i915/gem/i915_gem_wait.c
100
rq = to_request(fence);
drivers/gpu/drm/i915/gem/i915_gem_wait.c
101
engine = rq->engine;
drivers/gpu/drm/i915/gem/i915_gem_wait.c
105
engine->sched_engine->schedule(rq, attr);
drivers/gpu/drm/i915/gem/i915_gem_wait.c
94
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
478
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
503
rq = intel_context_create_request(t->ce);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
504
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
505
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
509
err = igt_vma_move_to_active_unlocked(t->batch, rq, 0);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
511
err = igt_vma_move_to_active_unlocked(src->vma, rq, 0);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
513
err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
515
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
519
i915_request_get(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
520
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
521
if (i915_request_wait(rq, 0, HZ / 2) < 0)
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
523
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
196
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
210
rq = intel_engine_create_kernel_request(ctx->engine);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
211
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
212
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
216
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
238
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
240
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
243
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1002
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1006
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1007
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
101
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1012
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1019
*rq_out = i915_request_get(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1023
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1024
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1050
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1065
rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1066
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1067
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1071
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1073
if (!igt_wait_for_spinner(*spin, rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1096
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1102
ret = emit_rpcs_query(obj, ce, &rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1109
ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1110
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
116
rq = NULL;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
126
if (rq) { /* Force submission order */
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
127
i915_request_await_dma_fence(this, &rq->fence);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
128
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
145
rq = i915_request_get(this);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
148
GEM_BUG_ON(!rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
149
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1516
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
153
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1557
rq = igt_request_alloc(ctx, engine);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1558
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1559
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
156
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1563
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1567
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1568
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1573
err = engine->emit_bb_start(rq, i915_vma_offset(vma),
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1580
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1584
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1585
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1605
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1690
rq = igt_request_alloc(ctx, engine);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1691
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1692
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1696
err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1700
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1701
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1706
err = engine->emit_bb_start(rq, i915_vma_offset(vma),
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1713
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1732
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1733
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
200
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
204
struct i915_request *prev = rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
206
rq = i915_request_create(arg->ce[n]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
207
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
209
arg->result = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
213
i915_request_get(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
216
i915_request_await_dma_fence(rq,
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
221
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
224
if (IS_ERR_OR_NULL(rq))
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
227
if (i915_request_wait(rq, 0, HZ) < 0)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
230
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
243
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
252
struct i915_request *prev = rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
254
rq = i915_request_create(arg->ce[n]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
255
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
257
arg->result = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
261
i915_request_get(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
264
i915_request_await_dma_fence(rq,
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
269
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
275
if (!IS_ERR_OR_NULL(rq))
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
276
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
74
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
87
if (rq) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
88
i915_request_await_dma_fence(this, &rq->fence);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
89
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
91
rq = i915_request_get(this);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
94
if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
945
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
97
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
992
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
993
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
994
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
998
err = i915_vma_move_to_active(batch, rq, 0);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
193
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
224
0xdeadbeaf, &rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
225
if (rq) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
228
dma_resv_add_fence(obj->base.resv, &rq->fence,
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
230
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
394
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
411
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
413
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
414
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
419
err = i915_deps_add_dependency(&deps, &rq->fence, &ctx);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
420
spin_fence = dma_fence_get(&rq->fence);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
421
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1160
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1222
expand32(POISON_INUSE), &rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1224
if (rq && !err) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1227
dma_resv_add_fence(obj->base.resv, &rq->fence,
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1229
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1584
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1602
rq = i915_request_create(engine->kernel_context);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1603
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1604
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1608
err = i915_vma_move_to_active(vma, rq, 0);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1610
err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1611
i915_request_get(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1612
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1614
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1626
i915_request_put(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
548
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
565
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
566
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
567
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
571
err = i915_vma_move_to_active(vma, rq,
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
574
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
115
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
127
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
128
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
129
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
133
err = igt_vma_move_to_active_unlocked(batch, rq, 0);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
137
err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
145
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
152
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
153
i915_request_add(rq);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
24
struct i915_request *rq;
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
35
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
38
return rq;
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
35
igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
41
err = i915_vma_move_to_active(vma, rq, flags);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
105
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
115
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
124
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
129
int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
133
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
139
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
144
static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
147
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
148
GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
155
*cs++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
16
int gen2_emit_flush(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
161
*cs++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
166
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
167
assert_ring_tail_valid(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
172
u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
174
return __gen2_emit_breadcrumb(rq, cs, 16, 8);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
177
u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
179
return __gen2_emit_breadcrumb(rq, cs, 8, 8);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
186
int i830_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
191
intel_gt_scratch_offset(rq->engine->gt,
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
194
GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
196
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
207
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
213
cs = intel_ring_begin(rq, 6 + 2);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
231
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
240
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
246
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
25
cs = intel_ring_begin(rq, 2 + 4 * num_store_dw);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
251
int gen2_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
260
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
266
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
271
int gen4_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
282
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
288
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
38
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
43
int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
79
if (IS_G4X(rq->i915) || GRAPHICS_VER(rq->i915) == 5)
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
87
cs = intel_ring_begin(rq, i);
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
14
int gen2_emit_flush(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
15
int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
16
int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
18
u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
19
u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
21
int i830_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
24
int gen2_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen2_engine_cs.h
27
int gen4_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
130
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
138
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
143
u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
153
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
165
*cs++ = i915_request_active_seqno(rq) |
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
167
*cs++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
172
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
173
assert_ring_tail_valid(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
178
static int mi_flush_dw(struct i915_request *rq, u32 flags)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
182
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
209
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
214
static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
216
return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
219
int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
221
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
224
int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
226
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
229
int gen6_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
240
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
245
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
251
hsw_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
262
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
267
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
272
static int gen7_stall_cs(struct i915_request *rq)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
276
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
284
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
289
int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
292
intel_gt_scratch_offset(rq->engine->gt,
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
337
gen7_stall_cs(rq);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
340
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
348
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
353
u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
363
*cs++ = i915_request_active_seqno(rq);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
364
*cs++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
369
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
370
assert_ring_tail_valid(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
375
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
377
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
378
GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
382
*cs++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
386
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
387
assert_ring_tail_valid(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
393
u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
397
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
398
GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
403
*cs++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
408
*cs++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
418
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
419
assert_ring_tail_valid(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
55
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
58
intel_gt_scratch_offset(rq->engine->gt,
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
62
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
72
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
74
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
84
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
89
int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
92
intel_gt_scratch_offset(rq->engine->gt,
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
98
ret = gen6_emit_post_sync_nonzero_flush(rq);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
16
int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
17
int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
18
int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
19
u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
20
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
22
int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
23
u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
24
u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
26
int gen6_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen6_engine_cs.h
29
int hsw_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
101
if (rq->engine->class == VIDEO_DECODE_CLASS)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
109
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
114
int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
130
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
135
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
15
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
154
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
159
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
225
static int mtl_dummy_pipe_control(struct i915_request *rq)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
228
if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
229
IS_DG2(rq->i915)) {
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
233
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
240
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
246
int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
248
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
260
err = mtl_dummy_pipe_control(rq);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
270
if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
281
GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
302
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
308
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
316
err = mtl_dummy_pipe_control(rq);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
339
if (gen12_needs_ccs_aux_inv(rq->engine))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
342
cs = intel_ring_begin(rq, count);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
358
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
364
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
372
if (gen12_needs_ccs_aux_inv(rq->engine))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
376
cs = intel_ring_begin(rq, cmd);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
395
if (rq->engine->class == VIDEO_DECODE_CLASS)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
398
if (gen12_needs_ccs_aux_inv(rq->engine) &&
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
399
rq->engine->class == COPY_ENGINE_CLASS)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
408
cs = gen12_emit_aux_table_inv(rq->engine, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
413
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
424
static u32 hwsp_offset(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
429
tl = rcu_dereference_protected(rq->timeline,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
430
!i915_request_signaled(rq));
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
433
return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
436
int gen8_emit_init_breadcrumb(struct i915_request *rq)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
44
if (GRAPHICS_VER(rq->i915) == 9)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
440
GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
441
if (!i915_request_timeline(rq)->has_initial_breadcrumb)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
444
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
449
*cs++ = hwsp_offset(rq);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
451
*cs++ = rq->fence.seqno - 1;
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
473
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
476
rq->infix = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
478
__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
48
if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
483
static int __xehp_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
488
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
494
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
519
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
524
int xehp_emit_bb_start_noarb(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
528
return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
531
int xehp_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
535
return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
538
int gen8_emit_bb_start_noarb(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
544
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
569
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
574
int gen8_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
580
if (unlikely(i915_request_has_nopreempt(rq)))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
581
return gen8_emit_bb_start_noarb(rq, offset, len, flags);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
583
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
597
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
60
cs = intel_ring_begin(rq, len);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
602
static void assert_request_valid(struct i915_request *rq)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
604
struct intel_ring *ring __maybe_unused = rq->ring;
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
607
GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
615
static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
620
rq->wa_tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
623
assert_request_valid(rq);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
628
static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
636
*cs++ = preempt_address(rq->engine);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
644
gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
649
if (intel_engine_has_semaphores(rq->engine) &&
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
650
!intel_uc_uses_guc_submission(&rq->engine->gt->uc))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
651
cs = emit_preempt_busywait(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
653
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
654
assert_ring_tail_valid(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
656
return gen8_emit_wa_tail(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
659
static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
661
return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
664
u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
666
return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
669
u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
681
rq->fence.seqno,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
682
hwsp_offset(rq),
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
686
return gen8_emit_fini_breadcrumb_tail(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
689
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
702
rq->fence.seqno,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
703
hwsp_offset(rq),
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
707
return gen8_emit_fini_breadcrumb_tail(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
729
static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
737
*cs++ = preempt_address(rq->engine);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
748
static u32 hold_switchout_semaphore_offset(struct i915_request *rq)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
750
return i915_ggtt_offset(rq->context->state) +
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
757
static u32 *hold_switchout_emit_wa_busywait(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
76
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
763
*cs++ = hold_switchout_semaphore_offset(rq);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
779
*cs++ = hold_switchout_semaphore_offset(rq);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
786
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
791
if (intel_engine_has_semaphores(rq->engine) &&
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
792
!intel_uc_uses_guc_submission(&rq->engine->gt->uc))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
793
cs = gen12_emit_preempt_busywait(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
798
if (intel_engine_uses_wa_hold_switchout(rq->engine))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
799
cs = hold_switchout_emit_wa_busywait(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
801
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
802
assert_ring_tail_valid(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
804
return gen8_emit_wa_tail(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
807
u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
81
int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
810
cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
811
return gen12_emit_fini_breadcrumb_tail(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
814
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
816
struct drm_i915_private *i915 = rq->i915;
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
817
struct intel_gt *gt = rq->engine->gt;
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
826
if (GRAPHICS_VER_FULL(rq->i915) < IP_VER(12, 70))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
839
if (!HAS_3D_PIPELINE(rq->i915))
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
841
else if (rq->engine->class == COMPUTE_CLASS)
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
848
rq->fence.seqno,
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
849
hwsp_offset(rq),
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
85
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
854
return gen12_emit_fini_breadcrumb_tail(rq, cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
20
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
21
int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
22
int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
24
int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
25
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
27
int gen8_emit_init_breadcrumb(struct i915_request *rq);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
29
int gen8_emit_bb_start_noarb(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
32
int gen8_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
36
int xehp_emit_bb_start_noarb(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
39
int xehp_emit_bb_start(struct i915_request *rq,
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
43
u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
44
u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
46
u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
47
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
48
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
113
check_signal_order(struct intel_context *ce, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
115
if (rq->context != ce)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
118
if (!list_is_last(&rq->signal_link, &ce->signals) &&
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
119
i915_seqno_passed(rq->fence.seqno,
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
120
list_next_entry(rq, signal_link)->fence.seqno))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
123
if (!list_is_first(&rq->signal_link, &ce->signals) &&
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
124
i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
125
rq->fence.seqno))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
214
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
216
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
219
if (!__i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
223
&rq->fence.flags))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
232
list_del_rcu(&rq->signal_link);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
236
if (intel_timeline_is_last(ce->timeline, rq))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
241
if (__dma_fence_signal(&rq->fence))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
243
signal = slist_add(&rq->signal_node, signal);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
245
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
252
struct i915_request *rq =
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
253
llist_entry(signal, typeof(*rq), signal_node);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
256
if (rq->engine->sched_engine->retire_inflight_request_prio)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
257
rq->engine->sched_engine->retire_inflight_request_prio(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
259
spin_lock(&rq->lock);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
260
list_replace(&rq->fence.cb_list, &cb_list);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
261
__dma_fence_signal__timestamp(&rq->fence, timestamp);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
262
__dma_fence_signal__notify(&rq->fence, &cb_list);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
263
spin_unlock(&rq->lock);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
265
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
339
static void irq_signal_request(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
342
if (!__dma_fence_signal(&rq->fence))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
345
i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
346
if (llist_add(&rq->signal_node, &b->signaled_requests))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
350
static void insert_breadcrumb(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
352
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
353
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
356
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
364
if (__i915_request_is_complete(rq)) {
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
365
irq_signal_request(rq, b);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
392
if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
397
i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
398
list_add_rcu(&rq->signal_link, pos);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
399
GEM_BUG_ON(!check_signal_order(ce, rq));
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
400
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
401
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
408
if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
412
bool i915_request_enable_breadcrumb(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
414
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
417
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
426
if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
430
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
431
insert_breadcrumb(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
437
void i915_request_cancel_breadcrumb(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
439
struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
440
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
444
if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
449
list_del_rcu(&rq->signal_link);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
455
if (__i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
456
irq_signal_request(rq, b);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
458
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
464
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
473
list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
474
GEM_BUG_ON(!__i915_request_is_complete(rq));
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
476
&rq->fence.flags))
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
479
list_del_rcu(&rq->signal_link);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
480
irq_signal_request(rq, b);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
481
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
497
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
503
list_for_each_entry_rcu(rq, &ce->signals, signal_link)
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
505
rq->fence.context, rq->fence.seqno,
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
506
__i915_request_is_complete(rq) ? "!" :
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
507
__i915_request_has_started(rq) ? "*" :
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
509
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
drivers/gpu/drm/i915/gt/intel_context.c
483
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_context.c
489
GEM_BUG_ON(rq->context == ce);
drivers/gpu/drm/i915/gt/intel_context.c
491
if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
drivers/gpu/drm/i915/gt/intel_context.c
493
err = i915_active_fence_set(&tl->last_request, rq);
drivers/gpu/drm/i915/gt/intel_context.c
506
return i915_active_add_request(&ce->active, rq);
drivers/gpu/drm/i915/gt/intel_context.c
512
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_context.c
519
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/intel_context.c
525
rq = ERR_PTR(err);
drivers/gpu/drm/i915/gt/intel_context.c
527
rq = ERR_PTR(err);
drivers/gpu/drm/i915/gt/intel_context.c
532
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/intel_context.c
533
return rq;
drivers/gpu/drm/i915/gt/intel_context.c
539
lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
drivers/gpu/drm/i915/gt/intel_context.c
542
rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
drivers/gpu/drm/i915/gt/intel_context.c
544
return rq;
drivers/gpu/drm/i915/gt/intel_context.c
550
struct i915_request *rq, *active = NULL;
drivers/gpu/drm/i915/gt/intel_context.c
562
list_for_each_entry_reverse(rq, &parent->guc_state.requests,
drivers/gpu/drm/i915/gt/intel_context.c
564
if (rq->context != ce)
drivers/gpu/drm/i915/gt/intel_context.c
566
if (i915_request_completed(rq))
drivers/gpu/drm/i915/gt/intel_context.c
569
active = rq;
drivers/gpu/drm/i915/gt/intel_context.c
625
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_context.c
632
ce->ops->revoke(ce, rq,
drivers/gpu/drm/i915/gt/intel_context.h
128
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_context.h
131
return ce->ops->cancel_request(ce, rq);
drivers/gpu/drm/i915/gt/intel_context.h
274
struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_context.h
328
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
16
static int gen8_emit_rpcs_config(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_context_sseu.c
23
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
33
*cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
35
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
43
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_context_sseu.c
57
rq = intel_engine_create_kernel_request(ce->engine);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
58
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/intel_context_sseu.c
59
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
64
ret = intel_context_prepare_remote_request(ce, rq);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
66
ret = gen8_emit_rpcs_config(rq, ce, sseu);
drivers/gpu/drm/i915/gt/intel_context_sseu.c
68
i915_request_add(rq);
drivers/gpu/drm/i915/gt/intel_context_types.h
43
void (*revoke)(struct intel_context *ce, struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_context_types.h
54
struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_engine.h
276
struct intel_context **ce, struct i915_request **rq);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1301
struct i915_request rq;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1318
frame->rq.i915 = engine->i915;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1319
frame->rq.engine = engine;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1320
frame->rq.context = ce;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1321
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1322
frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1330
frame->rq.ring = &frame->ring;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1335
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1992
static struct intel_timeline *get_timeline(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2005
tl = rcu_dereference(rq->timeline);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2013
static int print_ring(char *buf, int sz, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2017
if (!i915_request_signaled(rq)) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2018
struct intel_timeline *tl = get_timeline(rq);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2022
i915_ggtt_offset(rq->ring->vma),
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2024
hwsp_seqno(rq),
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2025
DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2141
struct i915_request * const *port, *rq;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2176
for (port = execlists->active; (rq = *port); port++) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2183
rq->context->lrc.ccid,
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2184
intel_context_is_closed(rq->context) ? "!" : "",
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2185
intel_context_is_banned(rq->context) ? "*" : "");
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2186
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2188
i915_request_show(m, rq, hdr, 0);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2190
for (port = execlists->pending; (rq = *port); port++) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2197
rq->context->lrc.ccid,
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2198
intel_context_is_closed(rq->context) ? "!" : "",
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2199
intel_context_is_banned(rq->context) ? "*" : "");
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2200
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2202
i915_request_show(m, rq, hdr, 0);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2216
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2218
struct i915_vma_resource *vma_res = rq->batch_res;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2224
rq->head, rq->postfix, rq->tail,
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2228
size = rq->tail - rq->head;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2229
if (rq->tail < rq->head)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2230
size += rq->ring->size;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2234
const void *vaddr = rq->ring->vaddr;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2235
unsigned int head = rq->head;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2238
if (rq->tail < head) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2239
len = rq->ring->size - head;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2285
static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2287
struct intel_timeline *tl = get_timeline(rq);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2289
i915_request_show(m, rq, msg, 0);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2292
i915_ggtt_offset(rq->ring->vma));
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2294
rq->ring->head);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2296
rq->ring->tail);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2298
rq->ring->emit);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2300
rq->ring->space);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2308
print_request_ring(m, rq);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2310
if (rq->context->lrc_reg_state) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2312
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2320
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2324
list_for_each_entry(rq, requests, sched.link) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2325
if (rq == hung_rq)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2328
state = i915_test_request_state(rq);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2337
engine_dump_request(rq, m, msg);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2377
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2405
rq = READ_ONCE(engine->heartbeat.systole);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2406
if (rq)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2408
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2515
struct intel_context **ce, struct i915_request **rq)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2523
*rq = intel_context_get_active_request(*ce);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2535
*rq = engine_execlist_find_hung_request(engine);
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2536
if (*rq)
drivers/gpu/drm/i915/gt/intel_engine_cs.c
2537
*rq = i915_request_get_rcu(*rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
105
if (!rq) {
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
113
rq->fence.context,
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
114
rq->fence.seqno,
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
115
rq->sched.attr.priority);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
120
reset_engine(struct intel_engine_cs *engine, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
123
show_heartbeat(rq, engine);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
145
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
151
rq = engine->heartbeat.systole;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
152
if (rq && i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
153
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
173
rq->emitted_jiffies + msecs_to_jiffies(delay)))
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
176
if (!i915_sw_fence_signaled(&rq->submit)) {
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
188
rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
196
if (rq->sched.attr.priority >= attr.priority)
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
198
if (rq->sched.attr.priority >= attr.priority)
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
202
engine->sched_engine->schedule(rq, &attr);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
205
reset_engine(engine, rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
208
rq->emitted_jiffies = jiffies;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
226
rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
227
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
230
heartbeat_commit(rq, &attr);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
28
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
282
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
288
rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
289
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
290
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
292
__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
294
heartbeat_commit(rq, &attr);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
295
GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
33
rq = engine->heartbeat.systole;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
387
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
401
rq = heartbeat_create(ce, GFP_KERNEL);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
402
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
403
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
407
heartbeat_commit(rq, &attr);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
44
if (rq && rq->sched.attr.priority >= I915_PRIORITY_BARRIER &&
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
73
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
76
rq = __i915_request_create(ce, gfp);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
79
return rq;
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
82
static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
85
i915_request_add_active_barriers(rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
87
engine->heartbeat.systole = i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
90
static void heartbeat_commit(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
93
idle_pulse(rq->engine, rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
95
__i915_request_commit(rq);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
96
__i915_request_queue(rq, attr);
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
99
static void show_heartbeat(const struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_engine_pm.c
101
struct i915_request *rq = to_request(fence);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
103
ewma__engine_latency_add(&rq->engine->latency,
drivers/gpu/drm/i915/gt/intel_engine_pm.c
104
ktime_us_delta(rq->fence.timestamp,
drivers/gpu/drm/i915/gt/intel_engine_pm.c
105
rq->duration.emitted));
drivers/gpu/drm/i915/gt/intel_engine_pm.c
109
__queue_and_release_pm(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_engine_pm.c
123
GEM_BUG_ON(rq->context->active_count != 1);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
125
rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
143
__i915_request_queue_bh(rq);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
154
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_pm.c
212
rq = __i915_request_create(ce, GFP_NOWAIT);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
213
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/intel_engine_pm.c
219
i915_request_add_active_barriers(rq);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
222
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/intel_engine_pm.c
223
if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
drivers/gpu/drm/i915/gt/intel_engine_pm.c
231
BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
drivers/gpu/drm/i915/gt/intel_engine_pm.c
232
dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
drivers/gpu/drm/i915/gt/intel_engine_pm.c
233
rq->duration.emitted = ktime_get();
drivers/gpu/drm/i915/gt/intel_engine_pm.c
237
__queue_and_release_pm(rq, ce->timeline, engine);
drivers/gpu/drm/i915/gt/intel_engine_pm.h
102
rq = i915_request_create(engine->kernel_context);
drivers/gpu/drm/i915/gt/intel_engine_pm.h
105
return rq;
drivers/gpu/drm/i915/gt/intel_engine_pm.h
91
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_engine_types.h
531
int (*request_alloc)(struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_engine_types.h
537
int (*emit_bb_start)(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_engine_types.h
542
int (*emit_init_breadcrumb)(struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_engine_types.h
543
u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_engine_types.h
553
void (*submit_request)(struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_engine_types.h
560
void (*add_active_request)(struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_engine_types.h
561
void (*remove_active_request)(struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1002
if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1030
struct i915_request *rq = READ_ONCE(ve->request);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1033
if (!rq || !virtual_matches(ve, rq, engine)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1072
static void defer_request(struct i915_request *rq, struct list_head * const pl)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1086
GEM_BUG_ON(i915_request_is_active(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1087
list_move_tail(&rq->sched.link, pl);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1089
for_each_waiter(p, rq) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1097
if (w->engine != rq->engine)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1103
!__i915_request_is_complete(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1108
if (rq_prio(w) < rq_prio(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1111
GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1116
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1117
} while (rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1122
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1124
rq = __unwind_incomplete_requests(engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1125
if (!rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1128
defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1129
rq_prio(rq)));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1134
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1148
return rq->context->lrc.ccid == READ_ONCE(el->yield);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1152
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1158
if (!rq || __i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1166
if (!list_is_last_rcu(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1187
timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1191
if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1194
if (!needs_timeslice(engine, rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1197
return timer_expired(&el->timer) || timeslice_yield(el, rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1237
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1239
if (!rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1243
engine->execlists.preempt_target = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1246
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1253
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1259
active_preempt_timeout(engine, rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1262
static bool completed(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1264
if (i915_request_has_sentinel(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1267
return __i915_request_is_complete(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1411
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1415
rq = ve->request;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1416
if (unlikely(!virtual_matches(ve, rq, engine)))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1419
GEM_BUG_ON(rq->engine != &ve->base);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1420
GEM_BUG_ON(rq->context != &ve->context);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1422
if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1427
if (last && !can_merge_rq(last, rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1435
rq->fence.context,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1436
rq->fence.seqno,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1437
__i915_request_is_complete(rq) ? "!" :
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1438
__i915_request_has_started(rq) ? "*" :
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1449
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1450
WRITE_ONCE(rq->engine, engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1452
if (__i915_request_submit(rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1470
last = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1473
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1490
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1492
priolist_for_each_request_consume(rq, rn, p) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1506
if (last && !can_merge_rq(last, rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1520
if (last->context == rq->context)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1532
if (rq->execution_mask != engine->mask)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1543
ctx_single_port_submission(rq->context))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1549
if (__i915_request_submit(rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1557
rq->context));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1560
rq->fence.seqno));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1563
last = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1965
struct i915_request *rq = *execlists->active;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1967
rq->context->lrc_reg_state;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1980
i915_ggtt_offset(rq->ring->vma),
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1981
rq->head, rq->tail,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1982
rq->fence.context,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1983
lower_32_bits(rq->fence.seqno),
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1984
hwsp_seqno(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2053
static void __execlists_hold(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2060
if (i915_request_is_active(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2061
__i915_request_unsubmit(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2063
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2064
list_move_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2065
&rq->engine->sched_engine->hold);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2066
i915_request_set_hold(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2067
RQ_TRACE(rq, "on hold\n");
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2069
for_each_waiter(p, rq) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2077
if (w->engine != rq->engine)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2092
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2093
} while (rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2097
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2099
if (i915_request_on_hold(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2104
if (__i915_request_is_complete(rq)) { /* too late! */
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2105
rq = NULL;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2115
GEM_BUG_ON(i915_request_on_hold(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2116
GEM_BUG_ON(rq->engine != engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2117
__execlists_hold(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2122
return rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2125
static bool hold_request(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2135
for_each_signaler(p, rq) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2139
if (s->engine != rq->engine)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2151
static void __execlists_unhold(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2158
RQ_TRACE(rq, "hold release\n");
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2160
GEM_BUG_ON(!i915_request_on_hold(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2161
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2163
i915_request_clear_hold(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2164
list_move_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2165
i915_sched_lookup_priolist(rq->engine->sched_engine,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2166
rq_prio(rq)));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2167
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2170
for_each_waiter(p, rq) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2177
if (w->engine != rq->engine)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
219
struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2190
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2191
} while (rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2195
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2203
__execlists_unhold(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2205
if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2206
engine->sched_engine->queue_priority_hint = rq_prio(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2215
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
222
struct i915_request *active = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2224
struct intel_engine_cs *engine = cap->rq->engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2229
vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
224
list_for_each_entry_from_reverse(rq, &tl->requests, link) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2246
execlists_unhold(engine, cap->rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2247
i915_request_put(cap->rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
225
if (__i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
229
i915_request_set_error_once(rq, error);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2290
struct i915_request * const *port, *rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2298
for (port = el->active; (rq = *port); port++) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2299
if (rq->context->lrc.ccid == ccid) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
230
__i915_request_skip(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2303
return rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2307
for (port = el->pending; (rq = *port); port++) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2308
if (rq->context->lrc.ccid == ccid) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2312
return rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
232
active = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2343
cap->rq = active_context(engine, active_ccid(engine));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2344
if (cap->rq) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2345
cap->rq = active_request(cap->rq->context->timeline, cap->rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2346
cap->rq = i915_request_get_rcu(cap->rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2349
if (!cap->rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2372
if (!execlists_hold(engine, cap->rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2380
i915_request_put(cap->rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
239
active_request(const struct intel_timeline * const tl, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
241
return __active_request(tl, rq, 0);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2440
const struct i915_request *rq = *engine->execlists.active;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2453
if (rq == engine->execlists.preempt_target)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2457
active_preempt_timeout(engine, rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2548
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2550
GEM_BUG_ON(!list_empty(&rq->sched.link));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2551
list_add_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2553
rq_prio(rq)));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2554
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2558
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2562
if (rq_prio(rq) <= sched_engine->queue_priority_hint)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2565
sched_engine->queue_priority_hint = rq_prio(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2570
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2572
GEM_BUG_ON(i915_request_on_hold(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2573
return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
262
static int rq_prio(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
264
return READ_ONCE(rq->sched.attr.priority);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2640
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2644
i915_request_active_engine(rq, &engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
267
static int effective_prio(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
269
int prio = rq_prio(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2711
static int emit_pdps(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2713
const struct intel_engine_cs * const engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2714
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2718
GEM_BUG_ON(intel_vgpu_active(rq->i915));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2727
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2733
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2736
err = engine->emit_flush(rq, EMIT_FLUSH);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2741
err = engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2745
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2761
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
2763
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
279
if (i915_request_has_nopreempt(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3027
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3035
rq = active_context(engine, engine->execlists.reset_ccid);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3036
if (!rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3039
ce = rq->context;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
304
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3042
if (__i915_request_is_complete(rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3044
head = intel_ring_wrap(ce->ring, rq->tail);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3054
rq = active_request(ce->timeline, rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3055
head = intel_ring_wrap(ce->ring, rq->head);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3070
if (!__i915_request_has_started(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3084
__i915_request_reset(rq, stalled);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3148
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3174
list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3175
i915_request_put(i915_request_mark_eio(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3182
priolist_for_each_request_consume(rq, rn, p) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3183
if (i915_request_mark_eio(rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3184
__i915_request_submit(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3185
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3194
list_for_each_entry(rq, &sched_engine->hold, sched.link)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3195
i915_request_put(i915_request_mark_eio(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3206
rq = fetch_and_zero(&ve->request);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3207
if (rq) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3208
if (i915_request_mark_eio(rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3209
rq->engine = engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3210
__i915_request_submit(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3211
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3213
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3277
static void add_to_engine(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3279
lockdep_assert_held(&rq->engine->sched_engine->lock);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3280
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3283
static void remove_from_engine(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
329
last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3293
locked = READ_ONCE(rq->engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3295
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3300
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3302
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3303
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3306
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3310
i915_request_notify_execute_cb_imm(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3318
static void kick_execlists(const struct i915_request *rq, int prio)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3320
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3342
if (inflight->context == rq->context)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3348
rq->fence.context, rq->fence.seqno,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
337
if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
338
rq_prio(list_next_entry(rq, sched.link)) > last_prio)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
375
struct i915_request *rq, *rn, *active = NULL;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3784
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3787
rq = READ_ONCE(ve->request);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3788
if (!rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3792
mask = rq->execution_mask;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3795
i915_request_set_error_once(rq, -ENODEV);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3800
rq->fence.context, rq->fence.seqno,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
381
list_for_each_entry_safe_reverse(rq, rn,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
384
if (__i915_request_is_complete(rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
385
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
389
__i915_request_unsubmit(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3891
static void virtual_submit_request(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3893
struct virtual_engine *ve = to_virtual_engine(rq->engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3897
rq->fence.context,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3898
rq->fence.seqno);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3905
if (__i915_request_is_complete(rq)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3906
__i915_request_submit(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
391
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3916
ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3917
ve->request = i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
392
if (rq_prio(rq) != prio) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3920
list_move_tail(&rq->sched.link, virtual_queue(ve));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
393
prio = rq_prio(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
399
list_move(&rq->sched.link, pl);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
400
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
403
if (intel_ring_direction(rq->ring,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
404
rq->tail,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
405
rq->ring->tail + 8) > 0)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
406
rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4076
const struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
408
active = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4083
struct i915_request *rq, *last;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4092
list_for_each_entry(rq, &sched_engine->requests, sched.link) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4094
show_request(m, rq, "\t\t", 0);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4096
last = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4116
priolist_for_each_request(rq, p) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4118
show_request(m, rq, "\t\t", 0);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4120
last = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4137
struct i915_request *rq = READ_ONCE(ve->request);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4139
if (rq) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4141
show_request(m, rq, "\t\t", 0);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4143
last = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
415
execlists_context_status_change(struct i915_request *rq, unsigned long status)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
424
atomic_notifier_call_chain(&rq->engine->context_status_notifier,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
425
status, rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
428
static void reset_active(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
431
struct intel_context * const ce = rq->context;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
450
rq->fence.context, rq->fence.seqno);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
453
if (__i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
454
head = rq->tail;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
456
head = __active_request(ce->timeline, rq, -EIO)->head;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
466
static bool bad_request(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
468
return rq->fence.error && i915_request_started(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
472
__execlists_schedule_in(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
474
struct intel_engine_cs * const engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
475
struct intel_context * const ce = rq->context;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
483
if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq)))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
484
reset_active(rq, engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
519
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
527
static void execlists_schedule_in(struct i915_request *rq, int idx)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
529
struct intel_context * const ce = rq->context;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
532
GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
533
trace_i915_request_in(rq, idx);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
537
old = __execlists_schedule_in(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
540
GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
544
resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
546
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
550
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
551
WRITE_ONCE(rq->engine, &ve->base);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
552
ve->base.submit_request(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
557
static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
560
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
578
if (i915_request_in_priority_queue(rq) &&
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
579
rq->execution_mask != engine->mask)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
580
resubmit_virtual_request(rq, ve);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
586
static void __execlists_schedule_out(struct i915_request * const rq,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
589
struct intel_engine_cs * const engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
608
if (intel_timeline_is_last(ce->timeline, rq) &&
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
609
__i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
627
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
642
kick_siblings(rq, ce);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
648
static inline void execlists_schedule_out(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
650
struct intel_context * const ce = rq->context;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
652
trace_i915_request_out(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
657
__execlists_schedule_out(rq, ce);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
659
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
672
static u64 execlists_update_context(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
674
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
679
if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
680
desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
699
GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
700
prev = rq->ring->tail;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
701
tail = intel_ring_set_tail(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
702
if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
705
rq->tail = rq->wa_tail;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
735
dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
737
if (!rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
742
rq->context->lrc.ccid,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
743
rq->fence.context, rq->fence.seqno,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
744
__i915_request_is_complete(rq) ? "!" :
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
745
__i915_request_has_started(rq) ? "*" :
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
747
rq_prio(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
781
struct i915_request * const *port, *rq, *prev = NULL;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
803
for (port = execlists->pending; (rq = *port); port++) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
807
GEM_BUG_ON(!kref_read(&rq->fence.refcount));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
808
GEM_BUG_ON(!i915_request_is_active(rq));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
810
if (ce == rq->context) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
817
ce = rq->context;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
845
prev = rq;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
852
if (rq->execution_mask != engine->mask &&
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
862
if (!spin_trylock_irqsave(&rq->lock, flags))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
865
if (__i915_request_is_complete(rq))
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
897
spin_unlock_irqrestore(&rq->lock, flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
929
struct i915_request *rq = execlists->pending[n];
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
932
rq ? execlists_update_context(rq) : 0,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
959
static unsigned long i915_request_flags(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
961
return READ_ONCE(rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
994
const struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
999
if (!rq)
drivers/gpu/drm/i915/gt/intel_execlists_submission.h
30
const struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_ggtt.c
348
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_ggtt.c
376
rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC);
drivers/gpu/drm/i915/gt/intel_ggtt.c
378
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/intel_ggtt.c
384
cs = intel_ring_begin(rq, 2 * n_ptes + 2);
drivers/gpu/drm/i915/gt/intel_ggtt.c
387
i915_request_set_error_once(rq, PTR_ERR(cs));
drivers/gpu/drm/i915/gt/intel_ggtt.c
414
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_ggtt.c
416
i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_ggtt.c
417
__i915_request_commit(rq);
drivers/gpu/drm/i915/gt/intel_ggtt.c
418
__i915_request_queue(rq, &attr);
drivers/gpu/drm/i915/gt/intel_ggtt.c
422
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/i915/gt/intel_ggtt.c
423
if (rq->fence.error)
drivers/gpu/drm/i915/gt/intel_ggtt.c
426
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_ggtt.c
436
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_gt.c
533
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_gt.c
548
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/intel_gt.c
549
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/intel_gt.c
550
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/intel_gt.c
554
err = intel_engine_emit_ctx_wa(rq);
drivers/gpu/drm/i915/gt/intel_gt.c
558
err = intel_renderstate_emit(&so, rq);
drivers/gpu/drm/i915/gt/intel_gt.c
563
requests[id] = i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_gt.c
564
i915_request_add(rq);
drivers/gpu/drm/i915/gt/intel_gt.c
581
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_gt.c
584
rq = requests[id];
drivers/gpu/drm/i915/gt/intel_gt.c
585
if (!rq)
drivers/gpu/drm/i915/gt/intel_gt.c
588
if (rq->fence.error) {
drivers/gpu/drm/i915/gt/intel_gt.c
593
GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
drivers/gpu/drm/i915/gt/intel_gt.c
594
if (!rq->context->state)
drivers/gpu/drm/i915/gt/intel_gt.c
598
state = shmem_create_from_object(rq->context->state->obj);
drivers/gpu/drm/i915/gt/intel_gt.c
603
rq->engine->default_state = state;
drivers/gpu/drm/i915/gt/intel_gt.c
617
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_gt.c
619
rq = requests[id];
drivers/gpu/drm/i915/gt/intel_gt.c
620
if (!rq)
drivers/gpu/drm/i915/gt/intel_gt.c
623
ce = rq->context;
drivers/gpu/drm/i915/gt/intel_gt.c
624
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
25
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
30
return i915_active_add_request(&node->active, rq);
drivers/gpu/drm/i915/gt/intel_gt_requests.c
19
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/intel_gt_requests.c
21
list_for_each_entry_safe(rq, rn, &tl->requests, link)
drivers/gpu/drm/i915/gt/intel_gt_requests.c
22
if (!i915_request_retire(rq))
drivers/gpu/drm/i915/gt/intel_gt_requests.c
243
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/intel_gt_requests.c
250
llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
drivers/gpu/drm/i915/gt/intel_gt_requests.c
251
if (!i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/intel_gt_requests.c
252
struct dma_fence *f = &rq->fence;
drivers/gpu/drm/i915/gt/intel_gt_requests.c
264
i915_request_cancel(rq, -EINTR);
drivers/gpu/drm/i915/gt/intel_gt_requests.c
266
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
1010
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/intel_migrate.c
1011
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/intel_migrate.c
1012
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
1017
err = i915_request_await_deps(rq, deps);
drivers/gpu/drm/i915/gt/intel_migrate.c
1021
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gt/intel_migrate.c
1022
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
1031
err = emit_no_arbitration(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
1035
len = emit_pte(rq, &it, pat_index, is_lmem, offset, CHUNK_SZ);
drivers/gpu/drm/i915/gt/intel_migrate.c
1041
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
1045
err = emit_clear(rq, offset, len, value, is_lmem);
drivers/gpu/drm/i915/gt/intel_migrate.c
1054
err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
drivers/gpu/drm/i915/gt/intel_migrate.c
1060
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
1066
*out = i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
1067
i915_request_add(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
334
static int emit_no_arbitration(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_migrate.c
338
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/intel_migrate.c
345
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_migrate.c
350
static int max_pte_pkt_size(struct i915_request *rq, int pkt)
drivers/gpu/drm/i915/gt/intel_migrate.c
352
struct intel_ring *ring = rq->ring;
drivers/gpu/drm/i915/gt/intel_migrate.c
354
pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
drivers/gpu/drm/i915/gt/intel_migrate.c
362
static int emit_pte(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_migrate.c
369
bool has_64K_pages = HAS_64K_PAGES(rq->i915);
drivers/gpu/drm/i915/gt/intel_migrate.c
370
const u64 encode = rq->context->vm->pte_encode(0, pat_index,
drivers/gpu/drm/i915/gt/intel_migrate.c
372
struct intel_ring *ring = rq->ring;
drivers/gpu/drm/i915/gt/intel_migrate.c
378
GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8);
drivers/gpu/drm/i915/gt/intel_migrate.c
401
offset += (u64)rq->engine->instance << 32;
drivers/gpu/drm/i915/gt/intel_migrate.c
403
cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
drivers/gpu/drm/i915/gt/intel_migrate.c
408
pkt = max_pte_pkt_size(rq, dword_length);
drivers/gpu/drm/i915/gt/intel_migrate.c
423
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_migrate.c
426
cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
drivers/gpu/drm/i915/gt/intel_migrate.c
441
pkt = max_pte_pkt_size(rq, dword_rem);
drivers/gpu/drm/i915/gt/intel_migrate.c
472
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_migrate.c
530
static int emit_copy_ccs(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_migrate.c
534
struct drm_i915_private *i915 = rq->i915;
drivers/gpu/drm/i915/gt/intel_migrate.c
535
int mocs = rq->engine->gt->mocs.uc_index << 1;
drivers/gpu/drm/i915/gt/intel_migrate.c
539
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/gt/intel_migrate.c
567
*cs++ = rq->engine->instance |
drivers/gpu/drm/i915/gt/intel_migrate.c
570
*cs++ = rq->engine->instance |
drivers/gpu/drm/i915/gt/intel_migrate.c
576
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_migrate.c
581
static int emit_copy(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_migrate.c
584
const int ver = GRAPHICS_VER(rq->i915);
drivers/gpu/drm/i915/gt/intel_migrate.c
585
u32 instance = rq->engine->instance;
drivers/gpu/drm/i915/gt/intel_migrate.c
588
cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
drivers/gpu/drm/i915/gt/intel_migrate.c
624
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_migrate.c
694
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_migrate.c
753
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/intel_migrate.c
754
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/intel_migrate.c
755
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
760
err = i915_request_await_deps(rq, deps);
drivers/gpu/drm/i915/gt/intel_migrate.c
764
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gt/intel_migrate.c
765
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
774
err = emit_no_arbitration(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
781
len = emit_pte(rq, &it_src, src_pat_index, src_is_lmem,
drivers/gpu/drm/i915/gt/intel_migrate.c
792
err = emit_pte(rq, &it_dst, dst_pat_index, dst_is_lmem,
drivers/gpu/drm/i915/gt/intel_migrate.c
801
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
805
err = emit_copy(rq, dst_offset, src_offset, len);
drivers/gpu/drm/i915/gt/intel_migrate.c
814
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
819
err = emit_pte(rq, &it_ccs, ccs_pat_index, false,
drivers/gpu/drm/i915/gt/intel_migrate.c
829
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
833
err = emit_copy_ccs(rq, dst_offset, dst_access,
drivers/gpu/drm/i915/gt/intel_migrate.c
838
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
843
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
859
err = emit_copy_ccs(rq,
drivers/gpu/drm/i915/gt/intel_migrate.c
870
err = emit_copy_ccs(rq,
drivers/gpu/drm/i915/gt/intel_migrate.c
879
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_migrate.c
888
*out = i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
889
i915_request_add(rq);
drivers/gpu/drm/i915/gt/intel_migrate.c
917
static int emit_clear(struct i915_request *rq, u32 offset, int size,
drivers/gpu/drm/i915/gt/intel_migrate.c
920
struct drm_i915_private *i915 = rq->i915;
drivers/gpu/drm/i915/gt/intel_migrate.c
921
int mocs = rq->engine->gt->mocs.uc_index << 1;
drivers/gpu/drm/i915/gt/intel_migrate.c
935
cs = intel_ring_begin(rq, ring_sz);
drivers/gpu/drm/i915/gt/intel_migrate.c
947
*cs++ = rq->engine->instance;
drivers/gpu/drm/i915/gt/intel_migrate.c
967
*cs++ = rq->engine->instance;
drivers/gpu/drm/i915/gt/intel_migrate.c
979
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_migrate.c
994
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_renderstate.c
212
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_renderstate.c
214
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_renderstate.c
220
err = i915_vma_move_to_active(so->vma, rq, 0);
drivers/gpu/drm/i915/gt/intel_renderstate.c
224
err = engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/intel_renderstate.c
231
err = engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/intel_renderstate.h
48
struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_reset.c
114
static void mark_innocent(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_reset.c
119
ctx = rcu_dereference(rq->context->gem_context);
drivers/gpu/drm/i915/gt/intel_reset.c
125
void __i915_request_reset(struct i915_request *rq, bool guilty)
drivers/gpu/drm/i915/gt/intel_reset.c
129
RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
drivers/gpu/drm/i915/gt/intel_reset.c
130
GEM_BUG_ON(__i915_request_is_complete(rq));
drivers/gpu/drm/i915/gt/intel_reset.c
134
i915_request_set_error_once(rq, -EIO);
drivers/gpu/drm/i915/gt/intel_reset.c
135
__i915_request_skip(rq);
drivers/gpu/drm/i915/gt/intel_reset.c
136
banned = mark_guilty(rq);
drivers/gpu/drm/i915/gt/intel_reset.c
138
i915_request_set_error_once(rq, -EAGAIN);
drivers/gpu/drm/i915/gt/intel_reset.c
139
mark_innocent(rq);
drivers/gpu/drm/i915/gt/intel_reset.c
144
intel_context_ban(rq->context, rq);
drivers/gpu/drm/i915/gt/intel_reset.c
63
static bool mark_guilty(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_reset.c
70
if (intel_context_is_closed(rq->context))
drivers/gpu/drm/i915/gt/intel_reset.c
74
ctx = rcu_dereference(rq->context->gem_context);
drivers/gpu/drm/i915/gt/intel_reset.c
79
return intel_context_is_banned(rq->context);
drivers/gpu/drm/i915/gt/intel_reset.h
41
void __i915_request_reset(struct i915_request *rq, bool guilty);
drivers/gpu/drm/i915/gt/intel_ring.c
230
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
drivers/gpu/drm/i915/gt/intel_ring.c
232
struct intel_ring *ring = rq->ring;
drivers/gpu/drm/i915/gt/intel_ring.c
242
total_bytes = bytes + rq->reserved_space;
drivers/gpu/drm/i915/gt/intel_ring.c
263
total_bytes = rq->reserved_space + remain_actual;
drivers/gpu/drm/i915/gt/intel_ring.c
279
GEM_BUG_ON(!rq->reserved_space);
drivers/gpu/drm/i915/gt/intel_ring.c
282
i915_request_timeline(rq),
drivers/gpu/drm/i915/gt/intel_ring.h
18
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
drivers/gpu/drm/i915/gt/intel_ring.h
40
static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
drivers/gpu/drm/i915/gt/intel_ring.h
50
GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
drivers/gpu/drm/i915/gt/intel_ring.h
51
GEM_BUG_ON(!IS_ALIGNED(rq->ring->emit, 8)); /* RING_TAIL qword align */
drivers/gpu/drm/i915/gt/intel_ring.h
80
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
drivers/gpu/drm/i915/gt/intel_ring.h
83
u32 offset = addr - rq->ring->vaddr;
drivers/gpu/drm/i915/gt/intel_ring.h
85
GEM_BUG_ON(offset > rq->ring->size);
drivers/gpu/drm/i915/gt/intel_ring.h
86
return intel_ring_wrap(rq->ring, offset);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1136
static void add_to_engine(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1138
lockdep_assert_held(&rq->engine->sched_engine->lock);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1139
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1142
static void remove_from_engine(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1144
spin_lock_irq(&rq->engine->sched_engine->lock);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1145
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1148
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1150
spin_unlock_irq(&rq->engine->sched_engine->lock);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
1152
i915_request_notify_execute_cb_imm(rq);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
390
struct i915_request *pos, *rq;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
394
rq = NULL;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
399
rq = pos;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
427
if (rq) {
drivers/gpu/drm/i915/gt/intel_ring_submission.c
443
__i915_request_reset(rq, stalled);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
445
GEM_BUG_ON(rq->ring != engine->legacy.ring);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
446
head = rq->head;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
643
struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_ring_submission.c
648
if (!rq || !i915_request_is_active(rq))
drivers/gpu/drm/i915/gt/intel_ring_submission.c
651
engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
653
list_for_each_entry_continue(rq, &engine->sched_engine->requests,
drivers/gpu/drm/i915/gt/intel_ring_submission.c
655
if (rq->context == ce) {
drivers/gpu/drm/i915/gt/intel_ring_submission.c
656
i915_request_set_error_once(rq, -EIO);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
657
__i915_request_skip(rq);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
662
struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
666
i915_request_active_engine(rq, &engine);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
693
static int load_pd_dir(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_ring_submission.c
697
const struct intel_engine_cs * const engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
700
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
722
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
724
return rq->engine->emit_flush(rq, EMIT_FLUSH);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
727
static int mi_set_context(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_ring_submission.c
731
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
752
cs = intel_ring_begin(rq, len);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
839
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
844
static int remap_l3_slice(struct i915_request *rq, int slice)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
847
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
drivers/gpu/drm/i915/gt/intel_ring_submission.c
853
cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
868
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
874
static int remap_l3(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
876
struct i915_gem_context *ctx = i915_request_gem_context(rq);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
886
err = remap_l3_slice(rq, i);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
895
static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
902
ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
914
ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
918
return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
921
static int clear_residuals(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
923
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
926
ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
drivers/gpu/drm/i915/gt/intel_ring_submission.c
931
ret = mi_set_context(rq,
drivers/gpu/drm/i915/gt/intel_ring_submission.c
938
ret = engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/intel_ring_submission.c
944
ret = engine->emit_flush(rq, EMIT_FLUSH);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
949
return engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
952
static int switch_context(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_ring_submission.c
954
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
955
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/intel_ring_submission.c
964
ret = clear_residuals(rq);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
972
ret = switch_mm(rq, vm_alias(ce->vm));
drivers/gpu/drm/i915/gt/intel_ring_submission.c
991
ret = mi_set_context(rq, ce, flags);
drivers/gpu/drm/i915/gt/intel_ring_submission.c
996
ret = remap_l3(rq);
drivers/gpu/drm/i915/gt/intel_rps.c
1020
void intel_rps_boost(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_rps.c
1024
if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
drivers/gpu/drm/i915/gt/intel_rps.c
1028
if (test_bit(CONTEXT_LOW_LATENCY, &rq->context->flags))
drivers/gpu/drm/i915/gt/intel_rps.c
1032
if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
drivers/gpu/drm/i915/gt/intel_rps.c
1033
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
drivers/gpu/drm/i915/gt/intel_rps.c
1052
rq->fence.context, rq->fence.seqno);
drivers/gpu/drm/i915/gt/intel_rps.c
1067
rq->fence.context, rq->fence.seqno);
drivers/gpu/drm/i915/gt/intel_rps.c
2920
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_rps.c
2925
rq = to_request(fence);
drivers/gpu/drm/i915/gt/intel_rps.c
2927
if (!i915_request_started(rq))
drivers/gpu/drm/i915/gt/intel_rps.c
2928
intel_rps_boost(rq);
drivers/gpu/drm/i915/gt/intel_rps.h
30
void intel_rps_boost(struct i915_request *rq);
drivers/gpu/drm/i915/gt/intel_timeline.c
328
struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_timeline.c
414
const struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_timeline.c
425
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/intel_timeline.c
442
list_for_each_entry_safe(rq, rn, &tl->requests, link) {
drivers/gpu/drm/i915/gt/intel_timeline.c
443
if (i915_request_completed(rq))
drivers/gpu/drm/i915/gt/intel_timeline.c
447
if (i915_request_is_ready(rq))
drivers/gpu/drm/i915/gt/intel_timeline.c
449
if (i915_request_is_active(rq))
drivers/gpu/drm/i915/gt/intel_timeline.c
467
list_for_each_entry_safe(rq, rn, &tl->requests, link)
drivers/gpu/drm/i915/gt/intel_timeline.c
468
show_request(m, rq, "", 2);
drivers/gpu/drm/i915/gt/intel_timeline.h
74
struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_timeline.h
91
const struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_timeline.h
97
const struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_timeline.h
99
return list_is_last_rcu(&rq->link, &tl->requests);
drivers/gpu/drm/i915/gt/intel_workarounds.c
1004
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
drivers/gpu/drm/i915/gt/intel_workarounds.c
1008
if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
drivers/gpu/drm/i915/gt/intel_workarounds.c
1009
IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS)
drivers/gpu/drm/i915/gt/intel_workarounds.c
1010
cs = intel_ring_begin(rq, (wal->count * 2 + 6));
drivers/gpu/drm/i915/gt/intel_workarounds.c
1012
cs = intel_ring_begin(rq, (wal->count * 2 + 2));
drivers/gpu/drm/i915/gt/intel_workarounds.c
1044
if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
drivers/gpu/drm/i915/gt/intel_workarounds.c
1045
IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) {
drivers/gpu/drm/i915/gt/intel_workarounds.c
1056
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_workarounds.c
1058
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
drivers/gpu/drm/i915/gt/intel_workarounds.c
2989
wa_list_srm(struct i915_request *rq,
drivers/gpu/drm/i915/gt/intel_workarounds.c
2993
struct drm_i915_private *i915 = rq->i915;
drivers/gpu/drm/i915/gt/intel_workarounds.c
3007
cs = intel_ring_begin(rq, 4 * count);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3022
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3032
struct i915_request *rq;
drivers/gpu/drm/i915/gt/intel_workarounds.c
3061
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3062
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/intel_workarounds.c
3063
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3067
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3069
err = wa_list_srm(rq, wal, vma);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3071
i915_request_get(rq);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3073
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3074
i915_request_add(rq);
drivers/gpu/drm/i915/gt/intel_workarounds.c
3079
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/intel_workarounds.c
3092
if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
drivers/gpu/drm/i915/gt/intel_workarounds.c
3102
i915_request_put(rq);
drivers/gpu/drm/i915/gt/intel_workarounds.c
990
int intel_engine_emit_ctx_wa(struct i915_request *rq)
drivers/gpu/drm/i915/gt/intel_workarounds.c
992
struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
drivers/gpu/drm/i915/gt/intel_workarounds.c
993
struct intel_uncore *uncore = rq->engine->uncore;
drivers/gpu/drm/i915/gt/intel_workarounds.h
25
int intel_engine_emit_ctx_wa(struct i915_request *rq);
drivers/gpu/drm/i915/gt/mock_engine.c
256
static void mock_add_to_engine(struct i915_request *rq)
drivers/gpu/drm/i915/gt/mock_engine.c
258
lockdep_assert_held(&rq->engine->sched_engine->lock);
drivers/gpu/drm/i915/gt/mock_engine.c
259
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
drivers/gpu/drm/i915/gt/mock_engine.c
262
static void mock_remove_from_engine(struct i915_request *rq)
drivers/gpu/drm/i915/gt/mock_engine.c
273
locked = READ_ONCE(rq->engine);
drivers/gpu/drm/i915/gt/mock_engine.c
275
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
drivers/gpu/drm/i915/gt/mock_engine.c
280
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/mock_engine.c
297
struct i915_request *rq;
drivers/gpu/drm/i915/gt/mock_engine.c
305
list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
drivers/gpu/drm/i915/gt/mock_engine.c
306
i915_request_put(i915_request_mark_eio(rq));
drivers/gpu/drm/i915/gt/mock_engine.c
310
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
drivers/gpu/drm/i915/gt/mock_engine.c
311
if (i915_request_mark_eio(rq)) {
drivers/gpu/drm/i915/gt/mock_engine.c
312
__i915_request_submit(rq);
drivers/gpu/drm/i915/gt/mock_engine.c
313
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
117
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_context.c
119
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_context.c
120
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
124
err = request_sync(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
129
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_context.c
130
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_context.c
131
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
134
err = request_sync(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
17
static int request_sync(struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_context.c
19
struct intel_timeline *tl = i915_request_timeline(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
237
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_context.c
24
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
241
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_context.c
242
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_context.c
243
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
247
err = request_sync(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
27
__i915_request_commit(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
28
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_context.c
29
__i915_request_queue_bh(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
31
timeout = i915_request_wait(rq, 0, HZ / 10);
drivers/gpu/drm/i915/gt/selftest_context.c
331
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_context.c
338
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_context.c
339
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_context.c
340
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
344
err = intel_context_prepare_remote_request(remote, rq);
drivers/gpu/drm/i915/gt/selftest_context.c
346
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
35
i915_request_retire_upto(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
350
err = request_sync(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
37
lockdep_unpin_lock(&tl->mutex, rq->cookie);
drivers/gpu/drm/i915/gt/selftest_context.c
40
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
53
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_context.c
59
rq = list_last_entry(&tl->requests, typeof(*rq), link);
drivers/gpu/drm/i915/gt/selftest_context.c
60
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
62
timeout = i915_request_wait(rq, 0, HZ / 10);
drivers/gpu/drm/i915/gt/selftest_context.c
66
i915_request_retire_upto(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
68
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_context.c
80
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
171
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
173
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
174
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
175
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
179
err = write_timestamp(rq, 2);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
183
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
189
err = write_timestamp(rq, 3);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
194
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
195
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
197
if (i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
199
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
203
cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
315
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
317
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
318
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
319
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
323
err = write_timestamp(rq, 2);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
327
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
333
err = write_timestamp(rq, 3);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
337
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
344
err = write_timestamp(rq, 4);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
349
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
350
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
352
if (i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
354
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
359
(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
360
(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
54
static int write_timestamp(struct i915_request *rq, int slot)
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
57
rcu_dereference_protected(rq->timeline,
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
58
!i915_request_signaled(rq));
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
62
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
67
if (GRAPHICS_VER(rq->i915) >= 8)
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
70
*cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
74
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
105
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
106
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
107
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
125
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
126
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
129
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
261
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
296
rq = igt_spinner_create_request(&spin,
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
299
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
300
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
303
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
306
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
79
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
82
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
83
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
84
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
86
cs = intel_ring_begin(rq, 28);
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
88
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
100
if (i915_request_wait(rq, 0,
drivers/gpu/drm/i915/gt/selftest_execlists.c
104
rq->fence.context,
drivers/gpu/drm/i915/gt/selftest_execlists.c
105
rq->fence.seqno);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1059
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1063
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1064
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
1065
return rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1068
err = i915_request_await_dma_fence(rq, &wait->fence);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1073
cs = intel_ring_begin(rq, 14);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1091
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
drivers/gpu/drm/i915/gt/selftest_execlists.c
1100
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1104
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1105
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1107
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1111
return rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1133
struct i915_request *rq[3] = {};
drivers/gpu/drm/i915/gt/selftest_execlists.c
1164
rq[A1] = create_rewinder(ce, NULL, slot, X);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1165
if (IS_ERR(rq[A1])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1170
rq[A2] = create_rewinder(ce, NULL, slot, Y);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1172
if (IS_ERR(rq[A2]))
drivers/gpu/drm/i915/gt/selftest_execlists.c
1175
err = wait_for_submit(engine, rq[A2], HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1188
rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1190
if (IS_ERR(rq[2]))
drivers/gpu/drm/i915/gt/selftest_execlists.c
1193
err = wait_for_submit(engine, rq[B1], HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1202
while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
drivers/gpu/drm/i915/gt/selftest_execlists.c
1209
GEM_BUG_ON(!i915_request_is_active(rq[A1]));
drivers/gpu/drm/i915/gt/selftest_execlists.c
1210
GEM_BUG_ON(!i915_request_is_active(rq[B1]));
drivers/gpu/drm/i915/gt/selftest_execlists.c
1211
GEM_BUG_ON(i915_request_is_active(rq[A2]));
drivers/gpu/drm/i915/gt/selftest_execlists.c
1250
i915_request_put(rq[i]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1262
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1264
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1265
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
1266
return rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1268
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1269
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1271
return rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
128
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1333
struct i915_request *rq, *nop;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1342
rq = semaphore_queue(engine, vma, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1343
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1344
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1347
engine->sched_engine->schedule(rq, &attr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1348
err = wait_for_submit(engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
136
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1369
GEM_BUG_ON(i915_request_completed(rq));
drivers/gpu/drm/i915/gt/selftest_execlists.c
137
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1370
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1373
err = release_queue(engine, vma, 1, effective_prio(rq));
drivers/gpu/drm/i915/gt/selftest_execlists.c
138
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1384
if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1397
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
142
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
143
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1433
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1450
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1452
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1453
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1457
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1458
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1460
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1461
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1466
set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1467
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1477
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1479
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1480
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1484
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1485
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1486
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1492
if (wait_for_submit(engine, rq, HZ / 2)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1493
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1503
if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1508
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1724
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1730
rq = igt_spinner_create_request(spin, ce, arb);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1732
return rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1762
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1772
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
drivers/gpu/drm/i915/gt/selftest_execlists.c
1774
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1775
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1779
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1780
if (!igt_wait_for_spinner(&spin_lo, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1788
rq = spinner_create_request(&spin_hi, ctx_hi, engine,
drivers/gpu/drm/i915/gt/selftest_execlists.c
1790
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1792
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1796
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1797
if (!igt_wait_for_spinner(&spin_hi, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
185
struct i915_request *rq[2];
drivers/gpu/drm/i915/gt/selftest_execlists.c
1855
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
1865
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
drivers/gpu/drm/i915/gt/selftest_execlists.c
1867
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1868
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1872
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1873
if (!igt_wait_for_spinner(&spin_lo, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1878
rq = spinner_create_request(&spin_hi, ctx_hi, engine,
drivers/gpu/drm/i915/gt/selftest_execlists.c
1880
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1882
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1886
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1887
if (igt_wait_for_spinner(&spin_hi, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
1893
engine->sched_engine->schedule(rq, &attr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
1895
if (!igt_wait_for_spinner(&spin_hi, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2061
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2071
rq = spinner_create_request(&arg->a.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2074
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2075
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2077
clear_bit(CONTEXT_BANNED, &rq->context->flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2078
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2079
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2080
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2085
intel_context_ban(rq->context, rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2090
err = wait_for_reset(arg->engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2097
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2105
struct i915_request *rq[2] = {};
drivers/gpu/drm/i915/gt/selftest_execlists.c
2115
rq[0] = spinner_create_request(&arg->a.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2118
if (IS_ERR(rq[0]))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2119
return PTR_ERR(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2121
clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2122
i915_request_get(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2123
i915_request_add(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2124
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2129
rq[1] = spinner_create_request(&arg->b.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2132
if (IS_ERR(rq[1])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2133
err = PTR_ERR(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2137
clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2138
i915_request_get(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2139
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2140
i915_request_add(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2144
intel_context_ban(rq[1]->context, rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2150
err = wait_for_reset(arg->engine, rq[1], HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2154
if (rq[0]->fence.error != 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2160
if (rq[1]->fence.error != -EIO) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2167
i915_request_put(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2168
i915_request_put(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2176
struct i915_request *rq[3] = {};
drivers/gpu/drm/i915/gt/selftest_execlists.c
2186
rq[0] = spinner_create_request(&arg->a.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2189
if (IS_ERR(rq[0]))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2190
return PTR_ERR(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2192
clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2193
i915_request_get(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2194
i915_request_add(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2195
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2200
rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2201
if (IS_ERR(rq[1])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2202
err = PTR_ERR(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2206
clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2207
i915_request_get(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2208
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2209
i915_request_add(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2213
rq[2] = spinner_create_request(&arg->b.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2216
if (IS_ERR(rq[2])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2217
err = PTR_ERR(rq[2]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2221
i915_request_get(rq[2]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2222
err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2223
i915_request_add(rq[2]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2227
intel_context_ban(rq[2]->context, rq[2]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2232
err = wait_for_reset(arg->engine, rq[2], HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2236
if (rq[0]->fence.error != -EIO) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2247
if (intel_engine_has_semaphores(rq[1]->engine) &&
drivers/gpu/drm/i915/gt/selftest_execlists.c
2248
rq[1]->fence.error != 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2254
if (rq[2]->fence.error != -EIO) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2261
i915_request_put(rq[2]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2262
i915_request_put(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2263
i915_request_put(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2271
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2282
rq = spinner_create_request(&arg->a.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2285
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2286
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2288
clear_bit(CONTEXT_BANNED, &rq->context->flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2289
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2290
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2291
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2296
intel_context_ban(rq->context, rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2301
err = wait_for_reset(arg->engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2308
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
231
rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
232
if (IS_ERR(rq[0])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2328
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
233
err = PTR_ERR(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2338
rq = spinner_create_request(&arg->a.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2341
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2342
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2344
clear_bit(CONTEXT_BANNED, &rq->context->flags);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2345
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2346
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2347
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2352
intel_context_set_banned(rq->context);
drivers/gpu/drm/i915/gt/selftest_execlists.c
237
i915_request_get(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2370
err = wait_for_reset(engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2379
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
238
i915_request_add(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
239
GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
drivers/gpu/drm/i915/gt/selftest_execlists.c
241
if (!igt_wait_for_spinner(&spin, rq[0])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
242
i915_request_put(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
246
rq[1] = i915_request_create(ce[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
247
if (IS_ERR(rq[1])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
248
err = PTR_ERR(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
249
i915_request_put(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2579
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2585
rq = spinner_create_request(&lo.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2588
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2591
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2592
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2594
ring_size = rq->wa_tail - rq->head;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2596
ring_size += rq->ring->size;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2597
ring_size = rq->ring->size / ring_size;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2602
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2604
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2607
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2615
rq = spinner_create_request(&hi.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2618
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2620
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2621
if (!igt_wait_for_spinner(&hi.spin, rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2624
rq = spinner_create_request(&lo.spin,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2627
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2629
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2632
rq = igt_request_alloc(lo.ctx, engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2633
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2635
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2638
rq = igt_request_alloc(hi.ctx, engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2639
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
264
i915_request_await_dma_fence(rq[1], &rq[0]->fence);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2642
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2643
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2644
engine->sched_engine->schedule(rq, &attr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2647
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2655
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2659
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2661
rq = igt_request_alloc(lo.ctx, engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2662
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
2665
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2666
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2668
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
267
i915_request_get(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2677
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
268
i915_request_add(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2680
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
269
GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
drivers/gpu/drm/i915/gt/selftest_execlists.c
270
i915_request_put(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2709
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2764
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2765
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2766
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2770
rq->batch = i915_vma_get(vma);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2771
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2773
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2775
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_execlists.c
2778
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
278
engine->sched_engine->schedule(rq[1], &attr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2785
rq->mock.link.next = &(*prev)->mock.link;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2786
*prev = rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2790
i915_vma_put(rq->batch);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2791
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2804
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
282
rq[0] = i915_request_create(ce[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
283
if (IS_ERR(rq[0])) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2836
rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2837
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2838
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
284
err = PTR_ERR(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2842
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2843
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2844
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2846
if (!igt_wait_for_spinner(spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2848
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
285
i915_request_put(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2855
while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2861
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2875
rq->tail);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2876
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2879
rq = intel_context_create_request(ce[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2880
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
2881
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2885
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2886
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2887
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2889
err = wait_for_submit(engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
289
i915_request_await_dma_fence(rq[0], &rq[1]->fence);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2890
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
290
i915_request_get(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
291
i915_request_add(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
292
GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
drivers/gpu/drm/i915/gt/selftest_execlists.c
293
i915_request_put(rq[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
294
i915_request_put(rq[0]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2978
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gt/selftest_execlists.c
2994
err = create_gang(engine, &rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
2999
engine->sched_engine->schedule(rq, &attr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3011
cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3014
i915_gem_object_unpin_map(rq->batch->obj);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3020
while (rq) { /* wait for each rq from highest to lowest prio */
drivers/gpu/drm/i915/gt/selftest_execlists.c
3021
struct i915_request *n = list_next_entry(rq, mock.link);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3023
if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3028
prio, rq_prio(rq));
drivers/gpu/drm/i915/gt/selftest_execlists.c
3035
i915_vma_put(rq->batch);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3036
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3037
rq = n;
drivers/gpu/drm/i915/gt/selftest_execlists.c
31
static bool is_active(struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_execlists.c
3156
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3179
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3180
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3181
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3185
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3189
err = i915_vma_move_to_active(batch, rq, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3191
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_execlists.c
3198
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3199
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3207
return err ? ERR_PTR(err) : rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3217
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3221
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3222
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
3223
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3225
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3227
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3236
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3238
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3239
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3241
engine->sched_engine->schedule(rq, &attr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3243
if (i915_request_wait(rq, 0, HZ / 2) < 0)
drivers/gpu/drm/i915/gt/selftest_execlists.c
3245
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
33
if (i915_request_is_active(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
3302
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3304
rq = create_gpr_client(engine, global,
drivers/gpu/drm/i915/gt/selftest_execlists.c
3306
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3307
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3311
client[i] = rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3401
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3406
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
drivers/gpu/drm/i915/gt/selftest_execlists.c
3408
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3409
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3413
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3414
if (!igt_wait_for_spinner(&spin_lo, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3420
rq = igt_request_alloc(ctx_hi, engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3421
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3423
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3434
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3435
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3440
if (i915_request_wait(rq, 0, HZ / 10) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3442
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3448
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
347
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3493
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3513
rq = igt_request_alloc(ctx, smoke->engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3514
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3515
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3520
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3522
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_execlists.c
3527
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
36
if (i915_request_on_hold(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
3755
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3757
rq = i915_request_create(ve[nc]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3758
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3759
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3765
request[nc] = i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3766
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3772
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
3774
rq = i915_request_create(ve[nc]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3775
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
3776
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3782
request[nc] = i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
3783
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
386
rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
387
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
388
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
39
if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
392
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
393
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
394
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
396
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
398
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4028
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
4047
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4049
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4050
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4054
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
406
rq->wa_tail,
drivers/gpu/drm/i915/gt/selftest_execlists.c
4063
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4065
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4066
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4070
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4071
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4072
if (i915_request_wait(rq, 0, timeout) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4074
__func__, rq->engine->name);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4079
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4095
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
4115
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4117
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4118
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4122
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
413
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4132
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4134
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4135
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4139
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4140
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4141
if (i915_request_wait(rq, 0, timeout) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4148
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4227
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
4229
rq = i915_request_create(ve);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4230
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4231
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4236
last = i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4238
cs = intel_ring_begin(rq, 8);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4240
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4255
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4258
rq->execution_mask = engine->mask;
drivers/gpu/drm/i915/gt/selftest_execlists.c
4259
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
427
rq->tail);
drivers/gpu/drm/i915/gt/selftest_execlists.c
429
rq->tail,
drivers/gpu/drm/i915/gt/selftest_execlists.c
431
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4338
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
434
rq = intel_context_create_request(ce[1]);
drivers/gpu/drm/i915/gt/selftest_execlists.c
435
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
436
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4360
rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4361
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4362
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4365
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4367
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4373
engine = rq->engine;
drivers/gpu/drm/i915/gt/selftest_execlists.c
4382
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4388
GEM_BUG_ON(rq->engine != engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4391
execlists_hold(engine, rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4392
GEM_BUG_ON(!i915_request_on_hold(rq));
drivers/gpu/drm/i915/gt/selftest_execlists.c
4395
GEM_BUG_ON(rq->fence.error != -EIO);
drivers/gpu/drm/i915/gt/selftest_execlists.c
440
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_execlists.c
4401
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4402
if (!i915_request_wait(rq, 0, HZ / 5)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
4409
GEM_BUG_ON(!i915_request_on_hold(rq));
drivers/gpu/drm/i915/gt/selftest_execlists.c
441
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4412
execlists_unhold(engine, rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4413
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
442
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
4421
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
444
err = wait_for_submit(engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
445
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
46
struct i915_request *rq,
drivers/gpu/drm/i915/gt/selftest_execlists.c
496
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
536
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
539
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
540
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
543
GEM_BUG_ON(!rq->head);
drivers/gpu/drm/i915/gt/selftest_execlists.c
544
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
56
if (i915_request_completed(rq)) /* that was quick! */
drivers/gpu/drm/i915/gt/selftest_execlists.c
604
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
61
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
614
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_execlists.c
615
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
616
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
619
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
621
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
634
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
636
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
637
execlists_hold(engine, rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
638
GEM_BUG_ON(!i915_request_on_hold(rq));
drivers/gpu/drm/i915/gt/selftest_execlists.c
641
GEM_BUG_ON(rq->fence.error != -EIO);
drivers/gpu/drm/i915/gt/selftest_execlists.c
646
if (!i915_request_wait(rq, 0, HZ / 5)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
649
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
653
GEM_BUG_ON(!i915_request_on_hold(rq));
drivers/gpu/drm/i915/gt/selftest_execlists.c
656
execlists_unhold(engine, rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
657
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
663
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
72
struct i915_request *rq,
drivers/gpu/drm/i915/gt/selftest_execlists.c
723
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
731
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
733
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
734
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
738
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
739
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
741
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
746
cs = intel_ring_begin(rq, 2);
drivers/gpu/drm/i915/gt/selftest_execlists.c
748
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
761
client[i] = i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
762
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
823
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
drivers/gpu/drm/i915/gt/selftest_execlists.c
827
cs = intel_ring_begin(rq, 10);
drivers/gpu/drm/i915/gt/selftest_execlists.c
84
if (i915_request_completed(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
855
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_execlists.c
863
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
87
if (READ_ONCE(rq->fence.error))
drivers/gpu/drm/i915/gt/selftest_execlists.c
870
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_execlists.c
871
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
875
if (rq->engine->emit_init_breadcrumb)
drivers/gpu/drm/i915/gt/selftest_execlists.c
876
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
878
err = emit_semaphore_chain(rq, vma, idx);
drivers/gpu/drm/i915/gt/selftest_execlists.c
880
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
881
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
883
rq = ERR_PTR(err);
drivers/gpu/drm/i915/gt/selftest_execlists.c
887
return rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
898
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
901
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_execlists.c
902
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_execlists.c
903
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
905
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/selftest_execlists.c
907
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
91
if (rq->fence.error != -EIO) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
916
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_execlists.c
918
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
919
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
922
engine->sched_engine->schedule(rq, &attr);
drivers/gpu/drm/i915/gt/selftest_execlists.c
925
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
94
rq->fence.context,
drivers/gpu/drm/i915/gt/selftest_execlists.c
949
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_execlists.c
95
rq->fence.seqno);
drivers/gpu/drm/i915/gt/selftest_execlists.c
951
rq = semaphore_queue(engine, vma, n++);
drivers/gpu/drm/i915/gt/selftest_execlists.c
952
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_execlists.c
953
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_execlists.c
957
i915_request_put(rq);
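The selftest_execlists.c hits above keep repeating one lifecycle: create a request on an intel_context, take a reference, submit it, park it with execlists_hold(), check that it cannot complete while held, release it with execlists_unhold(), wait, and drop the reference. Below is a minimal sketch of that pattern (the selftest is compiled into the execlists backend, so the hold/unhold internals are visible to it); the helper name and the -EINVAL/-ETIME policy are illustrative assumptions, not the in-tree test structure, and this is not a standalone program.

/*
 * Illustrative sketch of the create/get/add/hold/unhold/wait/put sequence
 * seen in the hits above. hold_unhold_sketch() is a made-up name; in the
 * real test the request is a spinner so it cannot retire on its own.
 */
static int hold_unhold_sketch(struct intel_engine_cs *engine,
			      struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	execlists_hold(engine, rq);		/* take the request off the runlists */
	GEM_BUG_ON(!i915_request_on_hold(rq));

	/* i915_request_wait() returns >= 0 only on completion */
	if (i915_request_wait(rq, 0, HZ / 5) >= 0)
		err = -EINVAL;			/* completed while supposedly held */

	execlists_unhold(engine, rq);		/* resubmit */
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;			/* assumed timeout policy */

	i915_request_put(rq);
	return err;
}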
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
100
offset_in_page(sizeof(u32) * rq->fence.context);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1048
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1060
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1061
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1062
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1068
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1069
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1071
if (!wait_until_running(&h, rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1075
__func__, rq->fence.seqno, hws_seqno(&h, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1079
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
109
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1096
if (rq) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1098
err = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1101
engine->name, rq->fence.context,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1102
rq->fence.seqno, rq->context->guc_id.id, err);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1107
if (rq) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1108
if (rq->fence.error != -EIO) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1111
rq->fence.context,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1112
rq->fence.seqno, rq->context->guc_id.id);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1113
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1121
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1128
rq->fence.context,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1129
rq->fence.seqno);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1132
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1140
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1300
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1321
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1322
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1323
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1328
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1329
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1331
if (!wait_until_running(&h, rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1335
__func__, rq->fence.seqno, hws_seqno(&h, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1336
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1346
timeout = i915_request_wait(rq, 0, 10);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1361
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1434
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1478
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1479
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1480
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1492
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1502
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1507
err = igt_vma_move_to_active_unlocked(arg.vma, rq, flags);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1515
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1516
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1520
if (!wait_until_running(&h, rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1524
__func__, rq->fence.seqno, hws_seqno(&h, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1525
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1544
if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1548
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1556
fake_hangcheck(gt, rq->engine->mask);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
157
rq = igt_request_alloc(h->ctx, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1570
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
158
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
159
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
163
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
167
err = igt_vma_move_to_active_unlocked(hws, rq, 0);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1683
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1686
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1687
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1688
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1693
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1694
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1710
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1727
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
174
*batch++ = lower_32_bits(hws_address(hws, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1741
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1747
if (rq->fence.error) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1749
rq->fence.error);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
175
*batch++ = upper_32_bits(hws_address(hws, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1750
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1758
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
176
*batch++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1765
prev = rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1813
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1833
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1834
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1835
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1840
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1841
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1843
if (!wait_until_running(&h, rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1847
__func__, rq->fence.seqno, hws_seqno(&h, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1848
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1863
if (rq->fence.error != -EIO) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1870
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
189
*batch++ = lower_32_bits(hws_address(hws, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
190
*batch++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1912
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1926
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1927
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1928
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1933
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1934
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1936
if (wait_until_running(&h, rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1941
rq->fence.seqno, hws_seqno(&h, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1950
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
1955
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
202
*batch++ = lower_32_bits(hws_address(hws, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
203
*batch++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
214
*batch++ = lower_32_bits(hws_address(hws, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
215
*batch++ = rq->fence.seqno;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
228
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
229
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
238
err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
242
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
243
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
250
return err ? ERR_PTR(err) : rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
253
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
255
return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
274
static bool wait_until_running(struct hang *h, struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
276
return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
277
rq->fence.seqno),
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
279
wait_for(i915_seqno_passed(hws_seqno(h, rq),
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
280
rq->fence.seqno),
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
287
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
306
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
307
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
308
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
314
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
319
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
323
timeout = i915_request_wait(rq, 0,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
328
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
375
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
377
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
378
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
379
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
385
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
474
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
476
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
477
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
492
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
496
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
595
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
597
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
598
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
615
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
621
last = i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
622
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
730
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
742
rq = hang_create_request(&h, engine);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
743
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
744
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
750
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
751
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
753
if (!wait_until_running(&h, rq)) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
757
__func__, rq->fence.seqno, hws_seqno(&h, rq));
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
761
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
776
if (rq) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
778
err = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
781
engine->name, rq->fence.context,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
782
rq->fence.seqno, rq->context->guc_id.id, err);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
786
if (rq)
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
787
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
868
static int active_request_put(struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
872
if (!rq)
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
875
if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
877
rq->engine->name,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
878
rq->fence.context,
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
879
rq->fence.seqno);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
882
intel_gt_set_wedged(rq->engine->gt);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
886
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
896
struct i915_request *rq[8] = {};
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
897
struct intel_context *ce[ARRAY_SIZE(rq)];
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
915
unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
916
struct i915_request *old = rq[idx];
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
926
rq[idx] = i915_request_get(new);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
934
engine->sched_engine->schedule(rq[idx], &attr);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
946
for (count = 0; count < ARRAY_SIZE(rq); count++) {
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
947
int err__ = active_request_put(rq[count]);
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
97
const struct i915_request *rq)
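The selftest_hangcheck.c hits centre on hang_create_request() plus the wait_until_running()/hws_seqno() poll: the hanging batch writes its fence seqno into a per-context slot of the HWS page, and the test polls that slot until the write lands before injecting a reset. A sketch of the consumer side follows, assuming the hang fixture ('struct hang *h') from the listed file; the error policy and the helper name are illustrative assumptions.

/*
 * Illustrative consumer of the hang fixture seen above;
 * hang_create_request() and wait_until_running() are the listed helpers,
 * run_hanging_request() and the -EIO policy are assumptions.
 */
static int run_hanging_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err = 0;

	rq = hang_create_request(h, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(h, rq)) {	/* batch never reached its spin loop */
		pr_err("%s: request %llx:%lld never started\n",
		       engine->name, rq->fence.context, rq->fence.seqno);
		err = -EIO;
	}

	i915_request_put(rq);
	return err;
}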
drivers/gpu/drm/i915/gt/selftest_lrc.c
102
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_lrc.c
104
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_lrc.c
105
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1066
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1076
rq = ERR_CAST(b_after);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1080
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1081
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
1084
err = igt_vma_move_to_active_unlocked(before, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1088
err = igt_vma_move_to_active_unlocked(b_before, rq, 0);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1092
err = igt_vma_move_to_active_unlocked(after, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1096
err = igt_vma_move_to_active_unlocked(b_after, rq, 0);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1100
cs = intel_ring_begin(rq, 14);
drivers/gpu/drm/i915/gt/selftest_lrc.c
111
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1127
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1130
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1131
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1136
return rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1139
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1140
rq = ERR_PTR(err);
drivers/gpu/drm/i915/gt/selftest_lrc.c
115
rq = intel_engine_create_kernel_request(ce->engine);
drivers/gpu/drm/i915/gt/selftest_lrc.c
116
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
117
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
121
i915_request_await_dma_fence(rq, fence);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1219
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1228
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1229
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
1230
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1234
err = igt_vma_move_to_active_unlocked(batch, rq, 0);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1238
cs = intel_ring_begin(rq, 8);
drivers/gpu/drm/i915/gt/selftest_lrc.c
125
rq = i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1255
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1257
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1259
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
126
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
127
if (i915_request_wait(rq, 0, timeout) < 0)
drivers/gpu/drm/i915/gt/selftest_lrc.c
129
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1415
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1440
rq = record_registers(A, ref[0], ref[1], sema);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1441
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
1442
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1449
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
1450
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1454
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1468
rq = record_registers(A, result[0], result[1], sema);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1469
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
1470
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1475
if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
1483
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1569
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1572
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1573
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
1574
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1576
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1577
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1579
if (i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/gt/selftest_lrc.c
1582
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1751
struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_lrc.c
1760
if (!rq->fence.error)
drivers/gpu/drm/i915/gt/selftest_lrc.c
1772
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1784
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1785
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
1786
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1790
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1791
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1792
return rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1888
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
1903
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1904
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
1905
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1910
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1912
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1918
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1921
err = i915_request_wait(rq, 0, HZ / 5);
drivers/gpu/drm/i915/gt/selftest_lrc.c
1945
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
39
static bool is_active(struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_lrc.c
407
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
41
if (i915_request_is_active(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
431
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
432
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
433
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
437
cs = intel_ring_begin(rq, 4 * MAX_IDX);
drivers/gpu/drm/i915/gt/selftest_lrc.c
44
if (i915_request_on_hold(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
440
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
456
err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
458
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
459
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
466
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
47
if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
489
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
535
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
539
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
54
struct i915_request *rq,
drivers/gpu/drm/i915/gt/selftest_lrc.c
540
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
541
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
543
cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
drivers/gpu/drm/i915/gt/selftest_lrc.c
545
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
556
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_lrc.c
558
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/gt/selftest_lrc.c
559
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
570
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
575
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
576
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
577
return rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
579
cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
drivers/gpu/drm/i915/gt/selftest_lrc.c
581
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
603
err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/selftest_lrc.c
605
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
606
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
608
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
609
rq = ERR_PTR(err);
drivers/gpu/drm/i915/gt/selftest_lrc.c
612
return rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
621
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
637
rq = __gpr_read(ce, scratch, slot);
drivers/gpu/drm/i915/gt/selftest_lrc.c
638
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
639
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
64
if (i915_request_completed(rq)) /* that was quick! */
drivers/gpu/drm/i915/gt/selftest_lrc.c
643
err = wait_for_submit(engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_lrc.c
656
err = wait_for_submit(engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_lrc.c
664
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_lrc.c
69
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
691
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
743
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
747
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
748
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
749
return rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
751
cs = intel_ring_begin(rq, 10);
drivers/gpu/drm/i915/gt/selftest_lrc.c
769
*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
drivers/gpu/drm/i915/gt/selftest_lrc.c
773
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_lrc.c
777
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
778
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
780
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
784
return rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
801
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
806
rq = create_timestamp(arg->ce[0], slot, 1);
drivers/gpu/drm/i915/gt/selftest_lrc.c
807
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
808
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
810
err = wait_for_submit(rq->engine, rq, HZ / 2);
drivers/gpu/drm/i915/gt/selftest_lrc.c
84
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_lrc.c
846
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
87
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_lrc.c
88
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_lrc.c
89
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_lrc.c
91
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/selftest_lrc.c
93
i915_request_add(rq);
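A large fraction of the selftest_lrc.c hits follow the ring-emission idiom: reserve a fixed number of dwords with intel_ring_begin(), write exactly that many, and close with intel_ring_advance(); on failure the request must still be added so it is not leaked. A minimal sketch, assuming the context is already pinned; the MI_NOOP payload and the helper name stand in for the real register reads and writes.

/*
 * Sketch of the intel_ring_begin()/intel_ring_advance() idiom from the
 * hits above; emit_noops_sketch() is a made-up name and the payload is
 * a placeholder, not the selftest's actual command stream.
 */
static int emit_noops_sketch(struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);		/* reserve exactly 2 dwords */
	if (IS_ERR(cs)) {
		i915_request_add(rq);		/* still submit, do not leak */
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);		/* cs must match the reservation */

	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;			/* assumed timeout policy */
	i915_request_put(rq);

	return err;
}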
drivers/gpu/drm/i915/gt/selftest_migrate.c
107
if (rq) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
108
if (i915_request_wait(rq, 0, HZ) < 0) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
112
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
147
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_migrate.c
163
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_migrate.c
164
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
165
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
170
err = i915_request_await_deps(rq, deps);
drivers/gpu/drm/i915/gt/selftest_migrate.c
174
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
175
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
184
err = emit_no_arbitration(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
188
len = emit_pte(rq, &it, pat_index, true, offset, CHUNK_SZ);
drivers/gpu/drm/i915/gt/selftest_migrate.c
194
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/selftest_migrate.c
198
err = emit_copy_ccs(rq, offset, dst_access,
drivers/gpu/drm/i915/gt/selftest_migrate.c
203
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/selftest_migrate.c
209
*out = i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
210
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
265
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/gt/selftest_migrate.c
304
true, &rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
305
if (rq && !err) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
306
if (i915_request_wait(rq, 0, HZ) < 0) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
311
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
312
rq = NULL;
drivers/gpu/drm/i915/gt/selftest_migrate.c
318
err = fn(migrate, &ww, obj, val, &rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
319
if (rq && !err) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
320
if (i915_request_wait(rq, 0, HZ) < 0) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
324
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
325
rq = NULL;
drivers/gpu/drm/i915/gt/selftest_migrate.c
355
false, &rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
356
if (rq && !err) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
357
if (i915_request_wait(rq, 0, HZ) < 0) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
362
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
363
rq = NULL;
drivers/gpu/drm/i915/gt/selftest_migrate.c
398
if (rq && err != -EINVAL) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
399
i915_request_wait(rq, 0, HZ);
drivers/gpu/drm/i915/gt/selftest_migrate.c
400
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
46
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_migrate.c
553
struct i915_request *rq, *prev;
drivers/gpu/drm/i915/gt/selftest_migrate.c
590
rq = igt_spinner_create_request(&st.spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_migrate.c
591
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
592
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
596
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
597
if (!igt_wait_for_spinner(&st.spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
614
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
616
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_migrate.c
617
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
618
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
622
sz = (rq->ring->space - rq->reserved_space) / sizeof(u32) -
drivers/gpu/drm/i915/gt/selftest_migrate.c
624
sz = min_t(u32, sz, (SZ_1K - rq->reserved_space) / sizeof(u32) -
drivers/gpu/drm/i915/gt/selftest_migrate.c
626
cs = intel_ring_begin(rq, sz);
drivers/gpu/drm/i915/gt/selftest_migrate.c
634
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_migrate.c
636
pr_info("%s emit=%u sz=%d\n", __func__, rq->ring->emit, sz);
drivers/gpu/drm/i915/gt/selftest_migrate.c
638
prev = rq;
drivers/gpu/drm/i915/gt/selftest_migrate.c
639
} while (rq->ring->space > (rq->reserved_space +
drivers/gpu/drm/i915/gt/selftest_migrate.c
649
pr_info("%s emite_pte ring space=%u\n", __func__, rq->ring->space);
drivers/gpu/drm/i915/gt/selftest_migrate.c
651
len = emit_pte(rq, &it, obj->pat_index, false, 0, CHUNK_SZ);
drivers/gpu/drm/i915/gt/selftest_migrate.c
662
i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
drivers/gpu/drm/i915/gt/selftest_migrate.c
861
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_migrate.c
867
is_lmem, 0, &rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
868
if (rq) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
869
if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
drivers/gpu/drm/i915/gt/selftest_migrate.c
871
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
90
err = fn(migrate, &ww, src, dst, &rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
940
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_migrate.c
950
&rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
951
if (rq) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
952
if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
drivers/gpu/drm/i915/gt/selftest_migrate.c
954
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_migrate.c
96
if (rq) {
drivers/gpu/drm/i915/gt/selftest_migrate.c
97
i915_request_wait(rq, 0, HZ);
drivers/gpu/drm/i915/gt/selftest_migrate.c
98
i915_request_put(rq);
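In the selftest_migrate.c hits the copy/clear helpers hand their last request back through an out parameter, and every caller then applies the same "wait if we got one, then drop it" epilogue. A sketch of that epilogue follows; wait_and_put() is a made-up name and the -ETIME policy is an assumption.

/* Sketch of the caller-side epilogue used throughout the migrate hits:
 * the helper may or may not have produced a request. */
static int wait_and_put(struct i915_request *rq, int err)
{
	if (rq) {
		if (!err && i915_request_wait(rq, 0, HZ) < 0)
			err = -ETIME;		/* assumed timeout policy */
		i915_request_put(rq);
	}
	return err;
}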
drivers/gpu/drm/i915/gt/selftest_mocs.c
102
static int read_regs(struct i915_request *rq,
drivers/gpu/drm/i915/gt/selftest_mocs.c
111
cs = intel_ring_begin(rq, 4 * count);
drivers/gpu/drm/i915/gt/selftest_mocs.c
125
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_mocs.c
130
static int read_mocs_table(struct i915_request *rq,
drivers/gpu/drm/i915/gt/selftest_mocs.c
134
struct intel_gt *gt = rq->engine->gt;
drivers/gpu/drm/i915/gt/selftest_mocs.c
140
if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
drivers/gpu/drm/i915/gt/selftest_mocs.c
143
addr = mocs_offset(rq->engine);
drivers/gpu/drm/i915/gt/selftest_mocs.c
145
return read_regs(rq, addr, table->n_entries, offset);
drivers/gpu/drm/i915/gt/selftest_mocs.c
148
static int read_l3cc_table(struct i915_request *rq,
drivers/gpu/drm/i915/gt/selftest_mocs.c
157
return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
drivers/gpu/drm/i915/gt/selftest_mocs.c
221
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_mocs.c
228
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_mocs.c
229
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_mocs.c
230
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
232
err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/selftest_mocs.c
237
err = read_mocs_table(rq, arg->mocs, &offset);
drivers/gpu/drm/i915/gt/selftest_mocs.c
239
err = read_l3cc_table(rq, arg->l3cc, &offset);
drivers/gpu/drm/i915/gt/selftest_mocs.c
243
err = request_add_sync(rq, err);
drivers/gpu/drm/i915/gt/selftest_mocs.c
324
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_mocs.c
331
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
drivers/gpu/drm/i915/gt/selftest_mocs.c
332
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_mocs.c
334
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
337
err = request_add_spin(rq, &spin);
drivers/gpu/drm/i915/gt/selftest_mocs.c
343
err = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
38
static int request_add_sync(struct i915_request *rq, int err)
drivers/gpu/drm/i915/gt/selftest_mocs.c
40
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
41
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
42
if (i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/gt/selftest_mocs.c
44
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
49
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
drivers/gpu/drm/i915/gt/selftest_mocs.c
53
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
54
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_mocs.c
55
if (spin && !igt_wait_for_spinner(spin, rq))
drivers/gpu/drm/i915/gt/selftest_mocs.c
57
i915_request_put(rq);
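selftest_mocs.c (and, further down, selftest_workarounds.c) carry their own copies of two tiny helpers, request_add_sync() and request_add_spin(), whose bodies are almost fully visible in the hits above. They are reconstructed here for readability; the lines the listing elides (the error value on timeout and the return) are filled in as assumptions.

/* Reconstructed from the hits above; the -ETIME value on the elided
 * lines is an assumption. */
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;			/* assumed */
	i915_request_put(rq);

	return err;
}

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;			/* assumed */
	i915_request_put(rq);

	return err;
}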
drivers/gpu/drm/i915/gt/selftest_rc6.c
158
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rc6.c
163
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_rc6.c
164
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_rc6.c
165
return ERR_CAST(rq);
drivers/gpu/drm/i915/gt/selftest_rc6.c
167
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/selftest_rc6.c
169
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rc6.c
174
if (GRAPHICS_VER(rq->i915) >= 8)
drivers/gpu/drm/i915/gt/selftest_rc6.c
181
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_rc6.c
183
result = rq->hwsp_seqno + 2;
drivers/gpu/drm/i915/gt/selftest_rc6.c
184
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_reset.c
60
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_reset.c
73
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/selftest_reset.c
75
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_reset.c
76
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_reset.c
79
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
73
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
76
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
77
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
78
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
80
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
81
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
83
if (i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
85
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
1166
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rps.c
1177
rq = igt_spinner_create_request(&spin,
drivers/gpu/drm/i915/gt/selftest_rps.c
1180
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
1182
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
1186
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
1188
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
1281
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rps.c
1298
rq = igt_spinner_create_request(&spin,
drivers/gpu/drm/i915/gt/selftest_rps.c
1301
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
1302
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
1306
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
246
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rps.c
255
rq = igt_spinner_create_request(&spin,
drivers/gpu/drm/i915/gt/selftest_rps.c
258
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
260
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
264
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
266
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
406
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rps.c
416
rq = igt_spinner_create_request(&spin,
drivers/gpu/drm/i915/gt/selftest_rps.c
419
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
420
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
424
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
426
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
636
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rps.c
655
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_rps.c
656
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
657
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
661
err = i915_vma_move_to_active(vma, rq, 0);
drivers/gpu/drm/i915/gt/selftest_rps.c
663
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_rps.c
666
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
775
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rps.c
794
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_rps.c
795
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
796
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
800
err = i915_vma_move_to_active(vma, rq, 0);
drivers/gpu/drm/i915/gt/selftest_rps.c
802
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_rps.c
805
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
902
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_rps.c
910
rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP);
drivers/gpu/drm/i915/gt/selftest_rps.c
911
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_rps.c
912
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
914
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
915
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
917
if (!igt_wait_for_spinner(spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_rps.c
920
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
929
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
936
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
943
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_rps.c
952
GEM_BUG_ON(i915_request_completed(rq));
drivers/gpu/drm/i915/gt/selftest_rps.c
955
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_slpc.c
332
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_slpc.c
340
rq = igt_spinner_create_request(&spin,
drivers/gpu/drm/i915/gt/selftest_slpc.c
343
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_slpc.c
344
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_slpc.c
349
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_slpc.c
351
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1027
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
1062
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1063
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1064
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1070
err = i915_sw_fence_await_dma_fence(&rq->submit,
drivers/gpu/drm/i915/gt/selftest_timeline.c
1071
&watcher[0].rq->fence, 0,
drivers/gpu/drm/i915/gt/selftest_timeline.c
1074
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1080
switch_tl_lock(rq, watcher[0].rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1081
err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1083
err = emit_read_hwsp(watcher[0].rq, /* before */
drivers/gpu/drm/i915/gt/selftest_timeline.c
1084
rq->fence.seqno, hwsp,
drivers/gpu/drm/i915/gt/selftest_timeline.c
1086
switch_tl_lock(watcher[0].rq, rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1088
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1094
switch_tl_lock(rq, watcher[1].rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1095
err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1097
err = emit_read_hwsp(watcher[1].rq, /* after */
drivers/gpu/drm/i915/gt/selftest_timeline.c
1098
rq->fence.seqno, hwsp,
drivers/gpu/drm/i915/gt/selftest_timeline.c
1100
switch_tl_lock(watcher[1].rq, rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1102
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1108
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1109
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1111
rq = wrap_timeline(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1114
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1115
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1119
err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit,
drivers/gpu/drm/i915/gt/selftest_timeline.c
1120
&rq->fence, 0,
drivers/gpu/drm/i915/gt/selftest_timeline.c
1123
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1131
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1137
if (i915_request_wait(rq,
drivers/gpu/drm/i915/gt/selftest_timeline.c
1141
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1145
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1148
if (8 * watcher[1].rq->ring->emit >
drivers/gpu/drm/i915/gt/selftest_timeline.c
1149
3 * watcher[1].rq->ring->size)
drivers/gpu/drm/i915/gt/selftest_timeline.c
1190
struct i915_request *rq[3] = {};
drivers/gpu/drm/i915/gt/selftest_timeline.c
1203
for (i = 0; i < ARRAY_SIZE(rq); i++) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1218
rq[i] = i915_request_get(this);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1223
GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1225
if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1231
for (i = 0; i < ARRAY_SIZE(rq); i++) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1232
if (!i915_request_completed(rq[i])) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1240
for (i = 0; i < ARRAY_SIZE(rq); i++)
drivers/gpu/drm/i915/gt/selftest_timeline.c
1241
i915_request_put(rq[i]);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1266
struct i915_request *rq[3] = {};
drivers/gpu/drm/i915/gt/selftest_timeline.c
1290
for (i = 0; i < ARRAY_SIZE(rq); i++) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1305
rq[i] = i915_request_get(this);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1310
GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1312
if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1318
for (i = 0; i < ARRAY_SIZE(rq); i++) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1319
if (!i915_request_completed(rq[i])) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1328
for (i = 0; i < ARRAY_SIZE(rq); i++)
drivers/gpu/drm/i915/gt/selftest_timeline.c
1329
i915_request_put(rq[i]);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1366
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
1374
rq = checked_tl_write(tl, engine, count);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1375
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1377
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1381
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
1383
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
1397
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
454
static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
drivers/gpu/drm/i915/gt/selftest_timeline.c
458
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/selftest_timeline.c
462
if (GRAPHICS_VER(rq->i915) >= 8) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
467
} else if (GRAPHICS_VER(rq->i915) >= 4) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
479
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_timeline.c
487
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
492
rq = ERR_PTR(err);
drivers/gpu/drm/i915/gt/selftest_timeline.c
503
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_timeline.c
504
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_timeline.c
507
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
509
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
drivers/gpu/drm/i915/gt/selftest_timeline.c
510
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
512
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
513
rq = ERR_PTR(err);
drivers/gpu/drm/i915/gt/selftest_timeline.c
519
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_timeline.c
521
return rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
552
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
560
rq = checked_tl_write(tl, engine, count);
drivers/gpu/drm/i915/gt/selftest_timeline.c
561
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
563
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
568
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
620
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
632
rq = checked_tl_write(tl, engine, count);
drivers/gpu/drm/i915/gt/selftest_timeline.c
634
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
636
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
641
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
692
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
698
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/selftest_timeline.c
699
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
700
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
707
err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
drivers/gpu/drm/i915/gt/selftest_timeline.c
710
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
716
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
drivers/gpu/drm/i915/gt/selftest_timeline.c
718
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
724
err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
drivers/gpu/drm/i915/gt/selftest_timeline.c
727
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
733
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
drivers/gpu/drm/i915/gt/selftest_timeline.c
735
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
744
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
746
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
774
static int emit_read_hwsp(struct i915_request *rq,
drivers/gpu/drm/i915/gt/selftest_timeline.c
778
const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
drivers/gpu/drm/i915/gt/selftest_timeline.c
781
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/gt/selftest_timeline.c
802
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_timeline.c
809
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
880
w->rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_timeline.c
882
if (IS_ERR(w->rq))
drivers/gpu/drm/i915/gt/selftest_timeline.c
883
return PTR_ERR(w->rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
887
switch_tl_lock(w->rq, NULL);
drivers/gpu/drm/i915/gt/selftest_timeline.c
895
struct i915_request *rq = fetch_and_zero(&w->rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
901
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
902
switch_tl_lock(NULL, rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
903
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
905
if (i915_request_wait(rq, 0, HZ) < 0) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
924
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
930
if (w->rq) {
drivers/gpu/drm/i915/gt/selftest_timeline.c
931
switch_tl_lock(NULL, w->rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
933
i915_request_add(w->rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
941
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/selftest_timeline.c
944
list_for_each_entry_safe(rq, rn, &tl->requests, link)
drivers/gpu/drm/i915/gt/selftest_timeline.c
945
if (!i915_request_retire(rq))
drivers/gpu/drm/i915/gt/selftest_timeline.c
952
static struct i915_request *wrap_timeline(struct i915_request *rq)
drivers/gpu/drm/i915/gt/selftest_timeline.c
954
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/selftest_timeline.c
956
u32 seqno = rq->fence.seqno;
drivers/gpu/drm/i915/gt/selftest_timeline.c
959
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
960
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_timeline.c
961
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_timeline.c
962
return rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
964
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
965
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
968
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
969
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_timeline.c
970
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_timeline.c
971
return rq;
drivers/gpu/drm/i915/gt/selftest_timeline.c
973
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
974
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_timeline.c
976
return rq;
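The tail of the selftest_timeline.c block shows wrap_timeline(): starting from a submitted request, keep dropping it and queueing a fresh one on the same context until the 32-bit timeline seqno wraps past the starting value (the selftest presumably pre-seeds the timeline near the wrap point, so only a few iterations are needed). A reconstruction is sketched below; the hits only show the per-iteration put/create/get/add steps, so the wrap test and termination are assumptions.

/* Sketch reconstructed from the hits above; the wrap test is an
 * assumption, and the timeline is assumed to be seeded near wraparound. */
static struct i915_request *wrap_timeline_sketch(struct i915_request *rq)
{
	struct intel_context *ce = rq->context;
	u32 seqno = rq->fence.seqno;

	/* Queue requests until the timeline seqno wraps below 'seqno'. */
	do {
		i915_request_put(rq);
		rq = intel_context_create_request(ce);
		if (IS_ERR(rq))
			return rq;

		i915_request_get(rq);
		i915_request_add(rq);
	} while (rq->fence.seqno >= seqno);

	return rq;
}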
drivers/gpu/drm/i915/gt/selftest_tlb.c
124
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_tlb.c
125
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_tlb.c
126
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_tlb.c
130
err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
drivers/gpu/drm/i915/gt/selftest_tlb.c
132
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_tlb.c
136
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_tlb.c
137
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_tlb.c
149
if (!i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/selftest_tlb.c
156
} else if (!i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/selftest_tlb.c
173
if (wait_for(i915_request_completed(rq), HZ / 2)) {
drivers/gpu/drm/i915/gt/selftest_tlb.c
182
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_tlb.c
44
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
104
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
1243
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
1302
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
1303
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
1304
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
1309
ret = request_add_spin(rq, &spin);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
1317
if (i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
1334
ret = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
135
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
136
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
137
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
141
err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
149
cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
161
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
163
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
169
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
271
struct i915_request **rq)
drivers/gpu/drm/i915/gt/selftest_workarounds.c
280
*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
283
if (IS_ERR(*rq)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
285
err = PTR_ERR(*rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
289
err = request_add_spin(*rq, spin);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
303
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
324
err = switch_to_scratch_context(engine, &spin, &rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
329
if (i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
340
err = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
37
static int request_add_sync(struct i915_request *rq, int err)
drivers/gpu/drm/i915/gt/selftest_workarounds.c
39
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
40
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
41
if (i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/gt/selftest_workarounds.c
43
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
48
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
drivers/gpu/drm/i915/gt/selftest_workarounds.c
52
i915_request_get(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
523
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
53
i915_request_add(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
54
if (spin && !igt_wait_for_spinner(spin, rq))
drivers/gpu/drm/i915/gt/selftest_workarounds.c
56
i915_request_put(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
619
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
620
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
621
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
626
err = engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
631
err = i915_vma_move_to_active(batch, rq, 0);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
635
err = i915_vma_move_to_active(scratch, rq,
drivers/gpu/drm/i915/gt/selftest_workarounds.c
640
err = engine->emit_bb_start(rq,
drivers/gpu/drm/i915/gt/selftest_workarounds.c
647
err = request_add_sync(rq, err);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
846
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
850
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
851
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/selftest_workarounds.c
852
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
854
err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
862
cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
880
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
883
return request_add_sync(rq, err);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
889
struct i915_request *rq;
drivers/gpu/drm/i915/gt/selftest_workarounds.c
922
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
923
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/selftest_workarounds.c
924
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
929
err = engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
934
err = igt_vma_move_to_active_unlocked(batch, rq, 0);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
939
err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);
drivers/gpu/drm/i915/gt/selftest_workarounds.c
942
err = request_add_sync(rq, err);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
263
static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
268
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
277
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
285
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
291
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
292
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
293
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
296
err = ce->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
301
err = emit_gsc_fw_load(rq, gsc);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
305
err = ce->engine->emit_flush(rq, 0);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
308
i915_request_get(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
311
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
313
i915_request_add(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
315
if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
318
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
100
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
147
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
162
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
163
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
164
err = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
170
err = i915_vma_move_to_active(pkt->bb_vma, rq, 0);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
173
err = i915_vma_move_to_active(pkt->heci_pkt_vma, rq, EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
177
engine = rq->context->engine;
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
179
err = engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
184
err = engine->emit_bb_start(rq, i915_vma_offset(pkt->bb_vma), PAGE_SIZE, 0);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
188
err = ce->engine->emit_flush(rq, 0);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
194
i915_request_get(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
197
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
199
i915_request_add(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
206
if (wait_for(i915_request_started(rq), GSC_HECI_REPLY_LATENCY_MS))
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
209
if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
214
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
24
static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
28
cs = intel_ring_begin(rq, 8);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
41
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
51
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
63
rq = i915_request_create(ce);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
64
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
65
return PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
68
err = ce->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
73
err = emit_gsc_heci_pkt(rq, &pkt);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
78
err = ce->engine->emit_flush(rq, 0);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
81
i915_request_get(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
84
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
86
i915_request_add(rq);
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
93
if (wait_for(i915_request_started(rq), GSC_HECI_REPLY_LATENCY_MS))
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
96
if (i915_request_wait(rq, 0, msecs_to_jiffies(GSC_HECI_REPLY_LATENCY_MS)) < 0)
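The intel_gsc_uc_heci_cmd_submit.c excerpts (lines 51-100) trace a single submission flow: create a request, emit the HECI packet, record any emission error in the fence before queuing, then wait for the GSC reply. A hedged sketch of that flow; the function name submit_heci_pkt and the error handling between the quoted calls are assumptions, only the listed calls are taken from the source.

/*
 * Sketch of the GSC HECI packet submission sequence.  Setup of the
 * pkt, locking and cleanup paths are omitted.
 */
static int submit_heci_pkt(struct intel_context *ce, struct gsc_heci_pkt *pkt)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	err = ce->engine->emit_init_breadcrumb(rq);
	if (!err)
		err = emit_gsc_heci_pkt(rq, pkt);	/* helper quoted at line 24 */
	if (!err)
		err = ce->engine->emit_flush(rq, 0);

	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);	/* mark the fence before queuing */
	i915_request_add(rq);

	if (!err &&
	    i915_request_wait(rq, 0, msecs_to_jiffies(GSC_HECI_REPLY_LATENCY_MS)) < 0)
		err = -ETIME;				/* assumed timeout error */

	i915_request_put(rq);
	return err;
}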
drivers/gpu/drm/i915/gt/uc/intel_guc.h
533
void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1824
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1833
list_for_each_entry_safe_reverse(rq, rn,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1836
if (i915_request_completed(rq))
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1839
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1840
__i915_request_unsubmit(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1843
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1844
if (rq_prio(rq) != prio) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1845
prio = rq_prio(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1850
list_add(&rq->sched.link, pl);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1851
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1860
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1888
rq = intel_context_get_active_request(ce);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1889
if (!rq) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1894
if (i915_request_started(rq))
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1898
head = intel_ring_wrap(ce->ring, rq->head);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1900
__i915_request_reset(rq, guilty);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1901
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1962
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1968
list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1969
i915_request_put(i915_request_mark_eio(rq));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1977
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2005
priolist_for_each_request_consume(rq, rn, p) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2006
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2008
__i915_request_submit(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2010
i915_request_put(i915_request_mark_eio(rq));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2202
struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2205
GEM_BUG_ON(!list_empty(&rq->sched.link));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2206
list_add_tail(&rq->sched.link,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2208
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2213
struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2217
__i915_request_submit(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2219
trace_i915_request_in(rq, 0);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2221
if (is_multi_lrc_rq(rq)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2222
if (multi_lrc_submit(rq)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2223
ret = guc_wq_item_append(guc, rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2225
ret = guc_add_request(guc, rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2228
guc_set_lrc_tail(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2229
ret = guc_add_request(guc, rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2238
static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2240
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2241
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2248
static void guc_submit_request(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2250
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2251
struct intel_guc *guc = gt_to_guc(rq->engine->gt);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2257
if (need_tasklet(guc, rq))
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2258
queue_request(sched_engine, rq, rq_prio(rq));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2259
else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3211
struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3214
request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3216
if (i915_sw_fence_signaled(&rq->submit)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3222
if (!i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3223
__i915_request_skip(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3224
guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3255
guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3722
static void add_to_context(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3724
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3725
u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3728
GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3731
list_move_tail(&rq->sched.link, &ce->guc_state.requests);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3733
if (rq->guc_prio == GUC_PRIO_INIT) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3734
rq->guc_prio = new_guc_prio;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3735
add_context_inflight_prio(ce, rq->guc_prio);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3736
} else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3737
sub_context_inflight_prio(ce, rq->guc_prio);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3738
rq->guc_prio = new_guc_prio;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3739
add_context_inflight_prio(ce, rq->guc_prio);
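The add_to_context() and guc_bump_inflight_request_prio() excerpts above show per-context accounting of in-flight GuC priorities: a request is counted in exactly one priority bucket, so the old bucket is decremented before the new one is incremented. A minimal sketch of that bookkeeping; the wrapper name move_inflight_prio is hypothetical, the add/sub_context_inflight_prio() calls are taken from the listing.

/*
 * Move rq from its current GuC priority bucket to new_guc_prio,
 * keeping the context's inflight counts consistent.
 */
static void move_inflight_prio(struct intel_context *ce,
			       struct i915_request *rq, u8 new_guc_prio)
{
	if (rq->guc_prio != GUC_PRIO_INIT)
		sub_context_inflight_prio(ce, rq->guc_prio);	/* drop old bucket */
	rq->guc_prio = new_guc_prio;
	add_context_inflight_prio(ce, rq->guc_prio);		/* count new bucket */
}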
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3746
static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3750
if (rq->guc_prio != GUC_PRIO_INIT &&
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3751
rq->guc_prio != GUC_PRIO_FINI) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3752
sub_context_inflight_prio(ce, rq->guc_prio);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3755
rq->guc_prio = GUC_PRIO_FINI;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3758
static void remove_from_context(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3760
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3766
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3767
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3770
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3772
guc_prio_fini(rq, ce);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3777
i915_request_notify_execute_cb_imm(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3811
struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3813
might_lock(&rq->engine->sched_engine->lock);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3814
i915_sw_fence_complete(&rq->submit);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3819
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3830
list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3832
list_del(&rq->guc_fence_link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3833
irq_work_queue(&rq->submit_work);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
384
request_to_scheduling_context(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
386
return intel_context_to_parent(rq->context);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3877
static int guc_request_alloc(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3879
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3884
GEM_BUG_ON(!intel_context_is_pinned(rq->context));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3891
rq->reserved_space += GUC_REQUEST_SIZE;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3902
ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3906
rq->reserved_space -= GUC_REQUEST_SIZE;
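The guc_request_alloc() excerpts (lines 3891-3906) show the ring-space reservation pattern: GUC_REQUEST_SIZE bytes are reserved before any fallible emission work so that the final breadcrumb can always be written once the request is committed, and the reservation is released only after that work succeeds. A hedged sketch; the wrapper name reserve_for_guc is hypothetical and the steps between the quoted lines are elided.

/*
 * Reserve breadcrumb space, perform the fallible emission, then
 * release the reservation for the caller's own use.
 */
static int reserve_for_guc(struct i915_request *rq)
{
	int ret;

	rq->reserved_space += GUC_REQUEST_SIZE;	/* keep room for the breadcrumb */

	ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
	if (ret)
		return ret;

	rq->reserved_space -= GUC_REQUEST_SIZE;	/* emission may now use it */
	return 0;
}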
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3979
init_irq_work(&rq->submit_work, submit_work_cb);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3980
i915_sw_fence_await(&rq->submit);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
3982
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4203
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4206
static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4210
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4213
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4331
static void guc_bump_inflight_request_prio(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4334
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4343
if (rq->guc_prio == GUC_PRIO_FINI)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4346
if (!new_guc_prio_higher(rq->guc_prio, new_guc_prio))
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4349
if (rq->guc_prio != GUC_PRIO_INIT)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4350
sub_context_inflight_prio(ce, rq->guc_prio);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4352
rq->guc_prio = new_guc_prio;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4353
add_context_inflight_prio(ce, rq->guc_prio);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4360
static void guc_retire_inflight_request_prio(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4362
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
4365
guc_prio_fini(rq, ce);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5392
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5422
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5423
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5513
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5515
priolist_for_each_request(rq, pl)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5517
rq->context->guc_id.id,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5518
rq->fence.seqno);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5636
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5640
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5646
cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5678
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5683
static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5687
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5693
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5722
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5728
__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5731
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5771
static inline bool skip_handshake(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5773
return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5778
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5781
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5787
if (unlikely(skip_handshake(rq))) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5796
cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5802
rq->fence.seqno,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5803
i915_request_active_timeline(rq)->hwsp_offset,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5816
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5822
__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5825
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5854
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5857
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5863
if (unlikely(skip_handshake(rq))) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5872
cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5878
rq->fence.seqno,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5879
i915_request_active_timeline(rq)->hwsp_offset,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5892
rq->tail = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
696
static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
699
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
705
lockdep_assert_held(&rq->engine->sched_engine->lock);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
712
i915_request_put(i915_request_mark_eio(rq));
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
772
trace_i915_request_guc_submit(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
779
static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
781
int ret = __guc_add_request(guc, rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
784
guc->stalled_request = rq;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
791
static inline void guc_set_lrc_tail(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
793
rq->context->lrc_reg_state[CTX_RING_TAIL] =
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
794
intel_ring_set_tail(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
797
static inline int rq_prio(const struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
799
return rq->sched.attr.priority;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
802
static bool is_multi_lrc_rq(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
804
return intel_context_is_parallel(rq->context);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
807
static bool can_merge_rq(struct i915_request *rq,
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
810
return request_to_scheduling_context(rq) ==
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
850
static int __guc_wq_item_append(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
852
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
894
struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
896
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
902
ret = __guc_wq_item_append(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
904
guc->stalled_request = rq;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
911
static bool multi_lrc_submit(struct i915_request *rq)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
913
struct intel_context *ce = request_to_scheduling_context(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
915
intel_ring_set_tail(rq->ring, rq->tail);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
923
return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
955
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
957
priolist_for_each_request_consume(rq, rn, p) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
958
if (last && !can_merge_rq(rq, last))
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
961
list_del_init(&rq->sched.link);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
963
__i915_request_submit(rq);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
965
trace_i915_request_in(rq, 0);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
966
last = rq;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
968
if (is_multi_lrc_rq(rq)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
975
if (multi_lrc_submit(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
11
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
15
i915_request_get(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
153
struct i915_request *spin_rq = NULL, *rq, *last = NULL;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
16
i915_request_add(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
17
if (spin && !igt_wait_for_spinner(spin, rq))
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
203
rq = nop_user_request(ce[context_index], spin_rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
204
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
205
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
206
rq = NULL;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
215
last = rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
240
rq = nop_user_request(ce[context_index], NULL);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
241
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
242
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
243
guc_err(guc, "Failed to steal guc_id %d: %pe\n", context_index, rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
248
ret = i915_request_wait(rq, 0, HZ);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
249
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
26
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
29
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
30
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
31
return rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
315
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
34
ret = i915_sw_fence_await_dma_fence(&rq->submit,
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
340
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
342
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
343
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
344
gt_err(gt, "Failed to create spinner request: %pe\n", rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
348
ret = request_add_spin(rq, &spin);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
362
ret = wait_for(gt->uc.guc.fast_response_selftest != 1 || i915_request_completed(rq),
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
369
if (i915_request_completed(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
38
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
384
ret = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
391
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
43
i915_request_get(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
44
i915_request_add(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
46
return rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
54
struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
86
rq = nop_user_request(ce, NULL);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
89
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
90
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
91
gt_err(gt, "Failed to create request %d: %pe\n", i, rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc.c
95
last[i] = rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
101
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
108
ret = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
109
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
127
rq = nop_request(engine);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
128
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
129
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
133
ret = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
134
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
17
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
19
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
20
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
21
return rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
23
i915_request_get(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
24
i915_request_add(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
26
return rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
36
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
77
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
79
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
80
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
81
gt_err(gt, "Failed to create spinner request: %pe\n", rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
85
ret = request_add_spin(rq, &spin);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
87
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
94
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
103
return rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
106
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
114
struct i915_request *rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
126
rq = multi_lrc_nop_request(parent);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
127
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
128
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
129
gt_err(gt, "Failed creating requests: %pe\n", rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
133
ret = intel_selftest_wait_for_rq(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
137
i915_request_put(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
80
struct i915_request *rq, *child_rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
85
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
86
if (IS_ERR(rq))
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
87
return rq;
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
89
i915_request_get(rq);
drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
90
i915_request_add(rq);
drivers/gpu/drm/i915/gvt/scheduler.c
1082
struct i915_request *rq = workload->req;
drivers/gpu/drm/i915/gvt/scheduler.c
1092
if (rq) {
drivers/gpu/drm/i915/gvt/scheduler.c
270
static inline bool is_gvt_request(struct i915_request *rq)
drivers/gpu/drm/i915/gvt/scheduler.c
272
return intel_context_force_single_submission(rq->context);
drivers/gpu/drm/i915/gvt/scheduler.c
297
struct i915_request *rq = data;
drivers/gpu/drm/i915/gvt/scheduler.c
299
shadow_ctx_notifier_block[rq->engine->id]);
drivers/gpu/drm/i915/gvt/scheduler.c
301
enum intel_engine_id ring_id = rq->engine->id;
drivers/gpu/drm/i915/gvt/scheduler.c
305
if (!is_gvt_request(rq)) {
drivers/gpu/drm/i915/gvt/scheduler.c
311
NULL, rq->engine);
drivers/gpu/drm/i915/gvt/scheduler.c
329
workload->vgpu, rq->engine);
drivers/gpu/drm/i915/gvt/scheduler.c
338
save_ring_hw_state(workload->vgpu, rq->engine);
drivers/gpu/drm/i915/gvt/scheduler.c
342
save_ring_hw_state(workload->vgpu, rq->engine);
drivers/gpu/drm/i915/gvt/scheduler.c
471
struct i915_request *rq;
drivers/gpu/drm/i915/gvt/scheduler.c
476
rq = i915_request_create(s->shadow[workload->engine->id]);
drivers/gpu/drm/i915/gvt/scheduler.c
477
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/gvt/scheduler.c
479
return PTR_ERR(rq);
drivers/gpu/drm/i915/gvt/scheduler.c
482
workload->req = i915_request_get(rq);
drivers/gpu/drm/i915/gvt/scheduler.c
603
struct i915_request *rq = workload->req;
drivers/gpu/drm/i915/gvt/scheduler.c
605
(struct execlist_ring_context *)rq->context->lrc_reg_state;
drivers/gpu/drm/i915/gvt/scheduler.c
811
struct i915_request *rq;
drivers/gpu/drm/i915/gvt/scheduler.c
839
rq = fetch_and_zero(&workload->req);
drivers/gpu/drm/i915/gvt/scheduler.c
840
i915_request_put(rq);
drivers/gpu/drm/i915/gvt/scheduler.c
947
struct i915_request *rq = workload->req;
drivers/gpu/drm/i915/gvt/scheduler.c
961
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
drivers/gpu/drm/i915/gvt/scheduler.c
979
ring_base = rq->engine->mmio_base;
drivers/gpu/drm/i915/gvt/scheduler.c
983
context_page_num = rq->engine->context_size;
drivers/gpu/drm/i915/gvt/scheduler.c
986
if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
drivers/gpu/drm/i915/i915_active.c
1100
struct i915_request *rq)
drivers/gpu/drm/i915/i915_active.c
1106
fence = __i915_active_fence_set(active, &rq->fence);
drivers/gpu/drm/i915/i915_active.c
1108
err = i915_request_await_dma_fence(rq, fence);
drivers/gpu/drm/i915/i915_active.c
426
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
drivers/gpu/drm/i915/i915_active.c
428
u64 idx = i915_request_timeline(rq)->fence_context;
drivers/gpu/drm/i915/i915_active.c
429
struct dma_fence *fence = &rq->fence;
drivers/gpu/drm/i915/i915_active.c
719
int i915_request_await_active(struct i915_request *rq,
drivers/gpu/drm/i915/i915_active.c
723
return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
drivers/gpu/drm/i915/i915_active.c
968
void i915_request_add_active_barriers(struct i915_request *rq)
drivers/gpu/drm/i915/i915_active.c
970
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/i915_active.c
974
GEM_BUG_ON(!intel_context_is_barrier(rq->context));
drivers/gpu/drm/i915/i915_active.c
976
GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
drivers/gpu/drm/i915/i915_active.c
986
spin_lock_irqsave(&rq->lock, flags);
drivers/gpu/drm/i915/i915_active.c
989
smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
drivers/gpu/drm/i915/i915_active.c
990
list_add_tail((struct list_head *)node, &rq->fence.cb_list);
drivers/gpu/drm/i915/i915_active.c
992
spin_unlock_irqrestore(&rq->lock, flags);
drivers/gpu/drm/i915/i915_active.h
167
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);
drivers/gpu/drm/i915/i915_active.h
181
int i915_request_await_active(struct i915_request *rq,
drivers/gpu/drm/i915/i915_active.h
210
void i915_request_add_active_barriers(struct i915_request *rq);
drivers/gpu/drm/i915/i915_active.h
219
static inline int __i915_request_await_exclusive(struct i915_request *rq,
drivers/gpu/drm/i915/i915_active.h
227
err = i915_request_await_dma_fence(rq, fence);
drivers/gpu/drm/i915/i915_active.h
89
struct i915_request *rq);
drivers/gpu/drm/i915/i915_gpu_error.c
1545
const struct i915_request *rq,
drivers/gpu/drm/i915/i915_gpu_error.c
1550
for (c = rq->capture_list; c; c = c->next)
drivers/gpu/drm/i915/i915_gpu_error.c
1639
struct i915_request *rq,
drivers/gpu/drm/i915/i915_gpu_error.c
1644
vma = engine_coredump_add_context(ee, rq->context, gfp);
drivers/gpu/drm/i915/i915_gpu_error.c
1653
vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
drivers/gpu/drm/i915/i915_gpu_error.c
1654
vma = capture_user(vma, rq, gfp);
drivers/gpu/drm/i915/i915_gpu_error.c
1656
ee->rq_head = rq->head;
drivers/gpu/drm/i915/i915_gpu_error.c
1657
ee->rq_post = rq->postfix;
drivers/gpu/drm/i915/i915_gpu_error.c
1658
ee->rq_tail = rq->tail;
drivers/gpu/drm/i915/i915_gpu_error.c
1700
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/i915_gpu_error.c
1706
intel_engine_get_hung_entity(engine, &ce, &rq);
drivers/gpu/drm/i915/i915_gpu_error.c
1707
if (rq && !i915_request_started(rq)) {
drivers/gpu/drm/i915/i915_gpu_error.c
1716
engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
drivers/gpu/drm/i915/i915_gpu_error.c
1720
engine->name, rq->fence.context, rq->fence.seqno);
drivers/gpu/drm/i915/i915_gpu_error.c
1723
if (rq) {
drivers/gpu/drm/i915/i915_gpu_error.c
1724
capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
drivers/gpu/drm/i915/i915_gpu_error.c
1725
i915_request_put(rq);
drivers/gpu/drm/i915/i915_gpu_error.h
294
struct i915_request *rq,
drivers/gpu/drm/i915/i915_gpu_error.h
367
struct i915_request *rq,
drivers/gpu/drm/i915/i915_perf.c
1324
__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
drivers/gpu/drm/i915/i915_perf.c
1329
if (GRAPHICS_VER(rq->i915) >= 8)
drivers/gpu/drm/i915/i915_perf.c
1332
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/i915_perf.c
1341
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/i915_perf.c
1349
struct i915_request *rq;
drivers/gpu/drm/i915/i915_perf.c
1352
rq = i915_request_create(ce);
drivers/gpu/drm/i915/i915_perf.c
1353
if (IS_ERR(rq))
drivers/gpu/drm/i915/i915_perf.c
1354
return PTR_ERR(rq);
drivers/gpu/drm/i915/i915_perf.c
1356
i915_request_get(rq);
drivers/gpu/drm/i915/i915_perf.c
1358
err = __store_reg_to_mem(rq, reg, ggtt_offset);
drivers/gpu/drm/i915/i915_perf.c
1360
i915_request_add(rq);
drivers/gpu/drm/i915/i915_perf.c
1361
if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
drivers/gpu/drm/i915/i915_perf.c
1364
i915_request_put(rq);
drivers/gpu/drm/i915/i915_perf.c
2311
struct i915_request *rq;
drivers/gpu/drm/i915/i915_perf.c
2331
rq = i915_request_create(ce);
drivers/gpu/drm/i915/i915_perf.c
2333
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/i915_perf.c
2334
err = PTR_ERR(rq);
drivers/gpu/drm/i915/i915_perf.c
2340
err = i915_request_await_active(rq, active,
drivers/gpu/drm/i915/i915_perf.c
2345
err = i915_active_add_request(active, rq);
drivers/gpu/drm/i915/i915_perf.c
2350
err = i915_vma_move_to_active(vma, rq, 0);
drivers/gpu/drm/i915/i915_perf.c
2354
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/i915_perf.c
2361
i915_request_add(rq);
drivers/gpu/drm/i915/i915_perf.c
2483
gen8_store_flex(struct i915_request *rq,
drivers/gpu/drm/i915/i915_perf.c
2490
cs = intel_ring_begin(rq, 4 * count);
drivers/gpu/drm/i915/i915_perf.c
2502
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/i915_perf.c
2508
gen8_load_flex(struct i915_request *rq,
drivers/gpu/drm/i915/i915_perf.c
2516
cs = intel_ring_begin(rq, 2 * count + 2);
drivers/gpu/drm/i915/i915_perf.c
2527
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/i915_perf.c
2535
struct i915_request *rq;
drivers/gpu/drm/i915/i915_perf.c
2538
rq = intel_engine_create_kernel_request(ce->engine);
drivers/gpu/drm/i915/i915_perf.c
2539
if (IS_ERR(rq))
drivers/gpu/drm/i915/i915_perf.c
2540
return PTR_ERR(rq);
drivers/gpu/drm/i915/i915_perf.c
2543
err = intel_context_prepare_remote_request(ce, rq);
drivers/gpu/drm/i915/i915_perf.c
2545
err = gen8_store_flex(rq, ce, flex, count);
drivers/gpu/drm/i915/i915_perf.c
2547
i915_request_add(rq);
drivers/gpu/drm/i915/i915_perf.c
2556
struct i915_request *rq;
drivers/gpu/drm/i915/i915_perf.c
2560
rq = i915_request_create(ce);
drivers/gpu/drm/i915/i915_perf.c
2562
if (IS_ERR(rq))
drivers/gpu/drm/i915/i915_perf.c
2563
return PTR_ERR(rq);
drivers/gpu/drm/i915/i915_perf.c
2566
err = i915_active_add_request(active, rq);
drivers/gpu/drm/i915/i915_perf.c
2571
err = gen8_load_flex(rq, ce, flex, count);
drivers/gpu/drm/i915/i915_perf.c
2576
i915_request_add(rq);
drivers/gpu/drm/i915/i915_request.c
1002
rq->head = rq->ring->emit;
drivers/gpu/drm/i915/i915_request.c
1004
ret = rq->engine->request_alloc(rq);
drivers/gpu/drm/i915/i915_request.c
1008
rq->infix = rq->ring->emit; /* end of header; start of user payload */
drivers/gpu/drm/i915/i915_request.c
1011
list_add_tail_rcu(&rq->link, &tl->requests);
drivers/gpu/drm/i915/i915_request.c
1013
return rq;
drivers/gpu/drm/i915/i915_request.c
1016
ce->ring->emit = rq->head;
drivers/gpu/drm/i915/i915_request.c
1019
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
drivers/gpu/drm/i915/i915_request.c
1020
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
drivers/gpu/drm/i915/i915_request.c
1023
kmem_cache_free(slab_requests, rq);
drivers/gpu/drm/i915/i915_request.c
1032
struct i915_request *rq;
drivers/gpu/drm/i915/i915_request.c
1040
rq = list_first_entry(&tl->requests, typeof(*rq), link);
drivers/gpu/drm/i915/i915_request.c
1041
if (!list_is_last(&rq->link, &tl->requests))
drivers/gpu/drm/i915/i915_request.c
1042
i915_request_retire(rq);
drivers/gpu/drm/i915/i915_request.c
1045
rq = __i915_request_create(ce, GFP_KERNEL);
drivers/gpu/drm/i915/i915_request.c
1047
if (IS_ERR(rq))
drivers/gpu/drm/i915/i915_request.c
1051
rq->cookie = lockdep_pin_lock(&tl->mutex);
drivers/gpu/drm/i915/i915_request.c
1053
return rq;
drivers/gpu/drm/i915/i915_request.c
1057
return rq;
drivers/gpu/drm/i915/i915_request.c
1061
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
drivers/gpu/drm/i915/i915_request.c
1066
if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
drivers/gpu/drm/i915/i915_request.c
1117
if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
drivers/gpu/drm/i915/i915_request.c
1118
err = i915_sw_fence_await_dma_fence(&rq->submit,
drivers/gpu/drm/i915/i915_request.c
1127
already_busywaiting(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1141
return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
drivers/gpu/drm/i915/i915_request.c
116
struct i915_request *rq = to_request(fence);
drivers/gpu/drm/i915/i915_request.c
118
GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
drivers/gpu/drm/i915/i915_request.c
119
rq->guc_prio != GUC_PRIO_FINI);
drivers/gpu/drm/i915/i915_request.c
121
i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
drivers/gpu/drm/i915/i915_request.c
122
if (rq->batch_res) {
drivers/gpu/drm/i915/i915_request.c
123
i915_vma_resource_put(rq->batch_res);
drivers/gpu/drm/i915/i915_request.c
124
rq->batch_res = NULL;
drivers/gpu/drm/i915/i915_request.c
1336
static void mark_external(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
134
i915_sw_fence_fini(&rq->submit);
drivers/gpu/drm/i915/i915_request.c
1346
rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
drivers/gpu/drm/i915/i915_request.c
135
i915_sw_fence_fini(&rq->semaphore);
drivers/gpu/drm/i915/i915_request.c
1350
__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
drivers/gpu/drm/i915/i915_request.c
1352
mark_external(rq);
drivers/gpu/drm/i915/i915_request.c
1353
return i915_sw_fence_await_dma_fence(&rq->submit, fence,
drivers/gpu/drm/i915/i915_request.c
1359
i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
drivers/gpu/drm/i915/i915_request.c
1365
return __i915_request_await_external(rq, fence);
drivers/gpu/drm/i915/i915_request.c
1371
err = __i915_request_await_external(rq, iter);
drivers/gpu/drm/i915/i915_request.c
1375
err = i915_request_await_dma_fence(rq, chain->fence);
drivers/gpu/drm/i915/i915_request.c
1384
static inline bool is_parallel_rq(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1386
return intel_context_is_parallel(rq->context);
drivers/gpu/drm/i915/i915_request.c
1389
static inline struct intel_context *request_to_parent(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1391
return intel_context_to_parent(rq->context);
drivers/gpu/drm/i915/i915_request.c
1404
i915_request_await_execution(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
1426
if (fence->context == rq->fence.context)
drivers/gpu/drm/i915/i915_request.c
1435
if (is_same_parallel_context(rq, to_request(fence)))
drivers/gpu/drm/i915/i915_request.c
1437
ret = __i915_request_await_execution(rq,
drivers/gpu/drm/i915/i915_request.c
1440
ret = i915_request_await_external(rq, fence);
drivers/gpu/drm/i915/i915_request.c
1501
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
drivers/gpu/drm/i915/i915_request.c
1533
if (fence->context == rq->fence.context)
drivers/gpu/drm/i915/i915_request.c
1538
intel_timeline_sync_is_later(i915_request_timeline(rq),
drivers/gpu/drm/i915/i915_request.c
1543
if (is_same_parallel_context(rq, to_request(fence)))
drivers/gpu/drm/i915/i915_request.c
1545
ret = i915_request_await_request(rq, to_request(fence));
drivers/gpu/drm/i915/i915_request.c
1547
ret = i915_request_await_external(rq, fence);
drivers/gpu/drm/i915/i915_request.c
1554
intel_timeline_sync_set(i915_request_timeline(rq),
drivers/gpu/drm/i915/i915_request.c
1569
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
drivers/gpu/drm/i915/i915_request.c
1574
err = i915_request_await_dma_fence(rq, deps->fences[i]);
drivers/gpu/drm/i915/i915_request.c
1621
static void i915_request_await_huc(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1623
struct intel_huc *huc = &rq->context->engine->gt->uc.huc;
drivers/gpu/drm/i915/i915_request.c
1626
if (!rcu_access_pointer(rq->context->gem_context))
drivers/gpu/drm/i915/i915_request.c
1630
i915_sw_fence_await_sw_fence(&rq->submit,
drivers/gpu/drm/i915/i915_request.c
1632
&rq->hucq);
drivers/gpu/drm/i915/i915_request.c
1636
__i915_request_ensure_parallel_ordering(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
1641
GEM_BUG_ON(!is_parallel_rq(rq));
drivers/gpu/drm/i915/i915_request.c
1643
prev = request_to_parent(rq)->parallel.last_rq;
drivers/gpu/drm/i915/i915_request.c
1646
i915_sw_fence_await_sw_fence(&rq->submit,
drivers/gpu/drm/i915/i915_request.c
1648
&rq->submitq);
drivers/gpu/drm/i915/i915_request.c
1650
if (rq->engine->sched_engine->schedule)
drivers/gpu/drm/i915/i915_request.c
1651
__i915_sched_node_add_dependency(&rq->sched,
drivers/gpu/drm/i915/i915_request.c
1653
&rq->dep,
drivers/gpu/drm/i915/i915_request.c
1659
request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
drivers/gpu/drm/i915/i915_request.c
1667
&rq->fence));
drivers/gpu/drm/i915/i915_request.c
1671
__i915_request_ensure_ordering(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
1676
GEM_BUG_ON(is_parallel_rq(rq));
drivers/gpu/drm/i915/i915_request.c
1679
&rq->fence));
drivers/gpu/drm/i915/i915_request.c
168
if (is_power_of_2(rq->execution_mask) &&
drivers/gpu/drm/i915/i915_request.c
1682
bool uses_guc = intel_engine_uses_guc(rq->engine);
drivers/gpu/drm/i915/i915_request.c
1684
rq->engine->mask);
drivers/gpu/drm/i915/i915_request.c
1685
bool same_context = prev->context == rq->context;
drivers/gpu/drm/i915/i915_request.c
169
!cmpxchg(&rq->engine->request_pool, NULL, rq))
drivers/gpu/drm/i915/i915_request.c
1695
rq->fence.seqno));
drivers/gpu/drm/i915/i915_request.c
1698
i915_sw_fence_await_sw_fence(&rq->submit,
drivers/gpu/drm/i915/i915_request.c
1700
&rq->submitq);
drivers/gpu/drm/i915/i915_request.c
1702
__i915_sw_fence_await_dma_fence(&rq->submit,
drivers/gpu/drm/i915/i915_request.c
1704
&rq->dmaq);
drivers/gpu/drm/i915/i915_request.c
1705
if (rq->engine->sched_engine->schedule)
drivers/gpu/drm/i915/i915_request.c
1706
__i915_sched_node_add_dependency(&rq->sched,
drivers/gpu/drm/i915/i915_request.c
1708
&rq->dep,
drivers/gpu/drm/i915/i915_request.c
172
kmem_cache_free(slab_requests, rq);
drivers/gpu/drm/i915/i915_request.c
1720
__i915_request_add_to_timeline(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1722
struct intel_timeline *timeline = i915_request_timeline(rq);
drivers/gpu/drm/i915/i915_request.c
1732
if (rq->engine->class == VIDEO_DECODE_CLASS)
drivers/gpu/drm/i915/i915_request.c
1733
i915_request_await_huc(rq);
drivers/gpu/drm/i915/i915_request.c
1765
if (likely(!is_parallel_rq(rq)))
drivers/gpu/drm/i915/i915_request.c
1766
prev = __i915_request_ensure_ordering(rq, timeline);
drivers/gpu/drm/i915/i915_request.c
1768
prev = __i915_request_ensure_parallel_ordering(rq, timeline);
drivers/gpu/drm/i915/i915_request.c
1777
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
drivers/gpu/drm/i915/i915_request.c
1787
struct i915_request *__i915_request_commit(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1789
struct intel_engine_cs *engine = rq->engine;
drivers/gpu/drm/i915/i915_request.c
1790
struct intel_ring *ring = rq->ring;
drivers/gpu/drm/i915/i915_request.c
1793
RQ_TRACE(rq, "\n");
drivers/gpu/drm/i915/i915_request.c
1800
GEM_BUG_ON(rq->reserved_space > ring->space);
drivers/gpu/drm/i915/i915_request.c
1801
rq->reserved_space = 0;
drivers/gpu/drm/i915/i915_request.c
1802
rq->emitted_jiffies = jiffies;
drivers/gpu/drm/i915/i915_request.c
1810
cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
drivers/gpu/drm/i915/i915_request.c
1812
rq->postfix = intel_ring_offset(rq, cs);
drivers/gpu/drm/i915/i915_request.c
1814
return __i915_request_add_to_timeline(rq);
drivers/gpu/drm/i915/i915_request.c
1817
void __i915_request_queue_bh(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1819
i915_sw_fence_commit(&rq->semaphore);
drivers/gpu/drm/i915/i915_request.c
1820
i915_sw_fence_commit(&rq->submit);
drivers/gpu/drm/i915/i915_request.c
1823
void __i915_request_queue(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
1837
if (attr && rq->engine->sched_engine->schedule)
drivers/gpu/drm/i915/i915_request.c
1838
rq->engine->sched_engine->schedule(rq, attr);
drivers/gpu/drm/i915/i915_request.c
1841
__i915_request_queue_bh(rq);
drivers/gpu/drm/i915/i915_request.c
1845
void i915_request_add(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
1847
struct intel_timeline * const tl = i915_request_timeline(rq);
drivers/gpu/drm/i915/i915_request.c
1852
lockdep_unpin_lock(&tl->mutex, rq->cookie);
drivers/gpu/drm/i915/i915_request.c
1854
trace_i915_request_add(rq);
drivers/gpu/drm/i915/i915_request.c
1855
__i915_request_commit(rq);
drivers/gpu/drm/i915/i915_request.c
1859
ctx = rcu_dereference(rq->context->gem_context);
drivers/gpu/drm/i915/i915_request.c
1864
__i915_request_queue(rq, &attr);
drivers/gpu/drm/i915/i915_request.c
1902
static bool __i915_spin_request(struct i915_request * const rq, int state)
drivers/gpu/drm/i915/i915_request.c
1918
if (!i915_request_is_running(rq))
drivers/gpu/drm/i915/i915_request.c
193
__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
drivers/gpu/drm/i915/i915_request.c
1932
timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
drivers/gpu/drm/i915/i915_request.c
1935
if (dma_fence_is_signaled(&rq->fence))
drivers/gpu/drm/i915/i915_request.c
197
if (llist_empty(&rq->execute_cb))
drivers/gpu/drm/i915/i915_request.c
1981
long i915_request_wait_timeout(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
1992
if (dma_fence_is_signaled(&rq->fence))
drivers/gpu/drm/i915/i915_request.c
1998
trace_i915_request_wait_begin(rq, flags);
drivers/gpu/drm/i915/i915_request.c
2006
mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
drivers/gpu/drm/i915/i915_request.c
201
llist_del_all(&rq->execute_cb),
drivers/gpu/drm/i915/i915_request.c
2032
__i915_spin_request(rq, state))
drivers/gpu/drm/i915/i915_request.c
2047
if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
drivers/gpu/drm/i915/i915_request.c
2048
intel_rps_boost(rq);
drivers/gpu/drm/i915/i915_request.c
2051
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
drivers/gpu/drm/i915/i915_request.c
206
static void __notify_execute_cb_irq(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
2069
if (i915_request_is_ready(rq))
drivers/gpu/drm/i915/i915_request.c
2070
__intel_engine_flush_submission(rq->engine, false);
drivers/gpu/drm/i915/i915_request.c
2075
if (dma_fence_is_signaled(&rq->fence))
drivers/gpu/drm/i915/i915_request.c
208
__notify_execute_cb(rq, irq_work_queue);
drivers/gpu/drm/i915/i915_request.c
2093
dma_fence_remove_callback(&rq->fence, &wait.cb);
drivers/gpu/drm/i915/i915_request.c
2097
mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
drivers/gpu/drm/i915/i915_request.c
2098
trace_i915_request_wait_end(rq);
drivers/gpu/drm/i915/i915_request.c
2120
long i915_request_wait(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
2124
long ret = i915_request_wait_timeout(rq, flags, timeout);
drivers/gpu/drm/i915/i915_request.c
2147
static char queue_status(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
2149
if (i915_request_is_active(rq))
drivers/gpu/drm/i915/i915_request.c
2152
if (i915_request_is_ready(rq))
drivers/gpu/drm/i915/i915_request.c
2153
return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
drivers/gpu/drm/i915/i915_request.c
2158
static const char *run_status(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
2160
if (__i915_request_is_complete(rq))
drivers/gpu/drm/i915/i915_request.c
2163
if (__i915_request_has_started(rq))
drivers/gpu/drm/i915/i915_request.c
2166
if (!i915_sw_fence_signaled(&rq->semaphore))
drivers/gpu/drm/i915/i915_request.c
217
void i915_request_notify_execute_cb_imm(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
2172
static const char *fence_status(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
2174
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
drivers/gpu/drm/i915/i915_request.c
2177
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
drivers/gpu/drm/i915/i915_request.c
2184
const struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
219
__notify_execute_cb(rq, irq_work_imm);
drivers/gpu/drm/i915/i915_request.c
222
static void __i915_request_fill(struct i915_request *rq, u8 val)
drivers/gpu/drm/i915/i915_request.c
2222
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
drivers/gpu/drm/i915/i915_request.c
2225
timeline = dma_fence_timeline_name((struct dma_fence *)&rq->fence);
drivers/gpu/drm/i915/i915_request.c
2228
queue_status(rq),
drivers/gpu/drm/i915/i915_request.c
2229
rq->fence.context, rq->fence.seqno,
drivers/gpu/drm/i915/i915_request.c
2230
run_status(rq),
drivers/gpu/drm/i915/i915_request.c
2231
fence_status(rq),
drivers/gpu/drm/i915/i915_request.c
2233
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
drivers/gpu/drm/i915/i915_request.c
2238
static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
224
void *vaddr = rq->ring->vaddr;
drivers/gpu/drm/i915/i915_request.c
2242
return ring == i915_ggtt_offset(rq->ring->vma);
drivers/gpu/drm/i915/i915_request.c
2245
static bool match_ring(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
2251
if (!intel_engine_is_virtual(rq->engine))
drivers/gpu/drm/i915/i915_request.c
2252
return engine_match_ring(rq->engine, rq);
drivers/gpu/drm/i915/i915_request.c
2256
while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
drivers/gpu/drm/i915/i915_request.c
2257
found = engine_match_ring(engine, rq);
drivers/gpu/drm/i915/i915_request.c
2265
enum i915_request_state i915_test_request_state(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
2267
if (i915_request_completed(rq))
drivers/gpu/drm/i915/i915_request.c
227
head = rq->infix;
drivers/gpu/drm/i915/i915_request.c
2270
if (!i915_request_started(rq))
drivers/gpu/drm/i915/i915_request.c
2273
if (match_ring(rq))
drivers/gpu/drm/i915/i915_request.c
228
if (rq->postfix < head) {
drivers/gpu/drm/i915/i915_request.c
229
memset(vaddr + head, val, rq->ring->size - head);
drivers/gpu/drm/i915/i915_request.c
232
memset(vaddr + head, val, rq->postfix - head);
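The __i915_request_fill() excerpts (lines 222-232) show how a request's payload is poisoned in the ring buffer, splitting the memset() when the payload wraps past the end of the ring. A reconstruction from those fragments; declarations not quoted in the listing are assumptions.

/*
 * Fill the request payload (infix..postfix) with val, handling the
 * case where the payload wraps around the end of the ring.
 */
static void __i915_request_fill(struct i915_request *rq, u8 val)
{
	void *vaddr = rq->ring->vaddr;
	u32 head = rq->infix;

	if (rq->postfix < head) {	/* payload wraps the ring */
		memset(vaddr + head, val, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, val, rq->postfix - head);
}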
drivers/gpu/drm/i915/i915_request.c
246
i915_request_active_engine(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
259
locked = READ_ONCE(rq->engine);
drivers/gpu/drm/i915/i915_request.c
261
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
drivers/gpu/drm/i915/i915_request.c
267
if (i915_request_is_active(rq)) {
drivers/gpu/drm/i915/i915_request.c
268
if (!__i915_request_is_complete(rq))
drivers/gpu/drm/i915/i915_request.c
280
struct i915_request *rq =
drivers/gpu/drm/i915/i915_request.c
282
struct intel_gt *gt = rq->engine->gt;
drivers/gpu/drm/i915/i915_request.c
284
if (!i915_request_completed(rq)) {
drivers/gpu/drm/i915/i915_request.c
285
if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
drivers/gpu/drm/i915/i915_request.c
288
i915_request_put(rq);
drivers/gpu/drm/i915/i915_request.c
294
static void __rq_init_watchdog(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
296
struct i915_request_watchdog *wdg = &rq->watchdog;
drivers/gpu/drm/i915/i915_request.c
301
static void __rq_arm_watchdog(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
303
struct i915_request_watchdog *wdg = &rq->watchdog;
drivers/gpu/drm/i915/i915_request.c
304
struct intel_context *ce = rq->context;
drivers/gpu/drm/i915/i915_request.c
309
i915_request_get(rq);
drivers/gpu/drm/i915/i915_request.c
318
static void __rq_cancel_watchdog(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
320
struct i915_request_watchdog *wdg = &rq->watchdog;
drivers/gpu/drm/i915/i915_request.c
323
i915_request_put(rq);
drivers/gpu/drm/i915/i915_request.c
358
bool i915_request_retire(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
360
if (!__i915_request_is_complete(rq))
drivers/gpu/drm/i915/i915_request.c
363
RQ_TRACE(rq, "\n");
drivers/gpu/drm/i915/i915_request.c
365
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
drivers/gpu/drm/i915/i915_request.c
366
trace_i915_request_retire(rq);
drivers/gpu/drm/i915/i915_request.c
367
i915_request_mark_complete(rq);
drivers/gpu/drm/i915/i915_request.c
369
__rq_cancel_watchdog(rq);
drivers/gpu/drm/i915/i915_request.c
380
GEM_BUG_ON(!list_is_first(&rq->link,
drivers/gpu/drm/i915/i915_request.c
381
&i915_request_timeline(rq)->requests));
drivers/gpu/drm/i915/i915_request.c
384
__i915_request_fill(rq, POISON_FREE);
drivers/gpu/drm/i915/i915_request.c
385
rq->ring->head = rq->postfix;
drivers/gpu/drm/i915/i915_request.c
387
if (!i915_request_signaled(rq)) {
drivers/gpu/drm/i915/i915_request.c
388
spin_lock_irq(&rq->lock);
drivers/gpu/drm/i915/i915_request.c
389
dma_fence_signal_locked(&rq->fence);
drivers/gpu/drm/i915/i915_request.c
390
spin_unlock_irq(&rq->lock);
drivers/gpu/drm/i915/i915_request.c
393
if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
drivers/gpu/drm/i915/i915_request.c
394
intel_rps_dec_waiters(&rq->engine->gt->rps);
drivers/gpu/drm/i915/i915_request.c
406
rq->engine->remove_active_request(rq);
drivers/gpu/drm/i915/i915_request.c
407
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
drivers/gpu/drm/i915/i915_request.c
409
__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
drivers/gpu/drm/i915/i915_request.c
411
intel_context_exit(rq->context);
drivers/gpu/drm/i915/i915_request.c
412
intel_context_unpin(rq->context);
drivers/gpu/drm/i915/i915_request.c
414
i915_sched_node_fini(&rq->sched);
drivers/gpu/drm/i915/i915_request.c
415
i915_request_put(rq);
drivers/gpu/drm/i915/i915_request.c
420
void i915_request_retire_upto(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
422
struct intel_timeline * const tl = i915_request_timeline(rq);
drivers/gpu/drm/i915/i915_request.c
425
RQ_TRACE(rq, "\n");
drivers/gpu/drm/i915/i915_request.c
426
GEM_BUG_ON(!__i915_request_is_complete(rq));
drivers/gpu/drm/i915/i915_request.c
431
} while (i915_request_retire(tmp) && tmp != rq);
drivers/gpu/drm/i915/i915_request.c
442
struct i915_request * const *port, *rq;
drivers/gpu/drm/i915/i915_request.c
489
(rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
drivers/gpu/drm/i915/i915_request.c
491
if (rq->context == signal->context) {
drivers/gpu/drm/i915/i915_request.c
492
inflight = i915_seqno_passed(rq->fence.seqno,
drivers/gpu/drm/i915/i915_request.c
503
__await_execution(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.c
516
cb->fence = &rq->submit;
drivers/gpu/drm/i915/i915_request.c
554
void __i915_request_skip(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
556
GEM_BUG_ON(!fatal_error(rq->fence.error));
drivers/gpu/drm/i915/i915_request.c
558
if (rq->infix == rq->postfix)
drivers/gpu/drm/i915/i915_request.c
561
RQ_TRACE(rq, "error: %d\n", rq->fence.error);
drivers/gpu/drm/i915/i915_request.c
568
__i915_request_fill(rq, 0);
drivers/gpu/drm/i915/i915_request.c
569
rq->infix = rq->postfix;
drivers/gpu/drm/i915/i915_request.c
572
bool i915_request_set_error_once(struct i915_request *rq, int error)
drivers/gpu/drm/i915/i915_request.c
578
if (i915_request_signaled(rq))
drivers/gpu/drm/i915/i915_request.c
581
old = READ_ONCE(rq->fence.error);
drivers/gpu/drm/i915/i915_request.c
585
} while (!try_cmpxchg(&rq->fence.error, &old, error));
drivers/gpu/drm/i915/i915_request.c
590
struct i915_request *i915_request_mark_eio(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.c
592
if (__i915_request_is_complete(rq))
drivers/gpu/drm/i915/i915_request.c
595
GEM_BUG_ON(i915_request_signaled(rq));
drivers/gpu/drm/i915/i915_request.c
598
rq = i915_request_get(rq);
drivers/gpu/drm/i915/i915_request.c
600
i915_request_set_error_once(rq, -EIO);
drivers/gpu/drm/i915/i915_request.c
601
i915_request_mark_complete(rq);
drivers/gpu/drm/i915/i915_request.c
603
return rq;
drivers/gpu/drm/i915/i915_request.c
763
void i915_request_cancel(struct i915_request *rq, int error)
drivers/gpu/drm/i915/i915_request.c
765
if (!i915_request_set_error_once(rq, error))
drivers/gpu/drm/i915/i915_request.c
768
set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.c
770
intel_context_cancel_request(rq->context, rq);
drivers/gpu/drm/i915/i915_request.c
812
struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
drivers/gpu/drm/i915/i915_request.c
819
i915_request_put(rq);
drivers/gpu/drm/i915/i915_request.c
828
struct i915_request *rq, *rn;
drivers/gpu/drm/i915/i915_request.c
830
list_for_each_entry_safe(rq, rn, &tl->requests, link)
drivers/gpu/drm/i915/i915_request.c
831
if (!i915_request_retire(rq))
drivers/gpu/drm/i915/i915_request.c
840
struct i915_request *rq;
drivers/gpu/drm/i915/i915_request.c
844
rq = xchg(rsvd, NULL);
drivers/gpu/drm/i915/i915_request.c
845
if (!rq) /* Use the normal failure path for one final WARN */
drivers/gpu/drm/i915/i915_request.c
848
return rq;
drivers/gpu/drm/i915/i915_request.c
855
rq = list_first_entry(&tl->requests, typeof(*rq), link);
drivers/gpu/drm/i915/i915_request.c
856
i915_request_retire(rq);
drivers/gpu/drm/i915/i915_request.c
858
rq = kmem_cache_alloc(slab_requests,
drivers/gpu/drm/i915/i915_request.c
860
if (rq)
drivers/gpu/drm/i915/i915_request.c
861
return rq;
drivers/gpu/drm/i915/i915_request.c
864
rq = list_last_entry(&tl->requests, typeof(*rq), link);
drivers/gpu/drm/i915/i915_request.c
865
cond_synchronize_rcu(rq->rcustate);
drivers/gpu/drm/i915/i915_request.c
876
struct i915_request *rq = arg;
drivers/gpu/drm/i915/i915_request.c
878
spin_lock_init(&rq->lock);
drivers/gpu/drm/i915/i915_request.c
879
i915_sched_node_init(&rq->sched);
drivers/gpu/drm/i915/i915_request.c
880
i915_sw_fence_init(&rq->submit, submit_notify);
drivers/gpu/drm/i915/i915_request.c
881
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
drivers/gpu/drm/i915/i915_request.c
883
clear_capture_list(rq);
drivers/gpu/drm/i915/i915_request.c
884
rq->batch_res = NULL;
drivers/gpu/drm/i915/i915_request.c
886
init_llist_head(&rq->execute_cb);
drivers/gpu/drm/i915/i915_request.c
899
struct i915_request *rq;
drivers/gpu/drm/i915/i915_request.c
937
rq = kmem_cache_alloc(slab_requests,
drivers/gpu/drm/i915/i915_request.c
939
if (unlikely(!rq)) {
drivers/gpu/drm/i915/i915_request.c
940
rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
drivers/gpu/drm/i915/i915_request.c
941
if (!rq) {
drivers/gpu/drm/i915/i915_request.c
947
rq->context = ce;
drivers/gpu/drm/i915/i915_request.c
948
rq->engine = ce->engine;
drivers/gpu/drm/i915/i915_request.c
949
rq->ring = ce->ring;
drivers/gpu/drm/i915/i915_request.c
950
rq->execution_mask = ce->engine->mask;
drivers/gpu/drm/i915/i915_request.c
951
rq->i915 = ce->engine->i915;
drivers/gpu/drm/i915/i915_request.c
953
ret = intel_timeline_get_seqno(tl, rq, &seqno);
drivers/gpu/drm/i915/i915_request.c
957
dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
drivers/gpu/drm/i915/i915_request.c
960
RCU_INIT_POINTER(rq->timeline, tl);
drivers/gpu/drm/i915/i915_request.c
961
rq->hwsp_seqno = tl->hwsp_seqno;
drivers/gpu/drm/i915/i915_request.c
962
GEM_BUG_ON(__i915_request_is_complete(rq));
drivers/gpu/drm/i915/i915_request.c
964
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
drivers/gpu/drm/i915/i915_request.c
966
rq->guc_prio = GUC_PRIO_INIT;
drivers/gpu/drm/i915/i915_request.c
969
i915_sw_fence_reinit(&i915_request_get(rq)->submit);
drivers/gpu/drm/i915/i915_request.c
970
i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
drivers/gpu/drm/i915/i915_request.c
972
i915_sched_node_reinit(&rq->sched);
drivers/gpu/drm/i915/i915_request.c
975
clear_batch_ptr(rq);
drivers/gpu/drm/i915/i915_request.c
976
__rq_init_watchdog(rq);
drivers/gpu/drm/i915/i915_request.c
977
assert_capture_list_is_null(rq);
drivers/gpu/drm/i915/i915_request.c
978
GEM_BUG_ON(!llist_empty(&rq->execute_cb));
drivers/gpu/drm/i915/i915_request.c
979
GEM_BUG_ON(rq->batch_res);
drivers/gpu/drm/i915/i915_request.c
993
rq->reserved_space =
drivers/gpu/drm/i915/i915_request.c
994
2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
drivers/gpu/drm/i915/i915_request.h
379
void __i915_request_skip(struct i915_request *rq);
drivers/gpu/drm/i915/i915_request.h
380
bool i915_request_set_error_once(struct i915_request *rq, int error);
drivers/gpu/drm/i915/i915_request.h
381
struct i915_request *i915_request_mark_eio(struct i915_request *rq);
drivers/gpu/drm/i915/i915_request.h
384
void __i915_request_queue(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.h
386
void __i915_request_queue_bh(struct i915_request *rq);
drivers/gpu/drm/i915/i915_request.h
388
bool i915_request_retire(struct i915_request *rq);
drivers/gpu/drm/i915/i915_request.h
389
void i915_request_retire_upto(struct i915_request *rq);
drivers/gpu/drm/i915/i915_request.h
401
i915_request_get(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
403
return to_request(dma_fence_get(&rq->fence));
drivers/gpu/drm/i915/i915_request.h
407
i915_request_get_rcu(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
409
return to_request(dma_fence_get_rcu(&rq->fence));
drivers/gpu/drm/i915/i915_request.h
413
i915_request_put(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
415
dma_fence_put(&rq->fence);
drivers/gpu/drm/i915/i915_request.h
421
int i915_request_await_dma_fence(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.h
423
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
drivers/gpu/drm/i915/i915_request.h
424
int i915_request_await_execution(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.h
427
void i915_request_add(struct i915_request *rq);
drivers/gpu/drm/i915/i915_request.h
435
void i915_request_cancel(struct i915_request *rq, int error);
drivers/gpu/drm/i915/i915_request.h
437
long i915_request_wait_timeout(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.h
442
long i915_request_wait(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.h
451
const struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.h
455
static inline bool i915_request_signaled(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
458
return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.h
461
static inline bool i915_request_is_active(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
463
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.h
466
static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
468
return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.h
472
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
474
return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.h
485
static inline u32 __hwsp_seqno(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
487
const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
drivers/gpu/drm/i915/i915_request.h
505
static inline u32 hwsp_seqno(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
510
seqno = __hwsp_seqno(rq);
drivers/gpu/drm/i915/i915_request.h
516
static inline bool __i915_request_has_started(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
518
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
drivers/gpu/drm/i915/i915_request.h
547
static inline bool i915_request_started(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
551
if (i915_request_signaled(rq))
drivers/gpu/drm/i915/i915_request.h
556
if (likely(!i915_request_signaled(rq)))
drivers/gpu/drm/i915/i915_request.h
558
result = __i915_request_has_started(rq);
drivers/gpu/drm/i915/i915_request.h
572
static inline bool i915_request_is_running(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
576
if (!i915_request_is_active(rq))
drivers/gpu/drm/i915/i915_request.h
580
result = __i915_request_has_started(rq) && i915_request_is_active(rq);
drivers/gpu/drm/i915/i915_request.h
602
static inline bool i915_request_is_ready(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
604
return !list_empty(&rq->sched.link);
drivers/gpu/drm/i915/i915_request.h
607
static inline bool __i915_request_is_complete(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
609
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
drivers/gpu/drm/i915/i915_request.h
612
static inline bool i915_request_completed(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
616
if (i915_request_signaled(rq))
drivers/gpu/drm/i915/i915_request.h
621
if (likely(!i915_request_signaled(rq)))
drivers/gpu/drm/i915/i915_request.h
622
result = __i915_request_is_complete(rq);
drivers/gpu/drm/i915/i915_request.h
628
static inline void i915_request_mark_complete(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
630
WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
drivers/gpu/drm/i915/i915_request.h
631
(u32 *)&rq->fence.seqno);
drivers/gpu/drm/i915/i915_request.h
634
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
636
return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.h
639
static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
642
return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
drivers/gpu/drm/i915/i915_request.h
645
static inline bool i915_request_has_sentinel(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
647
return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
drivers/gpu/drm/i915/i915_request.h
65
#define RQ_TRACE(rq, fmt, ...) do { \
drivers/gpu/drm/i915/i915_request.h
650
static inline bool i915_request_on_hold(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
652
return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
drivers/gpu/drm/i915/i915_request.h
655
static inline void i915_request_set_hold(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
657
set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.h
66
const struct i915_request *rq__ = (rq); \
drivers/gpu/drm/i915/i915_request.h
660
static inline void i915_request_clear_hold(struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
662
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
drivers/gpu/drm/i915/i915_request.h
666
i915_request_timeline(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
669
return rcu_dereference_protected(rq->timeline,
drivers/gpu/drm/i915/i915_request.h
670
lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
drivers/gpu/drm/i915/i915_request.h
671
test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
drivers/gpu/drm/i915/i915_request.h
675
i915_request_gem_context(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
678
return rcu_dereference_protected(rq->context->gem_context, true);
drivers/gpu/drm/i915/i915_request.h
682
i915_request_active_timeline(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
689
return rcu_dereference_protected(rq->timeline,
drivers/gpu/drm/i915/i915_request.h
690
lockdep_is_held(&rq->engine->sched_engine->lock));
drivers/gpu/drm/i915/i915_request.h
694
i915_request_active_seqno(const struct i915_request *rq)
drivers/gpu/drm/i915/i915_request.h
697
page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
drivers/gpu/drm/i915/i915_request.h
698
u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);
drivers/gpu/drm/i915/i915_request.h
715
i915_request_active_engine(struct i915_request *rq,
drivers/gpu/drm/i915/i915_request.h
718
void i915_request_notify_execute_cb_imm(struct i915_request *rq);
drivers/gpu/drm/i915/i915_request.h
728
enum i915_request_state i915_test_request_state(struct i915_request *rq);
drivers/gpu/drm/i915/i915_scheduler.c
133
const struct i915_request *rq = node_to_request(node);
drivers/gpu/drm/i915/i915_scheduler.c
144
while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
drivers/gpu/drm/i915/i915_scheduler.c
289
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
drivers/gpu/drm/i915/i915_scheduler.c
292
__i915_schedule(&rq->sched, attr);
drivers/gpu/drm/i915/i915_scheduler.c
410
const struct i915_request *rq,
drivers/gpu/drm/i915/i915_scheduler.c
416
i915_request_show(m, rq, prefix, indent);
drivers/gpu/drm/i915/i915_scheduler.c
417
if (i915_request_completed(rq))
drivers/gpu/drm/i915/i915_scheduler.c
421
for_each_signaler(dep, rq) {
drivers/gpu/drm/i915/i915_scheduler.c
426
if (signaler->timeline == rq->timeline)
drivers/gpu/drm/i915/i915_scheduler.h
96
const struct i915_request *rq,
drivers/gpu/drm/i915/i915_scheduler_types.h
179
void (*kick_backend)(const struct i915_request *rq,
drivers/gpu/drm/i915/i915_scheduler_types.h
185
void (*bump_inflight_request_prio)(struct i915_request *rq,
drivers/gpu/drm/i915/i915_scheduler_types.h
192
void (*retire_inflight_request_prio)(struct i915_request *rq);
drivers/gpu/drm/i915/i915_trace.h
267
TP_PROTO(struct i915_request *rq, u32 flags),
drivers/gpu/drm/i915/i915_trace.h
268
TP_ARGS(rq, flags),
drivers/gpu/drm/i915/i915_trace.h
280
__entry->dev = rq->i915->drm.primary->index;
drivers/gpu/drm/i915/i915_trace.h
281
__entry->class = rq->engine->uabi_class;
drivers/gpu/drm/i915/i915_trace.h
282
__entry->instance = rq->engine->uabi_instance;
drivers/gpu/drm/i915/i915_trace.h
283
__entry->ctx = rq->fence.context;
drivers/gpu/drm/i915/i915_trace.h
284
__entry->seqno = rq->fence.seqno;
drivers/gpu/drm/i915/i915_trace.h
294
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
295
TP_ARGS(rq),
drivers/gpu/drm/i915/i915_trace.h
307
__entry->dev = rq->i915->drm.primary->index;
drivers/gpu/drm/i915/i915_trace.h
308
__entry->class = rq->engine->uabi_class;
drivers/gpu/drm/i915/i915_trace.h
309
__entry->instance = rq->engine->uabi_instance;
drivers/gpu/drm/i915/i915_trace.h
310
__entry->ctx = rq->fence.context;
drivers/gpu/drm/i915/i915_trace.h
311
__entry->seqno = rq->fence.seqno;
drivers/gpu/drm/i915/i915_trace.h
312
__entry->tail = rq->tail;
drivers/gpu/drm/i915/i915_trace.h
321
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
322
TP_ARGS(rq)
drivers/gpu/drm/i915/i915_trace.h
327
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
328
TP_ARGS(rq)
drivers/gpu/drm/i915/i915_trace.h
332
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
333
TP_ARGS(rq)
drivers/gpu/drm/i915/i915_trace.h
337
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
338
TP_ARGS(rq)
drivers/gpu/drm/i915/i915_trace.h
342
TP_PROTO(struct i915_request *rq, unsigned int port),
drivers/gpu/drm/i915/i915_trace.h
343
TP_ARGS(rq, port),
drivers/gpu/drm/i915/i915_trace.h
356
__entry->dev = rq->i915->drm.primary->index;
drivers/gpu/drm/i915/i915_trace.h
357
__entry->class = rq->engine->uabi_class;
drivers/gpu/drm/i915/i915_trace.h
358
__entry->instance = rq->engine->uabi_instance;
drivers/gpu/drm/i915/i915_trace.h
359
__entry->ctx = rq->fence.context;
drivers/gpu/drm/i915/i915_trace.h
360
__entry->seqno = rq->fence.seqno;
drivers/gpu/drm/i915/i915_trace.h
361
__entry->prio = rq->sched.attr.priority;
drivers/gpu/drm/i915/i915_trace.h
372
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
373
TP_ARGS(rq),
drivers/gpu/drm/i915/i915_trace.h
385
__entry->dev = rq->i915->drm.primary->index;
drivers/gpu/drm/i915/i915_trace.h
386
__entry->class = rq->engine->uabi_class;
drivers/gpu/drm/i915/i915_trace.h
387
__entry->instance = rq->engine->uabi_instance;
drivers/gpu/drm/i915/i915_trace.h
388
__entry->ctx = rq->fence.context;
drivers/gpu/drm/i915/i915_trace.h
389
__entry->seqno = rq->fence.seqno;
drivers/gpu/drm/i915/i915_trace.h
390
__entry->completed = i915_request_completed(rq);
drivers/gpu/drm/i915/i915_trace.h
500
trace_i915_request_guc_submit(struct i915_request *rq)
drivers/gpu/drm/i915/i915_trace.h
505
trace_i915_request_submit(struct i915_request *rq)
drivers/gpu/drm/i915/i915_trace.h
510
trace_i915_request_execute(struct i915_request *rq)
drivers/gpu/drm/i915/i915_trace.h
515
trace_i915_request_in(struct i915_request *rq, unsigned int port)
drivers/gpu/drm/i915/i915_trace.h
520
trace_i915_request_out(struct i915_request *rq)
drivers/gpu/drm/i915/i915_trace.h
602
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
603
TP_ARGS(rq)
drivers/gpu/drm/i915/i915_trace.h
607
TP_PROTO(struct i915_request *rq, unsigned int flags),
drivers/gpu/drm/i915/i915_trace.h
608
TP_ARGS(rq, flags),
drivers/gpu/drm/i915/i915_trace.h
626
__entry->dev = rq->i915->drm.primary->index;
drivers/gpu/drm/i915/i915_trace.h
627
__entry->class = rq->engine->uabi_class;
drivers/gpu/drm/i915/i915_trace.h
628
__entry->instance = rq->engine->uabi_instance;
drivers/gpu/drm/i915/i915_trace.h
629
__entry->ctx = rq->fence.context;
drivers/gpu/drm/i915/i915_trace.h
630
__entry->seqno = rq->fence.seqno;
drivers/gpu/drm/i915/i915_trace.h
641
TP_PROTO(struct i915_request *rq),
drivers/gpu/drm/i915/i915_trace.h
642
TP_ARGS(rq)
drivers/gpu/drm/i915/i915_vma.c
1953
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
drivers/gpu/drm/i915/i915_vma.c
1955
return __i915_request_await_exclusive(rq, &vma->active);
drivers/gpu/drm/i915/i915_vma.c
1958
static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
drivers/gpu/drm/i915/i915_vma.c
1963
err = __i915_request_await_bind(rq, vma);
drivers/gpu/drm/i915/i915_vma.c
1967
return i915_active_add_request(&vma->active, rq);
drivers/gpu/drm/i915/i915_vma.c
1971
struct i915_request *rq,
drivers/gpu/drm/i915/i915_vma.c
1983
err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
drivers/gpu/drm/i915/i915_vma.c
1987
err = __i915_vma_move_to_active(vma, rq);
drivers/gpu/drm/i915/i915_vma.c
2012
i915_active_add_request(&front->write, rq);
drivers/gpu/drm/i915/i915_vma.c
2036
i915_active_add_request(&vma->fence->active, rq);
drivers/gpu/drm/i915/i915_vma.h
61
struct i915_request *rq,
drivers/gpu/drm/i915/i915_vma.h
65
i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
drivers/gpu/drm/i915/i915_vma.h
68
return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
107
rq = i915_request_create(ce);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
108
if (IS_ERR(rq))
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
109
return PTR_ERR(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
112
err = ce->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
117
cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
126
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
129
i915_request_get(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
132
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
134
pxp_request_commit(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
136
if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
139
i915_request_put(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
83
static void pxp_request_commit(struct i915_request *rq)
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
86
struct intel_timeline * const tl = i915_request_timeline(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
88
lockdep_unpin_lock(&tl->mutex, rq->cookie);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
90
trace_i915_request_add(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
91
__i915_request_commit(rq);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
92
__i915_request_queue(rq, &attr);
drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
99
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_active.c
103
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_active.c
105
rq = intel_engine_create_kernel_request(engine);
drivers/gpu/drm/i915/selftests/i915_active.c
106
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_active.c
107
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_active.c
111
err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
drivers/gpu/drm/i915/selftests/i915_active.c
115
err = i915_active_add_request(&active->base, rq);
drivers/gpu/drm/i915/selftests/i915_active.c
116
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_gem.c
28
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_gem.c
30
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/selftests/i915_gem.c
31
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_gem.c
32
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_gem.c
36
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
457
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
465
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
469
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
471
if (PTR_ERR(rq) != -EBUSY) {
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
474
(int)PTR_ERR(rq));
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
475
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
481
err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
487
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
491
last = i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
161
static int write_timestamp(struct i915_request *rq, int slot)
drivers/gpu/drm/i915/selftests/i915_perf.c
166
cs = intel_ring_begin(rq, 6);
drivers/gpu/drm/i915/selftests/i915_perf.c
171
if (GRAPHICS_VER(rq->i915) >= 8)
drivers/gpu/drm/i915/selftests/i915_perf.c
183
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_perf.c
188
static ktime_t poll_status(struct i915_request *rq, int slot)
drivers/gpu/drm/i915/selftests/i915_perf.c
190
while (!intel_read_status_page(rq->engine, slot) &&
drivers/gpu/drm/i915/selftests/i915_perf.c
191
!i915_request_completed(rq))
drivers/gpu/drm/i915/selftests/i915_perf.c
201
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_perf.c
224
rq = intel_engine_create_kernel_request(stream->engine);
drivers/gpu/drm/i915/selftests/i915_perf.c
225
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_perf.c
226
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
230
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/selftests/i915_perf.c
231
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
233
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
238
err = write_timestamp(rq, 0x100);
drivers/gpu/drm/i915/selftests/i915_perf.c
240
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
244
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/selftests/i915_perf.c
248
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
252
err = write_timestamp(rq, 0x102);
drivers/gpu/drm/i915/selftests/i915_perf.c
254
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
258
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
259
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
262
t0 = poll_status(rq, 0x100);
drivers/gpu/drm/i915/selftests/i915_perf.c
263
t1 = poll_status(rq, 0x102);
drivers/gpu/drm/i915/selftests/i915_perf.c
283
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
294
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_perf.c
319
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/selftests/i915_perf.c
320
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_perf.c
321
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
324
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
326
if (rq->engine->emit_init_breadcrumb) {
drivers/gpu/drm/i915/selftests/i915_perf.c
327
err = rq->engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
329
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
335
cs = intel_ring_begin(rq, 2 * 32 + 2);
drivers/gpu/drm/i915/selftests/i915_perf.c
338
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
348
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_perf.c
351
err = rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/selftests/i915_perf.c
355
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
360
store = memset32(rq->engine->status_page.addr + 512, 0, 32);
drivers/gpu/drm/i915/selftests/i915_perf.c
364
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/selftests/i915_perf.c
367
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
378
*cs++ = i915_ggtt_offset(rq->engine->status_page.vma) +
drivers/gpu/drm/i915/selftests/i915_perf.c
382
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_perf.c
385
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_perf.c
387
if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ / 2) < 0) {
drivers/gpu/drm/i915/selftests/i915_perf.c
412
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1009
static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
drivers/gpu/drm/i915/selftests/i915_request.c
1011
return rq->engine->emit_bb_start(rq,
drivers/gpu/drm/i915/selftests/i915_request.c
1276
struct i915_request *rq = request[idx];
drivers/gpu/drm/i915/selftests/i915_request.c
1279
timeout = i915_request_wait(rq, 0,
drivers/gpu/drm/i915/selftests/i915_request.c
1288
GEM_BUG_ON(!i915_request_completed(rq));
drivers/gpu/drm/i915/selftests/i915_request.c
1289
i915_vma_unpin(rq->batch);
drivers/gpu/drm/i915/selftests/i915_request.c
1290
i915_vma_put(rq->batch);
drivers/gpu/drm/i915/selftests/i915_request.c
1291
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1301
struct i915_request *rq = request[idx];
drivers/gpu/drm/i915/selftests/i915_request.c
1303
if (!rq)
drivers/gpu/drm/i915/selftests/i915_request.c
1306
if (rq->batch) {
drivers/gpu/drm/i915/selftests/i915_request.c
1307
i915_vma_unpin(rq->batch);
drivers/gpu/drm/i915/selftests/i915_request.c
1308
i915_vma_put(rq->batch);
drivers/gpu/drm/i915/selftests/i915_request.c
1310
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1477
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
1479
rq = i915_request_create(engine->kernel_context);
drivers/gpu/drm/i915/selftests/i915_request.c
1480
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1481
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1485
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1486
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1489
if (i915_request_wait(rq, 0, HZ) < 0)
drivers/gpu/drm/i915/selftests/i915_request.c
1491
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1515
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
1517
rq = i915_request_create(engine->kernel_context);
drivers/gpu/drm/i915/selftests/i915_request.c
1518
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1519
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1523
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1561
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
1577
rq = igt_spinner_create_request(&spin,
drivers/gpu/drm/i915/selftests/i915_request.c
1581
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1582
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1589
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1590
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1591
if (igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1600
if (err == 0 && i915_request_wait(rq, 0, HZ) < 0)
drivers/gpu/drm/i915/selftests/i915_request.c
1602
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1691
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
1706
rq = igt_request_alloc(ctx, engine);
drivers/gpu/drm/i915/selftests/i915_request.c
1707
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
1708
ret = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1712
ret = rq->ring->size - rq->reserved_space;
drivers/gpu/drm/i915/selftests/i915_request.c
1713
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1715
sz = rq->ring->emit - rq->head;
drivers/gpu/drm/i915/selftests/i915_request.c
1717
sz += rq->ring->size;
drivers/gpu/drm/i915/selftests/i915_request.c
1890
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
1893
rq = intel_engine_create_kernel_request(ce->engine);
drivers/gpu/drm/i915/selftests/i915_request.c
1894
if (IS_ERR(rq))
drivers/gpu/drm/i915/selftests/i915_request.c
1895
return PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1899
i915_request_await_dma_fence(rq, fence);
drivers/gpu/drm/i915/selftests/i915_request.c
1903
rq = i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1904
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
1905
if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
drivers/gpu/drm/i915/selftests/i915_request.c
1907
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2019
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2037
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
2038
if (IS_ERR(rq))
drivers/gpu/drm/i915/selftests/i915_request.c
2039
return PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2041
cs = intel_ring_begin(rq, 4 + 12 * ARRAY_SIZE(elapsed));
drivers/gpu/drm/i915/selftests/i915_request.c
2043
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2055
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2056
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2110
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2116
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
2117
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2118
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2122
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/selftests/i915_request.c
2124
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2131
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2136
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2182
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2184
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
2185
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2186
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2190
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/selftests/i915_request.c
2192
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2201
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2211
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2242
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2245
rq = i915_request_create(engine->kernel_context);
drivers/gpu/drm/i915/selftests/i915_request.c
2246
if (IS_ERR(rq))
drivers/gpu/drm/i915/selftests/i915_request.c
2247
return PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2249
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/selftests/i915_request.c
2251
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2257
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2258
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2297
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2300
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
2301
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2302
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2306
err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
drivers/gpu/drm/i915/selftests/i915_request.c
2310
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2314
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/selftests/i915_request.c
2316
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2323
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2324
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2387
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2389
rq = i915_request_create(arr[j]);
drivers/gpu/drm/i915/selftests/i915_request.c
2390
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2391
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2396
err = i915_request_await_dma_fence(rq,
drivers/gpu/drm/i915/selftests/i915_request.c
2399
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2404
cs = intel_ring_begin(rq, 4);
drivers/gpu/drm/i915/selftests/i915_request.c
2406
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2414
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2417
fence = i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2419
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2478
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2480
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
2481
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2482
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2486
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/selftests/i915_request.c
2488
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2497
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2498
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2505
rq = i915_request_create(ce->engine->kernel_context);
drivers/gpu/drm/i915/selftests/i915_request.c
2506
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2507
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2511
cs = intel_ring_begin(rq, 8);
drivers/gpu/drm/i915/selftests/i915_request.c
2513
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2521
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2522
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
drivers/gpu/drm/i915/selftests/i915_request.c
2525
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2590
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2592
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
2593
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2594
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2598
cs = intel_ring_begin(rq, 12);
drivers/gpu/drm/i915/selftests/i915_request.c
2600
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2609
intel_ring_advance(rq, cs);
drivers/gpu/drm/i915/selftests/i915_request.c
2611
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
drivers/gpu/drm/i915/selftests/i915_request.c
2612
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2738
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2740
rq = i915_request_create(ps->ce[idx]);
drivers/gpu/drm/i915/selftests/i915_request.c
2741
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2742
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2746
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2747
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2749
if (i915_request_wait(rq, 0, HZ / 5) < 0)
drivers/gpu/drm/i915/selftests/i915_request.c
2751
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2772
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2774
rq = i915_request_create(ps->ce[idx]);
drivers/gpu/drm/i915/selftests/i915_request.c
2775
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
2776
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2780
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2781
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2786
prev = rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2806
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2808
rq = i915_request_create(ps->ce[idx]);
drivers/gpu/drm/i915/selftests/i915_request.c
2809
if (IS_ERR(rq))
drivers/gpu/drm/i915/selftests/i915_request.c
2810
return PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2812
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
2997
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
2999
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
3000
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
3001
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3005
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3006
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3009
if (i915_request_wait(rq, 0, HZ) < 0)
drivers/gpu/drm/i915/selftests/i915_request.c
3011
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3072
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
3074
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
3075
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
3076
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3080
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3081
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3087
prev = rq;
drivers/gpu/drm/i915/selftests/i915_request.c
3148
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
3150
rq = i915_request_create(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
3151
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
3152
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
3156
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
368
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
373
rq = t->request_alloc(ce);
drivers/gpu/drm/i915/selftests/i915_request.c
375
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
376
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
381
err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
drivers/gpu/drm/i915/selftests/i915_request.c
385
requests[n] = i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
386
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
390
&rq->fence,
drivers/gpu/drm/i915/selftests/i915_request.c
395
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
407
struct i915_request *rq = requests[count - 1];
drivers/gpu/drm/i915/selftests/i915_request.c
411
rq->fence.context, rq->fence.seqno,
drivers/gpu/drm/i915/selftests/i915_request.c
416
GEM_BUG_ON(!i915_request_completed(rq));
drivers/gpu/drm/i915/selftests/i915_request.c
422
struct i915_request *rq = requests[n];
drivers/gpu/drm/i915/selftests/i915_request.c
425
&rq->fence.flags)) {
drivers/gpu/drm/i915/selftests/i915_request.c
427
rq->fence.context, rq->fence.seqno);
drivers/gpu/drm/i915/selftests/i915_request.c
431
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
648
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
660
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/selftests/i915_request.c
661
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
662
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
667
i915_request_cancel(rq, -EINTR);
drivers/gpu/drm/i915/selftests/i915_request.c
668
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
669
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
671
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/selftests/i915_request.c
680
if (rq->fence.error != -EINTR) {
drivers/gpu/drm/i915/selftests/i915_request.c
682
engine->name, rq->fence.error);
drivers/gpu/drm/i915/selftests/i915_request.c
687
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
701
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
713
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/selftests/i915_request.c
714
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
715
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
720
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
721
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
722
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
730
i915_request_cancel(rq, -EINTR);
drivers/gpu/drm/i915/selftests/i915_request.c
732
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/selftests/i915_request.c
741
if (rq->fence.error != -EINTR) {
drivers/gpu/drm/i915/selftests/i915_request.c
743
engine->name, rq->fence.error);
drivers/gpu/drm/i915/selftests/i915_request.c
748
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
762
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/i915_request.c
774
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
drivers/gpu/drm/i915/selftests/i915_request.c
775
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
776
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
780
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
781
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
783
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
drivers/gpu/drm/i915/selftests/i915_request.c
789
i915_request_cancel(rq, -EINTR);
drivers/gpu/drm/i915/selftests/i915_request.c
790
if (rq->fence.error) {
drivers/gpu/drm/i915/selftests/i915_request.c
792
engine->name, rq->fence.error);
drivers/gpu/drm/i915/selftests/i915_request.c
797
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
823
struct i915_request *rq, *nop;
drivers/gpu/drm/i915/selftests/i915_request.c
843
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
drivers/gpu/drm/i915/selftests/i915_request.c
844
if (IS_ERR(rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
845
err = PTR_ERR(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
851
i915_request_get(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
852
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/i915_request.c
853
if (!igt_wait_for_spinner(&spin, rq)) {
drivers/gpu/drm/i915/selftests/i915_request.c
868
i915_request_cancel(rq, -EINTR);
drivers/gpu/drm/i915/selftests/i915_request.c
870
if (i915_request_wait(rq, 0, HZ) < 0) {
drivers/gpu/drm/i915/selftests/i915_request.c
879
if (rq->fence.error != -EINTR) {
drivers/gpu/drm/i915/selftests/i915_request.c
881
engine->name, rq->fence.error);
drivers/gpu/drm/i915/selftests/i915_request.c
904
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/igt_spinner.c
118
const struct i915_request *rq)
drivers/gpu/drm/i915/selftests/igt_spinner.c
120
return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
drivers/gpu/drm/i915/selftests/igt_spinner.c
129
struct i915_request *rq = NULL;
drivers/gpu/drm/i915/selftests/igt_spinner.c
149
rq = intel_context_create_request(ce);
drivers/gpu/drm/i915/selftests/igt_spinner.c
150
if (IS_ERR(rq))
drivers/gpu/drm/i915/selftests/igt_spinner.c
151
return ERR_CAST(rq);
drivers/gpu/drm/i915/selftests/igt_spinner.c
153
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
drivers/gpu/drm/i915/selftests/igt_spinner.c
157
err = igt_vma_move_to_active_unlocked(hws, rq, 0);
drivers/gpu/drm/i915/selftests/igt_spinner.c
163
if (GRAPHICS_VER(rq->i915) >= 8) {
drivers/gpu/drm/i915/selftests/igt_spinner.c
165
*batch++ = lower_32_bits(hws_address(hws, rq));
drivers/gpu/drm/i915/selftests/igt_spinner.c
166
*batch++ = upper_32_bits(hws_address(hws, rq));
drivers/gpu/drm/i915/selftests/igt_spinner.c
167
} else if (GRAPHICS_VER(rq->i915) >= 6) {
drivers/gpu/drm/i915/selftests/igt_spinner.c
170
*batch++ = hws_address(hws, rq);
drivers/gpu/drm/i915/selftests/igt_spinner.c
171
} else if (GRAPHICS_VER(rq->i915) >= 4) {
drivers/gpu/drm/i915/selftests/igt_spinner.c
174
*batch++ = hws_address(hws, rq);
drivers/gpu/drm/i915/selftests/igt_spinner.c
177
*batch++ = hws_address(hws, rq);
drivers/gpu/drm/i915/selftests/igt_spinner.c
179
*batch++ = rq->fence.seqno;
drivers/gpu/drm/i915/selftests/igt_spinner.c
186
if (GRAPHICS_VER(rq->i915) >= 8)
drivers/gpu/drm/i915/selftests/igt_spinner.c
188
else if (IS_HASWELL(rq->i915))
drivers/gpu/drm/i915/selftests/igt_spinner.c
190
else if (GRAPHICS_VER(rq->i915) >= 6)
drivers/gpu/drm/i915/selftests/igt_spinner.c
202
err = engine->emit_init_breadcrumb(rq);
drivers/gpu/drm/i915/selftests/igt_spinner.c
208
if (GRAPHICS_VER(rq->i915) <= 5)
drivers/gpu/drm/i915/selftests/igt_spinner.c
210
err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
drivers/gpu/drm/i915/selftests/igt_spinner.c
214
i915_request_set_error_once(rq, err);
drivers/gpu/drm/i915/selftests/igt_spinner.c
215
i915_request_add(rq);
drivers/gpu/drm/i915/selftests/igt_spinner.c
217
return err ? ERR_PTR(err) : rq;
drivers/gpu/drm/i915/selftests/igt_spinner.c
221
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
drivers/gpu/drm/i915/selftests/igt_spinner.c
223
u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
drivers/gpu/drm/i915/selftests/igt_spinner.c
254
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
drivers/gpu/drm/i915/selftests/igt_spinner.c
256
if (i915_request_is_ready(rq))
drivers/gpu/drm/i915/selftests/igt_spinner.c
257
intel_engine_flush_submission(rq->engine);
drivers/gpu/drm/i915/selftests/igt_spinner.c
259
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
drivers/gpu/drm/i915/selftests/igt_spinner.c
260
rq->fence.seqno),
drivers/gpu/drm/i915/selftests/igt_spinner.c
262
wait_for(i915_seqno_passed(hws_seqno(spin, rq),
drivers/gpu/drm/i915/selftests/igt_spinner.c
263
rq->fence.seqno),
drivers/gpu/drm/i915/selftests/igt_spinner.h
41
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1027
struct i915_request *rq;
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1068
true, 0xdeadbeaf, &rq);
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1069
if (rq) {
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1070
dma_resv_add_fence(obj->base.resv, &rq->fence,
drivers/gpu/drm/i915/selftests/intel_memory_region.c
1072
i915_request_put(rq);
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
92
int intel_selftest_wait_for_rq(struct i915_request *rq)
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
96
ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS));
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
33
int intel_selftest_wait_for_rq(struct i915_request *rq);
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
39
int rq = ram->freq < 1000000; /* XXX */
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
96
ram->mr[3] |= (rq & 0x01) << 5;
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
117
u32 hi, lo, rq, tx;
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
122
i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
123
if (!hi && !lo && !rq && !tx)
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
130
if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
28
g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
33
for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
36
if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
28
gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
33
for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
36
if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
drivers/gpu/drm/scheduler/sched_entity.c
100
entity->rq = sched_list[0]->sched_rq[entity->priority];
drivers/gpu/drm/scheduler/sched_entity.c
237
if (!entity->rq)
drivers/gpu/drm/scheduler/sched_entity.c
242
drm_sched_rq_remove_entity(entity->rq, entity);
drivers/gpu/drm/scheduler/sched_entity.c
289
if (!entity->rq)
drivers/gpu/drm/scheduler/sched_entity.c
292
sched = entity->rq->sched;
drivers/gpu/drm/scheduler/sched_entity.c
375
drm_sched_wakeup(entity->rq->sched);
drivers/gpu/drm/scheduler/sched_entity.c
402
struct drm_gpu_scheduler *sched = entity->rq->sched;
drivers/gpu/drm/scheduler/sched_entity.c
504
struct drm_sched_rq *rq;
drivers/gpu/drm/scheduler/sched_entity.c
507
rq = entity->rq;
drivers/gpu/drm/scheduler/sched_entity.c
508
spin_lock(&rq->lock);
drivers/gpu/drm/scheduler/sched_entity.c
509
drm_sched_rq_update_fifo_locked(entity, rq,
drivers/gpu/drm/scheduler/sched_entity.c
511
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_entity.c
529
struct drm_sched_rq *rq;
drivers/gpu/drm/scheduler/sched_entity.c
555
rq = sched ? sched->sched_rq[entity->priority] : NULL;
drivers/gpu/drm/scheduler/sched_entity.c
556
if (rq != entity->rq) {
drivers/gpu/drm/scheduler/sched_entity.c
557
drm_sched_rq_remove_entity(entity->rq, entity);
drivers/gpu/drm/scheduler/sched_entity.c
558
entity->rq = rq;
drivers/gpu/drm/scheduler/sched_entity.c
591
atomic_inc(entity->rq->sched->score);
drivers/gpu/drm/scheduler/sched_entity.c
605
struct drm_sched_rq *rq;
drivers/gpu/drm/scheduler/sched_entity.c
616
rq = entity->rq;
drivers/gpu/drm/scheduler/sched_entity.c
617
sched = rq->sched;
drivers/gpu/drm/scheduler/sched_entity.c
619
spin_lock(&rq->lock);
drivers/gpu/drm/scheduler/sched_entity.c
620
drm_sched_rq_add_entity(rq, entity);
drivers/gpu/drm/scheduler/sched_entity.c
623
drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
drivers/gpu/drm/scheduler/sched_entity.c
625
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_entity.c
69
entity->rq = NULL;
drivers/gpu/drm/scheduler/sched_fence.c
230
fence->sched = entity->rq->sched;
drivers/gpu/drm/scheduler/sched_internal.h
15
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
drivers/gpu/drm/scheduler/sched_internal.h
17
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
drivers/gpu/drm/scheduler/sched_internal.h
21
struct drm_sched_rq *rq, ktime_t ts);
drivers/gpu/drm/scheduler/sched_main.c
1428
struct drm_sched_rq *rq = sched->sched_rq[i];
drivers/gpu/drm/scheduler/sched_main.c
1430
spin_lock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
1431
list_for_each_entry(s_entity, &rq->entities, list) {
drivers/gpu/drm/scheduler/sched_main.c
146
struct drm_sched_rq *rq)
drivers/gpu/drm/scheduler/sched_main.c
1461
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
149
rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
drivers/gpu/drm/scheduler/sched_main.c
1510
struct drm_sched_rq *rq = sched->sched_rq[i];
drivers/gpu/drm/scheduler/sched_main.c
1512
spin_lock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
1513
list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
drivers/gpu/drm/scheduler/sched_main.c
1521
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
1522
if (&entity->list != &rq->entities)
drivers/gpu/drm/scheduler/sched_main.c
155
struct drm_sched_rq *rq,
drivers/gpu/drm/scheduler/sched_main.c
164
lockdep_assert_held(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
166
drm_sched_rq_remove_fifo_locked(entity, rq);
drivers/gpu/drm/scheduler/sched_main.c
170
rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
drivers/gpu/drm/scheduler/sched_main.c
183
struct drm_sched_rq *rq)
drivers/gpu/drm/scheduler/sched_main.c
185
spin_lock_init(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
186
INIT_LIST_HEAD(&rq->entities);
drivers/gpu/drm/scheduler/sched_main.c
187
rq->rb_tree_root = RB_ROOT_CACHED;
drivers/gpu/drm/scheduler/sched_main.c
188
rq->current_entity = NULL;
drivers/gpu/drm/scheduler/sched_main.c
189
rq->sched = sched;
drivers/gpu/drm/scheduler/sched_main.c
200
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
drivers/gpu/drm/scheduler/sched_main.c
204
lockdep_assert_held(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
209
atomic_inc(rq->sched->score);
drivers/gpu/drm/scheduler/sched_main.c
210
list_add_tail(&entity->list, &rq->entities);
drivers/gpu/drm/scheduler/sched_main.c
221
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
drivers/gpu/drm/scheduler/sched_main.c
229
spin_lock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
231
atomic_dec(rq->sched->score);
drivers/gpu/drm/scheduler/sched_main.c
234
if (rq->current_entity == entity)
drivers/gpu/drm/scheduler/sched_main.c
235
rq->current_entity = NULL;
drivers/gpu/drm/scheduler/sched_main.c
238
drm_sched_rq_remove_fifo_locked(entity, rq);
drivers/gpu/drm/scheduler/sched_main.c
240
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
257
struct drm_sched_rq *rq)
drivers/gpu/drm/scheduler/sched_main.c
261
spin_lock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
263
entity = rq->current_entity;
drivers/gpu/drm/scheduler/sched_main.c
265
list_for_each_entry_continue(entity, &rq->entities, list) {
drivers/gpu/drm/scheduler/sched_main.c
271
list_for_each_entry(entity, &rq->entities, list) {
drivers/gpu/drm/scheduler/sched_main.c
275
if (entity == rq->current_entity)
drivers/gpu/drm/scheduler/sched_main.c
279
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
291
rq->current_entity = entity;
drivers/gpu/drm/scheduler/sched_main.c
295
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
314
struct drm_sched_rq *rq)
drivers/gpu/drm/scheduler/sched_main.c
318
spin_lock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
319
for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
drivers/gpu/drm/scheduler/sched_main.c
328
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
336
spin_unlock(&rq->lock);
drivers/gpu/drm/scheduler/sched_main.c
805
if (!entity->rq) {
drivers/gpu/drm/scheduler/sched_main.c
865
sched = entity->rq->sched;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1161
bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1168
qplib_qp->rq.sg_info.umem = umem;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1169
qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1170
qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1258
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1259
qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1269
qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1270
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1271
qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1272
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1274
qp->qplib_qp.rq.q_full_delta = 1;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1275
qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1276
qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1307
struct bnxt_qplib_q *rq;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1312
rq = &qplqp->rq;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1320
rq->max_wqe = 0;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1322
rq->max_sge = init_attr->cap.max_recv_sge;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1323
if (rq->max_sge > dev_attr->max_qp_sges)
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1324
rq->max_sge = dev_attr->max_qp_sges;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1325
init_attr->cap.max_recv_sge = rq->max_sge;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1326
rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1332
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1333
rq->max_sw_wqe = rq->max_wqe;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1334
rq->q_full_delta = 0;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1335
rq->sg_info.pgsize = PAGE_SIZE;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1336
rq->sg_info.pgshft = PAGE_SHIFT;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1353
qplqp->rq.max_sge = dev_attr->max_qp_sges;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1354
if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1355
qplqp->rq.max_sge = dev_attr->max_qp_sges;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
1356
qplqp->rq.max_sge = 6;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2267
qp->qplib_qp.rq.psn = qp_attr->rq_psn;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2326
if (qp->qplib_qp.rq.max_wqe) {
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2328
qp->qplib_qp.rq.max_wqe =
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2330
qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2331
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2333
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2394
qp_attr->rq_psn = qplib_qp->rq.psn;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2404
qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
2405
qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
drivers/infiniband/hw/bnxt_re/ib_verbs.c
3018
if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
drivers/infiniband/hw/bnxt_re/ib_verbs.c
3055
if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
drivers/infiniband/hw/bnxt_re/main.c
1221
if (rdma_nl_put_driver_u32(msg, "rq_max_wqe", qplib_qp->rq.max_wqe))
drivers/infiniband/hw/bnxt_re/main.c
1223
if (rdma_nl_put_driver_u32(msg, "rq_max_sge", qplib_qp->rq.max_sge))
drivers/infiniband/hw/bnxt_re/main.c
1225
if (rdma_nl_put_driver_u32(msg, "rq_wqe_size", qplib_qp->rq.wqe_size))
drivers/infiniband/hw/bnxt_re/main.c
1227
if (rdma_nl_put_driver_u32(msg, "rq_swq_start", qplib_qp->rq.swq_start))
drivers/infiniband/hw/bnxt_re/main.c
1229
if (rdma_nl_put_driver_u32(msg, "rq_swq_last", qplib_qp->rq.swq_last))
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1044
rq->dbinfo.flags = 0;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1046
hwq_attr.sginfo = &rq->sg_info;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1048
hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1052
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1055
if (!rq->hwq.is_user) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1056
rc = bnxt_qplib_alloc_init_swq(rq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1061
req.rq_size = cpu_to_le32(rq->max_wqe);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1062
pbl = &rq->hwq.pbl[PBL_LVL_0];
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1064
pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1066
pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1069
6 : rq->max_sge;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1149
if (rq->max_wqe) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1150
rq->dbinfo.hwq = &rq->hwq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1151
rq->dbinfo.xid = qp->id;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1152
rq->dbinfo.db = qp->dpi->dbr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1153
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1167
kfree(rq->swq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1169
bnxt_qplib_free_hwq(res, &rq->hwq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
134
if (qp->rq.flushed) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
135
qp->rq.flushed = false;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1402
req.rq_psn = cpu_to_le32(qp->rq.psn);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1416
req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1418
req.rq_sge = cpu_to_le16(qp->rq.max_sge);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
150
qp->rq.hwq.prod = 0;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
151
qp->rq.hwq.cons = 0;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1515
qp->rq.psn = le32_to_cpu(sb->rq_psn);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1521
qp->rq.max_wqe = qp->rq.hwq.max_elements;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1523
qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1623
bnxt_qplib_free_hwq(res, &qp->rq.hwq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1624
kfree(qp->rq.swq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1654
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1656
return rq->swq_start;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1667
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
1673
sw_prod = rq->swq_start;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
181
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
186
rq->max_wqe * qp->rq_hdr_buf_size,
drivers/infiniband/hw/bnxt_re/qplib_fp.c
203
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2115
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2117
bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2124
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2134
hwq = &rq->hwq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2143
if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2150
swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2152
swq->slots = rq->dbinfo.max_slot;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2181
bnxt_qplib_swq_mod_start(rq, wqe_idx);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2182
bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
219
if (qp->rq_hdr_buf_size && rq->max_wqe) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
221
rq->max_wqe *
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2411
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2433
start = rq->swq_start;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2436
last = rq->swq_last;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2444
cqe->wr_id = rq->swq[last].wr_id;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2447
bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2448
rq->swq[last].slots, &rq->dbinfo.flags);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2449
rq->swq_last = rq->swq[last].next_idx;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2452
if (!*budget && rq->swq_last != start)
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2731
struct bnxt_qplib_q *rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2740
if (qp->rq.flushed) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2775
rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2776
if (wr_id_idx > (rq->max_wqe - 1)) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2779
wr_id_idx, rq->max_wqe);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2782
if (wr_id_idx != rq->swq_last)
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2784
swq = &rq->swq[rq->swq_last];
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2788
bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2789
swq->slots, &rq->dbinfo.flags);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2790
rq->swq_last = swq->next_idx;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2811
struct bnxt_qplib_q *rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2820
if (qp->rq.flushed) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2861
rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2862
if (wr_id_idx > (rq->max_wqe - 1)) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2865
wr_id_idx, rq->max_wqe);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2869
if (rq->swq_last != wr_id_idx)
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2871
swq = &rq->swq[rq->swq_last];
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2875
bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2876
swq->slots, &rq->dbinfo.flags);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2877
rq->swq_last = swq->next_idx;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2907
struct bnxt_qplib_q *rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2918
if (qp->rq.flushed) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2966
rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2967
if (wr_id_idx > (rq->max_wqe - 1)) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2970
wr_id_idx, rq->max_wqe);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2973
if (rq->swq_last != wr_id_idx)
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2975
swq = &rq->swq[rq->swq_last];
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2979
bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2980
swq->slots, &rq->dbinfo.flags);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
2981
rq->swq_last = swq->next_idx;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
3000
struct bnxt_qplib_q *sq, *rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
3020
rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
3070
} else if (cqe_cons > rq->max_wqe - 1) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
3073
cqe_cons, rq->max_wqe);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
3078
if (qp->rq.flushed) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
3128
__flush_rq(&qp->rq, qp, &cqe, &budget);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
822
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
866
if (rq->max_wqe) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
867
rq->dbinfo.flags = 0;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
869
hwq_attr.sginfo = &rq->sg_info;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
87
if (!qp->rq.flushed) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
871
hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
873
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
876
rc = bnxt_qplib_alloc_init_swq(rq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
879
req.rq_size = cpu_to_le32(rq->max_wqe);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
880
pbl = &rq->hwq.pbl[PBL_LVL_0];
drivers/infiniband/hw/bnxt_re/qplib_fp.c
882
pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
drivers/infiniband/hw/bnxt_re/qplib_fp.c
884
pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
887
cpu_to_le16((rq->max_sge &
drivers/infiniband/hw/bnxt_re/qplib_fp.c
91
qp->rq.flushed = true;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
914
if (rq->max_wqe) {
drivers/infiniband/hw/bnxt_re/qplib_fp.c
915
rq->dbinfo.hwq = &rq->hwq;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
916
rq->dbinfo.xid = qp->id;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
917
rq->dbinfo.db = qp->dpi->dbr;
drivers/infiniband/hw/bnxt_re/qplib_fp.c
918
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
929
kfree(rq->swq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
931
bnxt_qplib_free_hwq(res, &rq->hwq);
drivers/infiniband/hw/bnxt_re/qplib_fp.c
965
struct bnxt_qplib_q *rq = &qp->rq;
drivers/infiniband/hw/bnxt_re/qplib_fp.h
311
struct bnxt_qplib_q rq;
drivers/infiniband/hw/cxgb4/cq.c
206
int in_use = wq->rq.in_use - count;
drivers/infiniband/hw/cxgb4/cq.c
209
wq, cq, wq->rq.in_use, count);
drivers/infiniband/hw/cxgb4/cq.c
662
CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
drivers/infiniband/hw/cxgb4/cq.c
723
pr_debug("completing rq idx %u\n", wq->rq.cidx);
drivers/infiniband/hw/cxgb4/cq.c
724
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
drivers/infiniband/hw/cxgb4/cq.c
731
wq->rq.msn++;
drivers/infiniband/hw/cxgb4/device.c
113
le.qid = wq->rq.qid;
drivers/infiniband/hw/cxgb4/device.c
115
le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
drivers/infiniband/hw/cxgb4/device.c
116
le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
drivers/infiniband/hw/cxgb4/device.c
1289
t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
drivers/infiniband/hw/cxgb4/device.c
1290
qp->wq.rq.wq_pidx_inc = 0;
drivers/infiniband/hw/cxgb4/device.c
1391
qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
1397
pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
drivers/infiniband/hw/cxgb4/device.c
1402
qp->wq.rq.wq_pidx_inc = 0;
drivers/infiniband/hw/cxgb4/device.c
272
qp->srq ? qp->srq->idx : qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
292
qp->wq.sq.qid, qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
306
qp->wq.sq.qid, qp->wq.rq.qid,
drivers/infiniband/hw/cxgb4/device.c
827
rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
drivers/infiniband/hw/cxgb4/device.c
828
rdev->lldi.vr->rq.size,
drivers/infiniband/hw/cxgb4/device.c
845
rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
drivers/infiniband/hw/cxgb4/device.c
962
infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
77
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
drivers/infiniband/hw/cxgb4/qp.c
1293
wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
drivers/infiniband/hw/cxgb4/qp.c
1294
qhp->wq.rq.wq_pidx *
drivers/infiniband/hw/cxgb4/qp.c
1305
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
drivers/infiniband/hw/cxgb4/qp.c
1307
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
drivers/infiniband/hw/cxgb4/qp.c
1310
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
drivers/infiniband/hw/cxgb4/qp.c
1316
wqe->recv.wrid = qhp->wq.rq.pidx;
drivers/infiniband/hw/cxgb4/qp.c
1322
(unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
drivers/infiniband/hw/cxgb4/qp.c
163
wq->rq.memsize, wq->rq.queue,
drivers/infiniband/hw/cxgb4/qp.c
164
dma_unmap_addr(&wq->rq, mapping));
drivers/infiniband/hw/cxgb4/qp.c
165
c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
drivers/infiniband/hw/cxgb4/qp.c
166
kfree(wq->rq.sw_rq);
drivers/infiniband/hw/cxgb4/qp.c
167
c4iw_put_qpid(rdev, wq->rq.qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
1812
wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
drivers/infiniband/hw/cxgb4/qp.c
1813
wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
drivers/infiniband/hw/cxgb4/qp.c
1814
wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
drivers/infiniband/hw/cxgb4/qp.c
1815
rhp->rdev.lldi.vr->rq.start);
drivers/infiniband/hw/cxgb4/qp.c
1851
qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
drivers/infiniband/hw/cxgb4/qp.c
2168
qhp->wq.rq.size = rqsize;
drivers/infiniband/hw/cxgb4/qp.c
2169
qhp->wq.rq.memsize =
drivers/infiniband/hw/cxgb4/qp.c
2171
sizeof(*qhp->wq.rq.queue);
drivers/infiniband/hw/cxgb4/qp.c
2177
qhp->wq.rq.memsize =
drivers/infiniband/hw/cxgb4/qp.c
2178
roundup(qhp->wq.rq.memsize, PAGE_SIZE);
drivers/infiniband/hw/cxgb4/qp.c
218
wq->rq.qid = c4iw_get_qpid(rdev, uctx);
drivers/infiniband/hw/cxgb4/qp.c
219
if (!wq->rq.qid) {
drivers/infiniband/hw/cxgb4/qp.c
2261
uresp.rqid = qhp->wq.rq.qid;
drivers/infiniband/hw/cxgb4/qp.c
2262
uresp.rq_size = qhp->wq.rq.size;
drivers/infiniband/hw/cxgb4/qp.c
2263
uresp.rq_memsize = qhp->wq.rq.memsize;
drivers/infiniband/hw/cxgb4/qp.c
2296
rq_key_mm->vaddr = qhp->wq.rq.queue;
drivers/infiniband/hw/cxgb4/qp.c
2297
rq_key_mm->dma_addr = qhp->wq.rq.dma_addr;
drivers/infiniband/hw/cxgb4/qp.c
2298
rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
drivers/infiniband/hw/cxgb4/qp.c
2314
(u64)(unsigned long)qhp->wq.rq.bar2_pa;
drivers/infiniband/hw/cxgb4/qp.c
233
wq->rq.sw_rq = kzalloc_objs(*wq->rq.sw_rq, wq->rq.size);
drivers/infiniband/hw/cxgb4/qp.c
2339
&qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
drivers/infiniband/hw/cxgb4/qp.c
234
if (!wq->rq.sw_rq) {
drivers/infiniband/hw/cxgb4/qp.c
2353
attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
drivers/infiniband/hw/cxgb4/qp.c
2354
qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
drivers/infiniband/hw/cxgb4/qp.c
245
wq->rq.rqt_size =
drivers/infiniband/hw/cxgb4/qp.c
246
roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
drivers/infiniband/hw/cxgb4/qp.c
247
wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
drivers/infiniband/hw/cxgb4/qp.c
248
if (!wq->rq.rqt_hwaddr) {
drivers/infiniband/hw/cxgb4/qp.c
2563
wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
drivers/infiniband/hw/cxgb4/qp.c
261
wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
drivers/infiniband/hw/cxgb4/qp.c
262
wq->rq.memsize,
drivers/infiniband/hw/cxgb4/qp.c
263
&wq->rq.dma_addr,
drivers/infiniband/hw/cxgb4/qp.c
2632
rdev->lldi.vr->rq.start);
drivers/infiniband/hw/cxgb4/qp.c
265
if (!wq->rq.queue) {
drivers/infiniband/hw/cxgb4/qp.c
272
wq->rq.queue,
drivers/infiniband/hw/cxgb4/qp.c
273
(unsigned long long)virt_to_phys(wq->rq.queue));
drivers/infiniband/hw/cxgb4/qp.c
274
dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
drivers/infiniband/hw/cxgb4/qp.c
284
wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
drivers/infiniband/hw/cxgb4/qp.c
286
&wq->rq.bar2_qid,
drivers/infiniband/hw/cxgb4/qp.c
287
user ? &wq->rq.bar2_pa : NULL);
drivers/infiniband/hw/cxgb4/qp.c
292
if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
drivers/infiniband/hw/cxgb4/qp.c
294
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
drivers/infiniband/hw/cxgb4/qp.c
300
wq->rq.msn = 1;
drivers/infiniband/hw/cxgb4/qp.c
356
eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
drivers/infiniband/hw/cxgb4/qp.c
374
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
drivers/infiniband/hw/cxgb4/qp.c
375
res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
drivers/infiniband/hw/cxgb4/qp.c
384
wq->sq.qid, wq->rq.qid, wq->db,
drivers/infiniband/hw/cxgb4/qp.c
385
wq->sq.bar2_va, wq->rq.bar2_va);
drivers/infiniband/hw/cxgb4/qp.c
391
wq->rq.memsize, wq->rq.queue,
drivers/infiniband/hw/cxgb4/qp.c
392
dma_unmap_addr(&wq->rq, mapping));
drivers/infiniband/hw/cxgb4/qp.c
397
c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
drivers/infiniband/hw/cxgb4/qp.c
400
kfree(wq->rq.sw_rq);
drivers/infiniband/hw/cxgb4/qp.c
405
c4iw_put_qpid(rdev, wq->rq.qid, uctx);
drivers/infiniband/hw/cxgb4/qp.c
761
ret = build_isgl((__be64 *)qhp->wq.rq.queue,
drivers/infiniband/hw/cxgb4/qp.c
762
(__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
drivers/infiniband/hw/cxgb4/qp.c
936
qhp->wq.rq.wq_pidx_inc += inc;
drivers/infiniband/hw/cxgb4/resource.c
398
rqt_start = rdev->lldi.vr->rq.start + skip;
drivers/infiniband/hw/cxgb4/resource.c
399
rqt_chunk = rdev->lldi.vr->rq.size - skip;
drivers/infiniband/hw/cxgb4/restrack.c
70
if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
drivers/infiniband/hw/cxgb4/restrack.c
72
if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
drivers/infiniband/hw/cxgb4/restrack.c
74
if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
drivers/infiniband/hw/cxgb4/restrack.c
76
if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
drivers/infiniband/hw/cxgb4/restrack.c
78
if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
drivers/infiniband/hw/cxgb4/restrack.c
80
if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
drivers/infiniband/hw/cxgb4/restrack.c
82
if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
drivers/infiniband/hw/cxgb4/restrack.c
84
if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
drivers/infiniband/hw/cxgb4/restrack.c
86
if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
drivers/infiniband/hw/cxgb4/restrack.c
88
if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
drivers/infiniband/hw/cxgb4/t4.h
384
struct t4_rq rq;
drivers/infiniband/hw/cxgb4/t4.h
482
return wq->rq.in_use;
drivers/infiniband/hw/cxgb4/t4.h
487
return wq->rq.in_use == 0;
drivers/infiniband/hw/cxgb4/t4.h
492
return wq->rq.size - 1 - wq->rq.in_use;
drivers/infiniband/hw/cxgb4/t4.h
497
wq->rq.in_use++;
drivers/infiniband/hw/cxgb4/t4.h
498
if (++wq->rq.pidx == wq->rq.size)
drivers/infiniband/hw/cxgb4/t4.h
499
wq->rq.pidx = 0;
drivers/infiniband/hw/cxgb4/t4.h
500
wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
drivers/infiniband/hw/cxgb4/t4.h
501
if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
drivers/infiniband/hw/cxgb4/t4.h
502
wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
drivers/infiniband/hw/cxgb4/t4.h
507
wq->rq.in_use--;
drivers/infiniband/hw/cxgb4/t4.h
508
if (++wq->rq.cidx == wq->rq.size)
drivers/infiniband/hw/cxgb4/t4.h
509
wq->rq.cidx = 0;
drivers/infiniband/hw/cxgb4/t4.h
514
return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
drivers/infiniband/hw/cxgb4/t4.h
519
return wq->rq.size * T4_RQ_NUM_SLOTS;
drivers/infiniband/hw/cxgb4/t4.h
631
if (wq->rq.bar2_va) {
drivers/infiniband/hw/cxgb4/t4.h
632
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
drivers/infiniband/hw/cxgb4/t4.h
633
pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
drivers/infiniband/hw/cxgb4/t4.h
635
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
drivers/infiniband/hw/cxgb4/t4.h
638
pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
drivers/infiniband/hw/cxgb4/t4.h
639
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
drivers/infiniband/hw/cxgb4/t4.h
640
wq->rq.bar2_va + SGE_UDB_KDOORBELL);
drivers/infiniband/hw/cxgb4/t4.h
647
writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
drivers/infiniband/hw/cxgb4/t4.h
664
wq->rq.queue[wq->rq.size].status.db_off = 1;
drivers/infiniband/hw/cxgb4/t4.h
669
wq->rq.queue[wq->rq.size].status.db_off = 0;
drivers/infiniband/hw/hfi1/qp.c
664
srq ? srq->rq.size : qp->r_rq.size
drivers/infiniband/hw/hns/hns_roce_device.h
611
struct hns_roce_wq rq;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3916
if (hr_qp->rq.head != hr_qp->rq.tail)
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
3994
npolled += sw_comp(hr_qp, &hr_qp->rq,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4263
wq = &qp->rq;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4558
hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4587
hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4669
ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4671
if (hr_qp->rq.wqe_cnt && ret) {
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
4704
hr_qp->rq.wqe_cnt));
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5582
hr_qp->rq.head = 0;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5583
hr_qp->rq.tail = 0;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5610
spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5611
trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5612
hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5614
spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5872
qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
5873
qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
645
qp->rq.head & V2_DB_PRODUCER_IDX_M;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
651
hr_reg_write(&rq_db, DB_PI, qp->rq.head);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
654
qp->rq.db_reg);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
820
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
822
trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
837
spin_lock_irqsave(&hr_qp->rq.lock, flags);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
846
max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
848
if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
863
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
865
hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
870
hr_qp->rq.head += nreq;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
874
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
988
ret = hns_roce_push_drain_wr(&hr_qp->rq, cq, rwr.wr_id);
drivers/infiniband/hw/hns/hns_roce_main.c
1142
spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
drivers/infiniband/hw/hns/hns_roce_main.c
1143
if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
drivers/infiniband/hw/hns/hns_roce_main.c
1145
spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
drivers/infiniband/hw/hns/hns_roce_qp.c
1037
if (hr_qp->rq.wqe_cnt) {
drivers/infiniband/hw/hns/hns_roce_qp.c
1038
rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
drivers/infiniband/hw/hns/hns_roce_qp.c
1047
hr_qp->rq.wrid = rq_wrid;
drivers/infiniband/hw/hns/hns_roce_qp.c
1057
kfree(hr_qp->rq.wrid);
drivers/infiniband/hw/hns/hns_roce_qp.c
1185
spin_lock_init(&hr_qp->rq.lock);
drivers/infiniband/hw/hns/hns_roce_qp.c
1468
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
drivers/infiniband/hw/hns/hns_roce_qp.c
1568
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
drivers/infiniband/hw/hns/hns_roce_qp.c
489
hr_qp->rq.rsv_sge = 1;
drivers/infiniband/hw/hns/hns_roce_qp.c
502
hr_qp->rq.wqe_cnt = 0;
drivers/infiniband/hw/hns/hns_roce_qp.c
503
hr_qp->rq.max_gs = 0;
drivers/infiniband/hw/hns/hns_roce_qp.c
526
hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
drivers/infiniband/hw/hns/hns_roce_qp.c
527
hr_qp->rq.rsv_sge);
drivers/infiniband/hw/hns/hns_roce_qp.c
529
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
drivers/infiniband/hw/hns/hns_roce_qp.c
530
hr_qp->rq.max_gs);
drivers/infiniband/hw/hns/hns_roce_qp.c
532
hr_qp->rq.wqe_cnt = cnt;
drivers/infiniband/hw/hns/hns_roce_qp.c
535
cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
drivers/infiniband/hw/hns/hns_roce_qp.c
729
hr_qp->rq.offset = hr_qp->buff_size;
drivers/infiniband/hw/hns/hns_roce_qp.c
730
buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
drivers/infiniband/hw/hns/hns_roce_qp.c
731
hr_qp->rq.wqe_shift);
drivers/infiniband/hw/hns/hns_roce_qp.c
949
hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
drivers/infiniband/hw/hns/hns_roce_restrack.c
76
if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
drivers/infiniband/hw/hns/hns_roce_restrack.c
79
if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
drivers/infiniband/hw/ionic/ionic_controlpath.c
1367
wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2;
drivers/infiniband/hw/ionic/ionic_controlpath.c
1368
wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2;
drivers/infiniband/hw/ionic/ionic_controlpath.c
1561
ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
drivers/infiniband/hw/ionic/ionic_controlpath.c
1958
qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
1964
expdb_stride_log2 = qp->rq.stride_log2;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2007
struct ionic_qp *qp, struct ionic_qdesc *rq,
drivers/infiniband/hw/ionic/ionic_controlpath.c
2021
rc = ionic_validate_qdesc_zero(rq);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2039
rc = ionic_validate_qdesc(rq);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2045
qp->rq.ptr = NULL;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2046
qp->rq.size = rq->size;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2047
qp->rq.mask = rq->mask;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2048
qp->rq.depth_log2 = rq->depth_log2;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2049
qp->rq.stride_log2 = rq->stride_log2;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2053
qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2079
rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev,
drivers/infiniband/hw/ionic/ionic_controlpath.c
2084
ionic_queue_dbell_init(&qp->rq, qp->qpid);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2086
qp->rq_meta = kmalloc_objs(*qp->rq_meta, (u32)qp->rq.mask + 1);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2092
for (i = 0; i < qp->rq.mask; ++i)
drivers/infiniband/hw/ionic/ionic_controlpath.c
2106
qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2119
ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2137
ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2231
rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf,
drivers/infiniband/hw/ionic/ionic_controlpath.c
2306
qp->rq.size,
drivers/infiniband/hw/ionic/ionic_controlpath.c
2359
attr->cap.max_recv_wr = qp->rq.mask;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2361
ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
drivers/infiniband/hw/ionic/ionic_controlpath.c
2437
if (!ionic_queue_empty(&qp->rq)) {
drivers/infiniband/hw/ionic/ionic_controlpath.c
2509
qp->rq.prod = 0;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2510
qp->rq.cons = 0;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2512
for (i = 0; i < qp->rq.mask; ++i)
drivers/infiniband/hw/ionic/ionic_controlpath.c
2612
attr->cap.max_recv_wr = qp->rq.mask;
drivers/infiniband/hw/ionic/ionic_datapath.c
1185
wqe = ionic_queue_at_prod(&qp->rq);
drivers/infiniband/hw/ionic/ionic_datapath.c
1198
mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec,
drivers/infiniband/hw/ionic/ionic_datapath.c
1214
ionic_queue_produce(&qp->rq);
drivers/infiniband/hw/ionic/ionic_datapath.c
1326
if (ionic_queue_full(&qp->rq)) {
drivers/infiniband/hw/ionic/ionic_datapath.c
1349
if (likely(qp->rq.prod != qp->rq_old_prod)) {
drivers/infiniband/hw/ionic/ionic_datapath.c
1351
spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask;
drivers/infiniband/hw/ionic/ionic_datapath.c
1354
qp->rq_old_prod = qp->rq.prod;
drivers/infiniband/hw/ionic/ionic_datapath.c
1357
ionic_queue_dbell_val(&qp->rq));
drivers/infiniband/hw/ionic/ionic_datapath.c
158
if (ionic_queue_empty(&qp->rq)) {
drivers/infiniband/hw/ionic/ionic_datapath.c
164
if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) {
drivers/infiniband/hw/ionic/ionic_datapath.c
257
ionic_queue_consume(&qp->rq);
drivers/infiniband/hw/ionic/ionic_datapath.c
39
if (ionic_queue_empty(&qp->rq))
drivers/infiniband/hw/ionic/ionic_datapath.c
42
wqe = ionic_queue_at_cons(&qp->rq);
drivers/infiniband/hw/ionic/ionic_datapath.c
45
if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) {
drivers/infiniband/hw/ionic/ionic_datapath.c
61
ionic_queue_consume(&qp->rq);
drivers/infiniband/hw/ionic/ionic_datapath.c
883
memset(wqe, 0, 1u << qp->rq.stride_log2);
drivers/infiniband/hw/ionic/ionic_ibdev.h
277
struct ionic_queue rq;
drivers/infiniband/hw/irdma/ctrl.c
2005
qp->rq_flush_code = info->rq;
drivers/infiniband/hw/irdma/ctrl.c
2640
if (info->rq && !qp->flush_rq)
drivers/infiniband/hw/irdma/ctrl.c
4787
info->rq = true;
drivers/infiniband/hw/irdma/hw.c
209
qp->rq_flush_code = info->rq;
drivers/infiniband/hw/irdma/hw.c
215
if (info->rq) {
drivers/infiniband/hw/irdma/hw.c
2685
if (hw_info->rq &&
drivers/infiniband/hw/irdma/hw.c
2743
if (info->rq) {
drivers/infiniband/hw/irdma/hw.c
2765
info->rq = false;
drivers/infiniband/hw/irdma/hw.c
2855
info.rq = flush_mask & IRDMA_FLUSH_RQ;
drivers/infiniband/hw/irdma/hw.c
2871
if (info.rq)
drivers/infiniband/hw/irdma/hw.c
2877
if (info.rq && iwqp->sc_qp.rq_flush_code)
drivers/infiniband/hw/irdma/hw.c
497
ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
drivers/infiniband/hw/irdma/hw.c
498
if (info->rq) {
drivers/infiniband/hw/irdma/type.h
1117
bool rq:1;
drivers/infiniband/hw/irdma/type.h
1309
bool rq:1;
drivers/infiniband/hw/irdma/uk.c
1717
qp->rq_base = info->rq;
drivers/infiniband/hw/irdma/user.h
546
struct irdma_qp_quanta *rq;
drivers/infiniband/hw/irdma/verbs.c
745
ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
drivers/infiniband/hw/irdma/verbs.c
747
ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
drivers/infiniband/hw/mlx4/cq.c
619
wq = is_send ? &qp->sq : &qp->rq;
drivers/infiniband/hw/mlx4/cq.c
755
wq = &(*cur_qp)->rq;
drivers/infiniband/hw/mlx4/main.c
3100
spin_lock_irqsave(&mqp->rq.lock, flags_qp);
drivers/infiniband/hw/mlx4/main.c
3103
if (mqp->rq.tail != mqp->rq.head) {
drivers/infiniband/hw/mlx4/main.c
3118
spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
drivers/infiniband/hw/mlx4/mlx4_ib.h
332
struct mlx4_ib_wq rq;
drivers/infiniband/hw/mlx4/qp.c
1060
spin_lock_init(&qp->rq.lock);
drivers/infiniband/hw/mlx4/qp.c
1182
qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
drivers/infiniband/hw/mlx4/qp.c
1184
if (!qp->sq.wrid || !qp->rq.wrid) {
drivers/infiniband/hw/mlx4/qp.c
1274
kvfree(qp->rq.wrid);
drivers/infiniband/hw/mlx4/qp.c
1481
if (qp->rq.wqe_cnt) {
drivers/infiniband/hw/mlx4/qp.c
1492
kvfree(qp->rq.wrid);
drivers/infiniband/hw/mlx4/qp.c
1497
if (qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx4/qp.c
181
return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
drivers/infiniband/hw/mlx4/qp.c
2217
if (qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx4/qp.c
2218
context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
drivers/infiniband/hw/mlx4/qp.c
2219
context->rq_size_stride |= qp->rq.wqe_shift - 4;
drivers/infiniband/hw/mlx4/qp.c
2442
if (qp->rq.wqe_cnt &&
drivers/infiniband/hw/mlx4/qp.c
2590
qp->rq.head = 0;
drivers/infiniband/hw/mlx4/qp.c
2591
qp->rq.tail = 0;
drivers/infiniband/hw/mlx4/qp.c
2595
if (qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx4/qp.c
349
qp->rq.wqe_cnt = qp->rq.max_gs = 0;
drivers/infiniband/hw/mlx4/qp.c
360
qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));
drivers/infiniband/hw/mlx4/qp.c
361
qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
drivers/infiniband/hw/mlx4/qp.c
362
wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
drivers/infiniband/hw/mlx4/qp.c
363
qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
drivers/infiniband/hw/mlx4/qp.c
368
cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
drivers/infiniband/hw/mlx4/qp.c
369
cap->max_recv_sge = qp->rq.max_gs;
drivers/infiniband/hw/mlx4/qp.c
371
cap->max_recv_wr = qp->rq.max_post =
drivers/infiniband/hw/mlx4/qp.c
372
min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
drivers/infiniband/hw/mlx4/qp.c
373
cap->max_recv_sge = min(qp->rq.max_gs,
drivers/infiniband/hw/mlx4/qp.c
3863
max_gs = qp->rq.max_gs;
drivers/infiniband/hw/mlx4/qp.c
3864
spin_lock_irqsave(&qp->rq.lock, flags);
drivers/infiniband/hw/mlx4/qp.c
3874
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
drivers/infiniband/hw/mlx4/qp.c
3877
if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
drivers/infiniband/hw/mlx4/qp.c
3883
if (unlikely(wr->num_sge > qp->rq.max_gs)) {
drivers/infiniband/hw/mlx4/qp.c
3915
qp->rq.wrid[ind] = wr->wr_id;
drivers/infiniband/hw/mlx4/qp.c
3917
ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
drivers/infiniband/hw/mlx4/qp.c
3922
qp->rq.head += nreq;
drivers/infiniband/hw/mlx4/qp.c
3930
*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
drivers/infiniband/hw/mlx4/qp.c
3933
spin_unlock_irqrestore(&qp->rq.lock, flags);
drivers/infiniband/hw/mlx4/qp.c
4088
qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
drivers/infiniband/hw/mlx4/qp.c
4089
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
drivers/infiniband/hw/mlx4/qp.c
425
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
drivers/infiniband/hw/mlx4/qp.c
427
if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
drivers/infiniband/hw/mlx4/qp.c
428
qp->rq.offset = 0;
drivers/infiniband/hw/mlx4/qp.c
429
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
drivers/infiniband/hw/mlx4/qp.c
431
qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
drivers/infiniband/hw/mlx4/qp.c
464
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
drivers/infiniband/hw/mlx4/qp.c
475
kmalloc_objs(struct mlx4_ib_buf, qp->rq.wqe_cnt);
drivers/infiniband/hw/mlx4/qp.c
478
for (i = 0; i < qp->rq.wqe_cnt; i++) {
drivers/infiniband/hw/mlx4/qp.c
511
for (i = 0; i < qp->rq.wqe_cnt; i++) {
drivers/infiniband/hw/mlx4/qp.c
878
spin_lock_init(&qp->rq.lock);
drivers/infiniband/hw/mlx4/qp.c
916
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
drivers/infiniband/hw/mlx5/cq.c
199
wq = &qp->rq;
drivers/infiniband/hw/mlx5/cq.c
405
wq = (is_send) ? &qp->sq : &qp->rq;
drivers/infiniband/hw/mlx5/cq.c
551
wq = &(*cur_qp)->rq;
drivers/infiniband/hw/mlx5/devx.c
681
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
drivers/infiniband/hw/mlx5/devx.c
685
rq->base.mqp.qpn) == obj_id ||
drivers/infiniband/hw/mlx5/devx.c
689
rq->tirn) == obj_id ||
drivers/infiniband/hw/mlx5/fs.c
1825
dst->tir_num = mqp->raw_packet_qp.rq.tirn;
drivers/infiniband/hw/mlx5/fs.c
2413
*dest_id = mqp->raw_packet_qp.rq.tirn;
drivers/infiniband/hw/mlx5/main.c
2961
spin_lock_irqsave(&mqp->rq.lock, flags_qp);
drivers/infiniband/hw/mlx5/main.c
2964
if (mqp->rq.tail != mqp->rq.head) {
drivers/infiniband/hw/mlx5/main.c
2979
spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
drivers/infiniband/hw/mlx5/mlx5_ib.h
449
struct mlx5_ib_wq *rq;
drivers/infiniband/hw/mlx5/mlx5_ib.h
469
struct mlx5_ib_rq rq;
drivers/infiniband/hw/mlx5/mlx5_ib.h
511
struct mlx5_ib_wq rq;
drivers/infiniband/hw/mlx5/odp.c
1320
struct mlx5_ib_wq *wq = &qp->rq;
drivers/infiniband/hw/mlx5/qp.c
1092
kvfree(qp->rq.wrid);
drivers/infiniband/hw/mlx5/qp.c
1125
qp->rq.offset = 0;
drivers/infiniband/hw/mlx5/qp.c
1126
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
drivers/infiniband/hw/mlx5/qp.c
1127
base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
drivers/infiniband/hw/mlx5/qp.c
1136
if (qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx5/qp.c
1137
mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
drivers/infiniband/hw/mlx5/qp.c
1138
ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
drivers/infiniband/hw/mlx5/qp.c
1186
qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
drivers/infiniband/hw/mlx5/qp.c
1187
sizeof(*qp->rq.wrid), GFP_KERNEL);
drivers/infiniband/hw/mlx5/qp.c
1192
if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
drivers/infiniband/hw/mlx5/qp.c
1205
kvfree(qp->rq.wrid);
drivers/infiniband/hw/mlx5/qp.c
1430
struct mlx5_ib_rq *rq, void *qpin,
drivers/infiniband/hw/mlx5/qp.c
1433
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
drivers/infiniband/hw/mlx5/qp.c
1439
struct ib_umem *umem = rq->base.ubuffer.umem;
drivers/infiniband/hw/mlx5/qp.c
1465
if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
drivers/infiniband/hw/mlx5/qp.c
1479
if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
drivers/infiniband/hw/mlx5/qp.c
1492
err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
drivers/infiniband/hw/mlx5/qp.c
1500
struct mlx5_ib_rq *rq)
drivers/infiniband/hw/mlx5/qp.c
1502
mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
drivers/infiniband/hw/mlx5/qp.c
1506
struct mlx5_ib_rq *rq,
drivers/infiniband/hw/mlx5/qp.c
1513
mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
drivers/infiniband/hw/mlx5/qp.c
1517
struct mlx5_ib_rq *rq, u32 tdn,
drivers/infiniband/hw/mlx5/qp.c
1535
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
drivers/infiniband/hw/mlx5/qp.c
1554
rq->tirn = MLX5_GET(create_tir_out, out, tirn);
drivers/infiniband/hw/mlx5/qp.c
1559
destroy_raw_packet_qp_tir(dev, rq, 0, pd);
drivers/infiniband/hw/mlx5/qp.c
1574
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
drivers/infiniband/hw/mlx5/qp.c
1582
if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx5/qp.c
1605
if (qp->rq.wqe_cnt) {
drivers/infiniband/hw/mlx5/qp.c
1606
rq->base.container_mibqp = qp;
drivers/infiniband/hw/mlx5/qp.c
1609
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
drivers/infiniband/hw/mlx5/qp.c
1611
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
drivers/infiniband/hw/mlx5/qp.c
1612
err = create_raw_packet_qp_rq(dev, rq, in, pd,
drivers/infiniband/hw/mlx5/qp.c
1617
err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
drivers/infiniband/hw/mlx5/qp.c
1623
resp->rqn = rq->base.mqp.qpn;
drivers/infiniband/hw/mlx5/qp.c
1625
resp->tirn = rq->tirn;
drivers/infiniband/hw/mlx5/qp.c
1646
rq->base.mqp.qpn;
drivers/infiniband/hw/mlx5/qp.c
1650
destroy_raw_packet_qp_rq(dev, rq);
drivers/infiniband/hw/mlx5/qp.c
1666
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
drivers/infiniband/hw/mlx5/qp.c
1668
if (qp->rq.wqe_cnt) {
drivers/infiniband/hw/mlx5/qp.c
1669
destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
drivers/infiniband/hw/mlx5/qp.c
1670
destroy_raw_packet_qp_rq(dev, rq);
drivers/infiniband/hw/mlx5/qp.c
1683
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
drivers/infiniband/hw/mlx5/qp.c
1686
rq->rq = &qp->rq;
drivers/infiniband/hw/mlx5/qp.c
1688
rq->doorbell = &qp->db;
drivers/infiniband/hw/mlx5/qp.c
2092
spin_lock_init(&qp->rq.lock);
drivers/infiniband/hw/mlx5/qp.c
2110
if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
drivers/infiniband/hw/mlx5/qp.c
2111
ucmd->rq_wqe_count != qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx5/qp.c
2146
if (qp->rq.wqe_cnt) {
drivers/infiniband/hw/mlx5/qp.c
2147
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
drivers/infiniband/hw/mlx5/qp.c
2148
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
drivers/infiniband/hw/mlx5/qp.c
2253
spin_lock_init(&qp->rq.lock);
drivers/infiniband/hw/mlx5/qp.c
2267
&qp->raw_packet_qp.rq.base :
drivers/infiniband/hw/mlx5/qp.c
2277
if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
drivers/infiniband/hw/mlx5/qp.c
2278
ucmd->rq_wqe_count != qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx5/qp.c
2334
if (qp->rq.wqe_cnt) {
drivers/infiniband/hw/mlx5/qp.c
2335
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
drivers/infiniband/hw/mlx5/qp.c
2336
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
drivers/infiniband/hw/mlx5/qp.c
2453
spin_lock_init(&qp->rq.lock);
drivers/infiniband/hw/mlx5/qp.c
2492
if (qp->rq.wqe_cnt) {
drivers/infiniband/hw/mlx5/qp.c
2493
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
drivers/infiniband/hw/mlx5/qp.c
2494
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
drivers/infiniband/hw/mlx5/qp.c
253
struct mlx5_ib_wq *wq = &qp->rq;
drivers/infiniband/hw/mlx5/qp.c
2674
&qp->raw_packet_qp.rq.base :
drivers/infiniband/hw/mlx5/qp.c
273
struct mlx5_ib_wq *wq = &qp->rq;
drivers/infiniband/hw/mlx5/qp.c
3799
struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
drivers/infiniband/hw/mlx5/qp.c
3812
MLX5_SET(modify_rq_in, in, rq_state, rq->state);
drivers/infiniband/hw/mlx5/qp.c
3829
err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
drivers/infiniband/hw/mlx5/qp.c
3833
rq->state = new_state;
drivers/infiniband/hw/mlx5/qp.c
3921
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
drivers/infiniband/hw/mlx5/qp.c
3923
int modify_rq = !!qp->rq.wqe_cnt;
drivers/infiniband/hw/mlx5/qp.c
3962
err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
drivers/infiniband/hw/mlx5/qp.c
4070
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
drivers/infiniband/hw/mlx5/qp.c
4074
if (!qp->rq.wqe_cnt)
drivers/infiniband/hw/mlx5/qp.c
4077
MLX5_SET(modify_rq_in, in, rq_state, rq->state);
drivers/infiniband/hw/mlx5/qp.c
4087
return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
drivers/infiniband/hw/mlx5/qp.c
4322
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
drivers/infiniband/hw/mlx5/qp.c
444
qp->rq.max_gs = 0;
drivers/infiniband/hw/mlx5/qp.c
4448
qp->rq.head = 0;
drivers/infiniband/hw/mlx5/qp.c
4449
qp->rq.tail = 0;
drivers/infiniband/hw/mlx5/qp.c
445
qp->rq.wqe_cnt = 0;
drivers/infiniband/hw/mlx5/qp.c
446
qp->rq.wqe_shift = 0;
drivers/infiniband/hw/mlx5/qp.c
453
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
drivers/infiniband/hw/mlx5/qp.c
456
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
drivers/infiniband/hw/mlx5/qp.c
457
if ((1 << qp->rq.wqe_shift) /
drivers/infiniband/hw/mlx5/qp.c
461
qp->rq.max_gs =
drivers/infiniband/hw/mlx5/qp.c
462
(1 << qp->rq.wqe_shift) /
drivers/infiniband/hw/mlx5/qp.c
465
qp->rq.max_post = qp->rq.wqe_cnt;
drivers/infiniband/hw/mlx5/qp.c
474
qp->rq.wqe_cnt = wq_size / wqe_size;
drivers/infiniband/hw/mlx5/qp.c
482
qp->rq.wqe_shift = ilog2(wqe_size);
drivers/infiniband/hw/mlx5/qp.c
483
qp->rq.max_gs =
drivers/infiniband/hw/mlx5/qp.c
484
(1 << qp->rq.wqe_shift) /
drivers/infiniband/hw/mlx5/qp.c
487
qp->rq.max_post = qp->rq.wqe_cnt;
drivers/infiniband/hw/mlx5/qp.c
4875
struct mlx5_ib_rq *rq,
drivers/infiniband/hw/mlx5/qp.c
4888
err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
drivers/infiniband/hw/mlx5/qp.c
4894
rq->state = *rq_state;
drivers/infiniband/hw/mlx5/qp.c
4936
qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
drivers/infiniband/hw/mlx5/qp.c
4952
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
drivers/infiniband/hw/mlx5/qp.c
4963
if (qp->rq.wqe_cnt) {
drivers/infiniband/hw/mlx5/qp.c
4964
err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
drivers/infiniband/hw/mlx5/qp.c
5142
qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
drivers/infiniband/hw/mlx5/qp.c
5143
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
drivers/infiniband/hw/mlx5/qp.c
667
base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
drivers/infiniband/hw/mlx5/qp.c
670
base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
drivers/infiniband/hw/mlx5/qp.c
993
qp->rq.offset = 0;
drivers/infiniband/hw/mlx5/qp.c
995
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
drivers/infiniband/hw/mlx5/qp.h
40
struct mlx5_core_qp *rq);
drivers/infiniband/hw/mlx5/qp.h
47
struct mlx5_core_qp *rq);
drivers/infiniband/hw/mlx5/qpc.c
600
struct mlx5_core_qp *rq)
drivers/infiniband/hw/mlx5/qpc.c
609
rq->uid = MLX5_GET(create_rq_in, in, uid);
drivers/infiniband/hw/mlx5/qpc.c
610
rq->qpn = rqn;
drivers/infiniband/hw/mlx5/qpc.c
611
err = create_resource_common(dev, rq, MLX5_RES_RQ);
drivers/infiniband/hw/mlx5/qpc.c
618
destroy_rq_tracked(dev, rq->qpn, rq->uid);
drivers/infiniband/hw/mlx5/qpc.c
624
struct mlx5_core_qp *rq)
drivers/infiniband/hw/mlx5/qpc.c
632
modify_resource_common_state(dev, rq, true);
drivers/infiniband/hw/mlx5/qpc.c
633
ret = destroy_rq_tracked(dev, rq->qpn, rq->uid);
drivers/infiniband/hw/mlx5/qpc.c
635
modify_resource_common_state(dev, rq, false);
drivers/infiniband/hw/mlx5/qpc.c
638
destroy_resource_common(dev, rq);
drivers/infiniband/hw/mlx5/wr.c
1229
spin_lock_irqsave(&qp->rq.lock, flags);
drivers/infiniband/hw/mlx5/wr.c
1231
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
drivers/infiniband/hw/mlx5/wr.c
1234
if (mlx5r_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
drivers/infiniband/hw/mlx5/wr.c
1240
if (unlikely(wr->num_sge > qp->rq.max_gs)) {
drivers/infiniband/hw/mlx5/wr.c
1246
scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
drivers/infiniband/hw/mlx5/wr.c
1253
if (i < qp->rq.max_gs) {
drivers/infiniband/hw/mlx5/wr.c
1261
set_sig_seg(sig, qp->rq.max_gs);
drivers/infiniband/hw/mlx5/wr.c
1264
qp->rq.wrid[ind] = wr->wr_id;
drivers/infiniband/hw/mlx5/wr.c
1266
ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
drivers/infiniband/hw/mlx5/wr.c
1271
qp->rq.head += nreq;
drivers/infiniband/hw/mlx5/wr.c
1278
*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
drivers/infiniband/hw/mlx5/wr.c
1281
spin_unlock_irqrestore(&qp->rq.lock, flags);
drivers/infiniband/hw/mthca/mthca_cq.c
538
(*cur_qp)->rq.max];
drivers/infiniband/hw/mthca/mthca_cq.c
548
wq = &(*cur_qp)->rq;
drivers/infiniband/hw/mthca/mthca_provider.c
498
qp->rq.db_index = ucmd.rq_db_index;
drivers/infiniband/hw/mthca/mthca_provider.c
545
init_attr->cap.max_recv_wr = qp->rq.max;
drivers/infiniband/hw/mthca/mthca_provider.c
547
init_attr->cap.max_recv_sge = qp->rq.max_gs;
drivers/infiniband/hw/mthca/mthca_provider.c
569
to_mqp(qp)->rq.db_index);
drivers/infiniband/hw/mthca/mthca_provider.h
267
struct mthca_wq rq;
drivers/infiniband/hw/mthca/mthca_qp.c
1004
for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
drivers/infiniband/hw/mthca/mthca_qp.c
1005
qp->rq.wqe_shift++)
drivers/infiniband/hw/mthca/mthca_qp.c
1052
qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
drivers/infiniband/hw/mthca/mthca_qp.c
1066
qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
drivers/infiniband/hw/mthca/mthca_qp.c
1137
qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
drivers/infiniband/hw/mthca/mthca_qp.c
1138
qp->qpn, &qp->rq.db);
drivers/infiniband/hw/mthca/mthca_qp.c
1139
if (qp->rq.db_index < 0)
drivers/infiniband/hw/mthca/mthca_qp.c
1145
mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
drivers/infiniband/hw/mthca/mthca_qp.c
1158
mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
drivers/infiniband/hw/mthca/mthca_qp.c
1182
mthca_wq_reset(&qp->rq);
drivers/infiniband/hw/mthca/mthca_qp.c
1185
spin_lock_init(&qp->rq.lock);
drivers/infiniband/hw/mthca/mthca_qp.c
1217
qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
drivers/infiniband/hw/mthca/mthca_qp.c
1219
for (i = 0; i < qp->rq.max; ++i) {
drivers/infiniband/hw/mthca/mthca_qp.c
1221
next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
drivers/infiniband/hw/mthca/mthca_qp.c
1222
qp->rq.wqe_shift);
drivers/infiniband/hw/mthca/mthca_qp.c
1226
(void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
drivers/infiniband/hw/mthca/mthca_qp.c
1238
for (i = 0; i < qp->rq.max; ++i) {
drivers/infiniband/hw/mthca/mthca_qp.c
1240
next->nda_op = htonl((((i + 1) % qp->rq.max) <<
drivers/infiniband/hw/mthca/mthca_qp.c
1241
qp->rq.wqe_shift) | 1);
drivers/infiniband/hw/mthca/mthca_qp.c
1247
qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
drivers/infiniband/hw/mthca/mthca_qp.c
1273
qp->rq.max = cap->max_recv_wr ?
drivers/infiniband/hw/mthca/mthca_qp.c
1278
qp->rq.max = cap->max_recv_wr;
drivers/infiniband/hw/mthca/mthca_qp.c
1282
qp->rq.max_gs = cap->max_recv_sge;
drivers/infiniband/hw/mthca/mthca_qp.c
1774
qp->wrid[ind + qp->rq.max] = wr->wr_id;
drivers/infiniband/hw/mthca/mthca_qp.c
1845
spin_lock_irqsave(&qp->rq.lock, flags);
drivers/infiniband/hw/mthca/mthca_qp.c
1849
ind = qp->rq.next_ind;
drivers/infiniband/hw/mthca/mthca_qp.c
1852
if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
drivers/infiniband/hw/mthca/mthca_qp.c
1855
qp->rq.head, qp->rq.tail,
drivers/infiniband/hw/mthca/mthca_qp.c
1856
qp->rq.max, nreq);
drivers/infiniband/hw/mthca/mthca_qp.c
1863
prev_wqe = qp->rq.last;
drivers/infiniband/hw/mthca/mthca_qp.c
1864
qp->rq.last = wqe;
drivers/infiniband/hw/mthca/mthca_qp.c
1873
if (unlikely(wr->num_sge > qp->rq.max_gs)) {
drivers/infiniband/hw/mthca/mthca_qp.c
1894
if (unlikely(ind >= qp->rq.max))
drivers/infiniband/hw/mthca/mthca_qp.c
1895
ind -= qp->rq.max;
drivers/infiniband/hw/mthca/mthca_qp.c
1903
mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
drivers/infiniband/hw/mthca/mthca_qp.c
1907
qp->rq.next_ind = ind;
drivers/infiniband/hw/mthca/mthca_qp.c
1908
qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
drivers/infiniband/hw/mthca/mthca_qp.c
1916
mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
drivers/infiniband/hw/mthca/mthca_qp.c
1921
qp->rq.next_ind = ind;
drivers/infiniband/hw/mthca/mthca_qp.c
1922
qp->rq.head += nreq;
drivers/infiniband/hw/mthca/mthca_qp.c
1924
spin_unlock_irqrestore(&qp->rq.lock, flags);
drivers/infiniband/hw/mthca/mthca_qp.c
2104
qp->wrid[ind + qp->rq.max] = wr->wr_id;
drivers/infiniband/hw/mthca/mthca_qp.c
211
return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
drivers/infiniband/hw/mthca/mthca_qp.c
213
return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
drivers/infiniband/hw/mthca/mthca_qp.c
214
((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
drivers/infiniband/hw/mthca/mthca_qp.c
2174
spin_lock_irqsave(&qp->rq.lock, flags);
drivers/infiniband/hw/mthca/mthca_qp.c
2178
ind = qp->rq.head & (qp->rq.max - 1);
drivers/infiniband/hw/mthca/mthca_qp.c
2181
if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
drivers/infiniband/hw/mthca/mthca_qp.c
2184
qp->rq.head, qp->rq.tail,
drivers/infiniband/hw/mthca/mthca_qp.c
2185
qp->rq.max, nreq);
drivers/infiniband/hw/mthca/mthca_qp.c
2197
if (unlikely(wr->num_sge > qp->rq.max_gs)) {
drivers/infiniband/hw/mthca/mthca_qp.c
2208
if (i < qp->rq.max_gs)
drivers/infiniband/hw/mthca/mthca_qp.c
2214
if (unlikely(ind >= qp->rq.max))
drivers/infiniband/hw/mthca/mthca_qp.c
2215
ind -= qp->rq.max;
drivers/infiniband/hw/mthca/mthca_qp.c
2219
qp->rq.head += nreq;
drivers/infiniband/hw/mthca/mthca_qp.c
2226
*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
drivers/infiniband/hw/mthca/mthca_qp.c
2229
spin_unlock_irqrestore(&qp->rq.lock, flags);
drivers/infiniband/hw/mthca/mthca_qp.c
506
qp_attr->cap.max_recv_wr = qp->rq.max;
drivers/infiniband/hw/mthca/mthca_qp.c
508
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
drivers/infiniband/hw/mthca/mthca_qp.c
615
if (qp->rq.max)
drivers/infiniband/hw/mthca/mthca_qp.c
616
qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
drivers/infiniband/hw/mthca/mthca_qp.c
617
qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
drivers/infiniband/hw/mthca/mthca_qp.c
777
qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
drivers/infiniband/hw/mthca/mthca_qp.c
843
mthca_wq_reset(&qp->rq);
drivers/infiniband/hw/mthca/mthca_qp.c
844
qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
drivers/infiniband/hw/mthca/mthca_qp.c
848
*qp->rq.db = 0;
drivers/infiniband/hw/mthca/mthca_qp.c
874
spin_lock(&qp->rq.lock);
drivers/infiniband/hw/mthca/mthca_qp.c
876
spin_unlock(&qp->rq.lock);
drivers/infiniband/hw/mthca/mthca_qp.c
977
qp->rq.max_gs = min_t(int, dev->limits.max_sg,
drivers/infiniband/hw/mthca/mthca_qp.c
978
(min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
drivers/infiniband/hw/mthca/mthca_qp.c
999
qp->rq.max_gs * sizeof (struct mthca_data_seg);
drivers/infiniband/hw/ocrdma/ocrdma.h
374
struct ocrdma_qp_hwq_info rq;
drivers/infiniband/hw/ocrdma/ocrdma.h
407
struct ocrdma_qp_hwq_info rq;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2125
qp->rq.head = 0;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2126
qp->rq.tail = 0;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2250
qp->rq.max_cnt = max_rqe_allocated;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2253
qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2254
if (!qp->rq.va)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2256
qp->rq.pa = pa;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2257
qp->rq.len = len;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2258
qp->rq.entry_size = dev->attr.rqe_size;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2269
cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2334
qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2357
qp->rq.max_cnt = max_rqe_allocated;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2358
qp->rq.max_wqe_idx = max_rqe_allocated - 1;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2450
if (qp->rq.va)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2451
dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2731
if (!qp->srq && qp->rq.va)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2732
dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2767
srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2768
if (!srq->rq.va) {
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2774
srq->rq.entry_size = dev->attr.rqe_size;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2775
srq->rq.pa = pa;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2776
srq->rq.len = len;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2777
srq->rq.max_cnt = max_rqe_allocated;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2795
srq->rq.dbid = rsp->id;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2800
srq->rq.max_cnt = max_rqe_allocated;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2801
srq->rq.max_wqe_idx = max_rqe_allocated - 1;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2802
srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2807
dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2840
cmd->id = srq->rq.dbid;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2866
if (srq->rq.va)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2867
dma_free_coherent(&pdev->dev, srq->rq.len,
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
2868
srq->rq.va, srq->rq.pa);
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
1789
} rq;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1195
uresp.rq_dbid = qp->rq.dbid;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1197
uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1198
uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1199
uresp.num_rqe_allocated = qp->rq.max_cnt;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1261
kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1281
qp->rq.max_sges = attrs->cap.max_recv_sge;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1488
qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1490
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1566
return (qp->rq.tail == qp->rq.head);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1630
wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1632
qp->srq->rq.max_wqe_idx;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1635
ocrdma_hwq_inc_tail(&qp->srq->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1640
ocrdma_hwq_inc_tail(&qp->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1723
ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1724
PAGE_ALIGN(qp->rq.len));
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1741
uresp.rq_dbid = srq->rq.dbid;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1743
uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1744
uresp.rq_page_size = srq->rq.len;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1748
uresp.num_rqe_allocated = srq->rq.max_cnt;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1791
srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1798
srq->bit_fields_len = (srq->rq.max_cnt / 32) +
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1799
(srq->rq.max_cnt % 32 ? 1 : 0);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1866
ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1867
PAGE_ALIGN(srq->rq.len));
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2195
u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2236
if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2237
wr->num_sge > qp->rq.max_sges) {
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2242
rqe = ocrdma_hwq_head(&qp->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2245
qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2253
ocrdma_hwq_inc_head(&qp->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2274
BUG_ON(indx >= srq->rq.max_cnt);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2286
u32 val = srq->rq.dbid | (1 << 16);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2304
if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2305
wr->num_sge > srq->rq.max_sges) {
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2311
rqe = ocrdma_hwq_head(&srq->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2320
ocrdma_hwq_inc_head(&srq->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2498
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2499
ocrdma_hwq_inc_tail(&qp->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2636
wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2637
OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2644
ocrdma_hwq_inc_tail(&srq->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2695
ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2698
ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2702
ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2705
ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2711
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2712
ocrdma_hwq_inc_tail(&qp->rq);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2826
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
2827
ocrdma_hwq_inc_tail(&qp->rq);
drivers/infiniband/hw/qedr/qedr.h
378
struct qedr_qp_hwq_info rq;
drivers/infiniband/hw/qedr/qedr_roce_cm.c
106
qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
drivers/infiniband/hw/qedr/qedr_roce_cm.c
108
qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
drivers/infiniband/hw/qedr/qedr_roce_cm.c
110
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
drivers/infiniband/hw/qedr/qedr_roce_cm.c
112
*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
drivers/infiniband/hw/qedr/qedr_roce_cm.c
114
*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
drivers/infiniband/hw/qedr/qedr_roce_cm.c
117
qedr_inc_sw_gsi_cons(&qp->rq);
drivers/infiniband/hw/qedr/qedr_roce_cm.c
339
qp->rq.max_wr = attrs->cap.max_recv_wr;
drivers/infiniband/hw/qedr/qedr_roce_cm.c
342
qp->rqe_wr_id = kzalloc_objs(*qp->rqe_wr_id, qp->rq.max_wr);
drivers/infiniband/hw/qedr/qedr_roce_cm.c
649
memset(&qp->rqe_wr_id[qp->rq.prod], 0,
drivers/infiniband/hw/qedr/qedr_roce_cm.c
650
sizeof(qp->rqe_wr_id[qp->rq.prod]));
drivers/infiniband/hw/qedr/qedr_roce_cm.c
651
qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
drivers/infiniband/hw/qedr/qedr_roce_cm.c
652
qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
drivers/infiniband/hw/qedr/qedr_roce_cm.c
654
qedr_inc_sw_prod(&qp->rq);
drivers/infiniband/hw/qedr/qedr_roce_cm.c
679
while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
drivers/infiniband/hw/qedr/qedr_roce_cm.c
683
wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
drivers/infiniband/hw/qedr/qedr_roce_cm.c
686
wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
drivers/infiniband/hw/qedr/qedr_roce_cm.c
689
wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
drivers/infiniband/hw/qedr/qedr_roce_cm.c
691
ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
drivers/infiniband/hw/qedr/qedr_roce_cm.c
694
vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
drivers/infiniband/hw/qedr/qedr_roce_cm.c
699
wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
drivers/infiniband/hw/qedr/qedr_roce_cm.c
703
qedr_inc_sw_cons(&qp->rq);
drivers/infiniband/hw/qedr/qedr_roce_cm.c
723
num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
drivers/infiniband/hw/qedr/verbs.c
1377
qedr_reset_qp_hwq_info(&qp->rq);
drivers/infiniband/hw/qedr/verbs.c
1379
qp->rq.max_sges = attrs->cap.max_recv_sge;
drivers/infiniband/hw/qedr/verbs.c
1382
qp->rq.max_sges, qp->rq_cq->icid);
drivers/infiniband/hw/qedr/verbs.c
1409
qp->rq.db = dev->db_addr +
drivers/infiniband/hw/qedr/verbs.c
1411
qp->rq.db_data.data.icid = qp->icid;
drivers/infiniband/hw/qedr/verbs.c
1412
rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
drivers/infiniband/hw/qedr/verbs.c
1956
qp->rq.max_wr = attrs->cap.max_recv_wr;
drivers/infiniband/hw/qedr/verbs.c
2009
qp->rq.db = dev->db_addr +
drivers/infiniband/hw/qedr/verbs.c
2011
qp->rq.db_data.data.icid = qp->icid;
drivers/infiniband/hw/qedr/verbs.c
2012
qp->rq.iwarp_db2 = dev->db_addr +
drivers/infiniband/hw/qedr/verbs.c
2014
qp->rq.iwarp_db2_data.data.icid = qp->icid;
drivers/infiniband/hw/qedr/verbs.c
2015
qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
drivers/infiniband/hw/qedr/verbs.c
2017
rc = qedr_db_recovery_add(dev, qp->rq.db,
drivers/infiniband/hw/qedr/verbs.c
2018
&qp->rq.db_data,
drivers/infiniband/hw/qedr/verbs.c
2024
rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
drivers/infiniband/hw/qedr/verbs.c
2025
&qp->rq.iwarp_db2_data,
drivers/infiniband/hw/qedr/verbs.c
2059
rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
drivers/infiniband/hw/qedr/verbs.c
2063
in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
drivers/infiniband/hw/qedr/verbs.c
2064
in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
drivers/infiniband/hw/qedr/verbs.c
2124
rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
drivers/infiniband/hw/qedr/verbs.c
2144
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
drivers/infiniband/hw/qedr/verbs.c
2154
qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
drivers/infiniband/hw/qedr/verbs.c
2157
qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
drivers/infiniband/hw/qedr/verbs.c
2158
&qp->rq.iwarp_db2_data);
drivers/infiniband/hw/qedr/verbs.c
2204
qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
drivers/infiniband/hw/qedr/verbs.c
2207
qp->rqe_wr_id = kzalloc_objs(*qp->rqe_wr_id, qp->rq.max_wr);
drivers/infiniband/hw/qedr/verbs.c
2222
n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
drivers/infiniband/hw/qedr/verbs.c
2389
writel(qp->rq.db_data.raw, qp->rq.db);
drivers/infiniband/hw/qedr/verbs.c
2442
if ((qp->rq.prod != qp->rq.cons) ||
drivers/infiniband/hw/qedr/verbs.c
2446
qp->rq.prod, qp->rq.cons, qp->sq.prod,
drivers/infiniband/hw/qedr/verbs.c
2772
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
drivers/infiniband/hw/qedr/verbs.c
2774
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
drivers/infiniband/hw/qedr/verbs.c
3931
if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
drivers/infiniband/hw/qedr/verbs.c
3933
wr->num_sge > qp->rq.max_sges) {
drivers/infiniband/hw/qedr/verbs.c
3935
qed_chain_get_elem_left_u32(&qp->rq.pbl),
drivers/infiniband/hw/qedr/verbs.c
3937
qp->rq.max_sges);
drivers/infiniband/hw/qedr/verbs.c
3945
qed_chain_produce(&qp->rq.pbl);
drivers/infiniband/hw/qedr/verbs.c
3968
qed_chain_produce(&qp->rq.pbl);
drivers/infiniband/hw/qedr/verbs.c
3980
qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
drivers/infiniband/hw/qedr/verbs.c
3981
qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
drivers/infiniband/hw/qedr/verbs.c
3983
qedr_inc_sw_prod(&qp->rq);
drivers/infiniband/hw/qedr/verbs.c
3994
qp->rq.db_data.data.value++;
drivers/infiniband/hw/qedr/verbs.c
3996
writel(qp->rq.db_data.raw, qp->rq.db);
drivers/infiniband/hw/qedr/verbs.c
3999
writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
drivers/infiniband/hw/qedr/verbs.c
4325
u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
drivers/infiniband/hw/qedr/verbs.c
4329
while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
drivers/infiniband/hw/qedr/verbs.c
4330
qed_chain_consume(&qp->rq.pbl);
drivers/infiniband/hw/qedr/verbs.c
4331
qedr_inc_sw_cons(&qp->rq);
drivers/infiniband/hw/qedr/verbs.c
4341
while (num_entries && qp->rq.wqe_cons != hw_cons) {
drivers/infiniband/hw/qedr/verbs.c
4348
wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
drivers/infiniband/hw/qedr/verbs.c
4353
while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
drivers/infiniband/hw/qedr/verbs.c
4354
qed_chain_consume(&qp->rq.pbl);
drivers/infiniband/hw/qedr/verbs.c
4355
qedr_inc_sw_cons(&qp->rq);
drivers/infiniband/hw/qedr/verbs.c
4364
if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
186
struct pvrdma_wq rq;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
120
if (qp->rq.ring) {
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
121
atomic_set(&qp->rq.ring->cons_head, 0);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
122
atomic_set(&qp->rq.ring->prod_tail, 0);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
140
qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
141
qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
144
req_cap->max_recv_wr = qp->rq.wqe_cnt;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
145
req_cap->max_recv_sge = qp->rq.max_sg;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
147
qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
149
qp->rq.max_sg);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
150
qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
243
spin_lock_init(&qp->rq.lock);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
318
qp->rq.offset = qp->npages_send * PAGE_SIZE;
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
345
qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
643
qp->rq.offset + n * qp->rq.wqe_size);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
899
spin_lock_irqsave(&qp->rq.lock, flags);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
904
if (unlikely(wr->num_sge > qp->rq.max_sg ||
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
914
qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
939
pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
940
qp->rq.wqe_cnt);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
945
spin_unlock_irqrestore(&qp->rq.lock, flags);
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
952
spin_unlock_irqrestore(&qp->rq.lock, flags);
drivers/infiniband/sw/rdmavt/qp.c
1085
if (srq->rq.max_sge > 1)
drivers/infiniband/sw/rdmavt/qp.c
1087
(srq->rq.max_sge - 1);
drivers/infiniband/sw/rdmavt/qp.c
2214
if ((unsigned)wr->num_sge > srq->rq.max_sge) {
drivers/infiniband/sw/rdmavt/qp.c
2219
spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
drivers/infiniband/sw/rdmavt/qp.c
2220
wq = srq->rq.kwq;
drivers/infiniband/sw/rdmavt/qp.c
2222
if (next >= srq->rq.size)
drivers/infiniband/sw/rdmavt/qp.c
2225
spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
drivers/infiniband/sw/rdmavt/qp.c
2230
wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
drivers/infiniband/sw/rdmavt/qp.c
2240
spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
drivers/infiniband/sw/rdmavt/qp.c
2319
static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
drivers/infiniband/sw/rdmavt/qp.c
2324
head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
drivers/infiniband/sw/rdmavt/qp.c
2326
head = rq->kwq->head;
drivers/infiniband/sw/rdmavt/qp.c
2344
struct rvt_rq *rq;
drivers/infiniband/sw/rdmavt/qp.c
2358
rq = &srq->rq;
drivers/infiniband/sw/rdmavt/qp.c
2363
rq = &qp->r_rq;
drivers/infiniband/sw/rdmavt/qp.c
2367
spin_lock_irqsave(&rq->kwq->c_lock, flags);
drivers/infiniband/sw/rdmavt/qp.c
2372
kwq = rq->kwq;
drivers/infiniband/sw/rdmavt/qp.c
2374
wq = rq->wq;
drivers/infiniband/sw/rdmavt/qp.c
2381
if (tail >= rq->size)
drivers/infiniband/sw/rdmavt/qp.c
2385
head = get_rvt_head(rq, ip);
drivers/infiniband/sw/rdmavt/qp.c
2386
kwq->count = rvt_get_rq_count(rq, head, tail);
drivers/infiniband/sw/rdmavt/qp.c
2394
wqe = rvt_get_rwqe_ptr(rq, tail);
drivers/infiniband/sw/rdmavt/qp.c
2400
if (++tail >= rq->size)
drivers/infiniband/sw/rdmavt/qp.c
2422
rvt_get_rq_count(rq,
drivers/infiniband/sw/rdmavt/qp.c
2423
get_rvt_head(rq, ip), tail);
drivers/infiniband/sw/rdmavt/qp.c
2428
spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
drivers/infiniband/sw/rdmavt/qp.c
2438
spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
drivers/infiniband/sw/rdmavt/qp.c
782
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
drivers/infiniband/sw/rdmavt/qp.c
786
rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
drivers/infiniband/sw/rdmavt/qp.c
787
if (!rq->wq)
drivers/infiniband/sw/rdmavt/qp.c
790
rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
drivers/infiniband/sw/rdmavt/qp.c
791
if (!rq->kwq)
drivers/infiniband/sw/rdmavt/qp.c
793
rq->kwq->curr_wq = rq->wq->wq;
drivers/infiniband/sw/rdmavt/qp.c
796
rq->kwq =
drivers/infiniband/sw/rdmavt/qp.c
798
if (!rq->kwq)
drivers/infiniband/sw/rdmavt/qp.c
800
rq->kwq->curr_wq = rq->kwq->wq;
drivers/infiniband/sw/rdmavt/qp.c
803
spin_lock_init(&rq->kwq->p_lock);
drivers/infiniband/sw/rdmavt/qp.c
804
spin_lock_init(&rq->kwq->c_lock);
drivers/infiniband/sw/rdmavt/qp.c
807
rvt_free_rq(rq);
drivers/infiniband/sw/rdmavt/qp.h
28
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
drivers/infiniband/sw/rdmavt/srq.c
110
rvt_free_rq(&srq->rq);
drivers/infiniband/sw/rdmavt/srq.c
145
srq->rq.max_sge * sizeof(struct ib_sge);
drivers/infiniband/sw/rdmavt/srq.c
167
spin_lock_irq(&srq->rq.kwq->c_lock);
drivers/infiniband/sw/rdmavt/srq.c
173
owq = srq->rq.wq;
drivers/infiniband/sw/rdmavt/srq.c
177
okwq = srq->rq.kwq;
drivers/infiniband/sw/rdmavt/srq.c
181
if (head >= srq->rq.size || tail >= srq->rq.size) {
drivers/infiniband/sw/rdmavt/srq.c
187
n += srq->rq.size - tail;
drivers/infiniband/sw/rdmavt/srq.c
200
wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
drivers/infiniband/sw/rdmavt/srq.c
207
if (++tail >= srq->rq.size)
drivers/infiniband/sw/rdmavt/srq.c
210
srq->rq.kwq = tmp_rq.kwq;
drivers/infiniband/sw/rdmavt/srq.c
212
srq->rq.wq = tmp_rq.wq;
drivers/infiniband/sw/rdmavt/srq.c
219
srq->rq.size = size;
drivers/infiniband/sw/rdmavt/srq.c
222
spin_unlock_irq(&srq->rq.kwq->c_lock);
drivers/infiniband/sw/rdmavt/srq.c
256
spin_lock_irq(&srq->rq.kwq->c_lock);
drivers/infiniband/sw/rdmavt/srq.c
257
if (attr->srq_limit >= srq->rq.size)
drivers/infiniband/sw/rdmavt/srq.c
261
spin_unlock_irq(&srq->rq.kwq->c_lock);
drivers/infiniband/sw/rdmavt/srq.c
266
spin_unlock_irq(&srq->rq.kwq->c_lock);
drivers/infiniband/sw/rdmavt/srq.c
283
attr->max_wr = srq->rq.size - 1;
drivers/infiniband/sw/rdmavt/srq.c
284
attr->max_sge = srq->rq.max_sge;
drivers/infiniband/sw/rdmavt/srq.c
304
kvfree(srq->rq.kwq);
drivers/infiniband/sw/rdmavt/srq.c
54
srq->rq.size = srq_init_attr->attr.max_wr + 1;
drivers/infiniband/sw/rdmavt/srq.c
55
srq->rq.max_sge = srq_init_attr->attr.max_sge;
drivers/infiniband/sw/rdmavt/srq.c
56
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
drivers/infiniband/sw/rdmavt/srq.c
58
if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
drivers/infiniband/sw/rdmavt/srq.c
69
u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
drivers/infiniband/sw/rdmavt/srq.c
71
srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
drivers/infiniband/sw/rdmavt/srq.c
86
spin_lock_init(&srq->rq.lock);
drivers/infiniband/sw/rxe/rxe_qp.c
224
spin_lock_init(&qp->rq.producer_lock);
drivers/infiniband/sw/rxe/rxe_qp.c
225
spin_lock_init(&qp->rq.consumer_lock);
drivers/infiniband/sw/rxe/rxe_qp.c
335
qp->rq.max_wr = init->cap.max_recv_wr;
drivers/infiniband/sw/rxe/rxe_qp.c
336
qp->rq.max_sge = init->cap.max_recv_sge;
drivers/infiniband/sw/rxe/rxe_qp.c
338
qp->rq.max_sge*sizeof(struct ib_sge);
drivers/infiniband/sw/rxe/rxe_qp.c
340
qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
drivers/infiniband/sw/rxe/rxe_qp.c
342
if (!qp->rq.queue) {
drivers/infiniband/sw/rxe/rxe_qp.c
350
qp->rq.queue->buf, qp->rq.queue->buf_size,
drivers/infiniband/sw/rxe/rxe_qp.c
351
&qp->rq.queue->ip);
drivers/infiniband/sw/rxe/rxe_qp.c
360
init->cap.max_recv_wr = qp->rq.max_wr;
drivers/infiniband/sw/rxe/rxe_qp.c
365
vfree(qp->rq.queue->buf);
drivers/infiniband/sw/rxe/rxe_qp.c
366
kfree(qp->rq.queue);
drivers/infiniband/sw/rxe/rxe_qp.c
367
qp->rq.queue = NULL;
drivers/infiniband/sw/rxe/rxe_qp.c
475
init->cap.max_recv_wr = qp->rq.max_wr;
drivers/infiniband/sw/rxe/rxe_qp.c
476
init->cap.max_recv_sge = qp->rq.max_sge;
drivers/infiniband/sw/rxe/rxe_qp.c
572
if (qp->rq.queue)
drivers/infiniband/sw/rxe/rxe_qp.c
573
rxe_queue_reset(qp->rq.queue);
drivers/infiniband/sw/rxe/rxe_qp.c
816
attr->cap.max_recv_wr = qp->rq.max_wr;
drivers/infiniband/sw/rxe/rxe_qp.c
817
attr->cap.max_recv_sge = qp->rq.max_sge;
drivers/infiniband/sw/rxe/rxe_qp.c
889
if (qp->rq.queue)
drivers/infiniband/sw/rxe/rxe_qp.c
890
rxe_queue_cleanup(qp->rq.queue);
drivers/infiniband/sw/rxe/rxe_resp.c
1160
queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_resp.c
1474
struct rxe_queue *q = qp->rq.queue;
drivers/infiniband/sw/rxe/rxe_resp.c
1491
if (!qp->rq.queue)
drivers/infiniband/sw/rxe/rxe_resp.c
262
struct rxe_queue *q = srq->rq.queue;
drivers/infiniband/sw/rxe/rxe_resp.c
272
spin_lock_irqsave(&srq->rq.consumer_lock, flags);
drivers/infiniband/sw/rxe/rxe_resp.c
276
spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
drivers/infiniband/sw/rxe/rxe_resp.c
281
if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
drivers/infiniband/sw/rxe/rxe_resp.c
282
spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
drivers/infiniband/sw/rxe/rxe_resp.c
298
spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
drivers/infiniband/sw/rxe/rxe_resp.c
302
spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
drivers/infiniband/sw/rxe/rxe_resp.c
330
qp->resp.wqe = queue_head(qp->rq.queue,
drivers/infiniband/sw/rxe/rxe_srq.c
137
if (attr->srq_limit > srq->rq.queue->buf->index_mask) {
drivers/infiniband/sw/rxe/rxe_srq.c
140
srq->rq.queue->buf->index_mask);
drivers/infiniband/sw/rxe/rxe_srq.c
155
struct rxe_queue *q = srq->rq.queue;
drivers/infiniband/sw/rxe/rxe_srq.c
168
srq->rq.max_sge*sizeof(struct ib_sge);
drivers/infiniband/sw/rxe/rxe_srq.c
171
udata, mi, &srq->rq.producer_lock,
drivers/infiniband/sw/rxe/rxe_srq.c
172
&srq->rq.consumer_lock);
drivers/infiniband/sw/rxe/rxe_srq.c
176
srq->rq.max_wr = attr->max_wr;
drivers/infiniband/sw/rxe/rxe_srq.c
192
if (srq->rq.queue)
drivers/infiniband/sw/rxe/rxe_srq.c
193
rxe_queue_cleanup(srq->rq.queue);
drivers/infiniband/sw/rxe/rxe_srq.c
56
srq->rq.max_wr = init->attr.max_wr;
drivers/infiniband/sw/rxe/rxe_srq.c
57
srq->rq.max_sge = init->attr.max_sge;
drivers/infiniband/sw/rxe/rxe_srq.c
60
srq->rq.max_sge*sizeof(struct ib_sge);
drivers/infiniband/sw/rxe/rxe_srq.c
62
spin_lock_init(&srq->rq.producer_lock);
drivers/infiniband/sw/rxe/rxe_srq.c
63
spin_lock_init(&srq->rq.consumer_lock);
drivers/infiniband/sw/rxe/rxe_srq.c
65
q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
drivers/infiniband/sw/rxe/rxe_srq.c
88
srq->rq.queue = q;
drivers/infiniband/sw/rxe/rxe_srq.c
89
init->attr.max_wr = srq->rq.max_wr;
drivers/infiniband/sw/rxe/rxe_verbs.c
1002
recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
drivers/infiniband/sw/rxe/rxe_verbs.c
1013
queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
drivers/infiniband/sw/rxe/rxe_verbs.c
1027
struct rxe_rq *rq = &qp->rq;
drivers/infiniband/sw/rxe/rxe_verbs.c
1053
spin_lock_irqsave(&rq->producer_lock, flags);
drivers/infiniband/sw/rxe/rxe_verbs.c
1056
err = post_one_recv(rq, wr);
drivers/infiniband/sw/rxe/rxe_verbs.c
1064
spin_unlock_irqrestore(&rq->producer_lock, flags);
drivers/infiniband/sw/rxe/rxe_verbs.c
15
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
drivers/infiniband/sw/rxe/rxe_verbs.c
499
attr->max_wr = srq->rq.queue->buf->index_mask;
drivers/infiniband/sw/rxe/rxe_verbs.c
500
attr->max_sge = srq->rq.max_sge;
drivers/infiniband/sw/rxe/rxe_verbs.c
516
spin_lock_irqsave(&srq->rq.producer_lock, flags);
drivers/infiniband/sw/rxe/rxe_verbs.c
519
err = post_one_recv(&srq->rq, wr);
drivers/infiniband/sw/rxe/rxe_verbs.c
525
spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
drivers/infiniband/sw/rxe/rxe_verbs.c
970
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
drivers/infiniband/sw/rxe/rxe_verbs.c
979
full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
drivers/infiniband/sw/rxe/rxe_verbs.c
986
if (unlikely(num_sge > rq->max_sge)) {
drivers/infiniband/sw/rxe/rxe_verbs.h
253
struct rxe_rq rq;
drivers/infiniband/sw/rxe/rxe_verbs.h
98
struct rxe_rq rq;
drivers/infiniband/ulp/srp/ib_srp.c
2155
struct request *rq = scsi_cmd_to_rq(scmnd);
drivers/infiniband/ulp/srp/ib_srp.c
2170
WARN_ON_ONCE(rq->tag < 0);
drivers/infiniband/ulp/srp/ib_srp.c
2171
tag = blk_mq_unique_tag(rq);
drivers/input/misc/xen-kbdfront.c
181
static irqreturn_t input_handler(int rq, void *dev_id)
drivers/isdn/hardware/mISDN/avmfritz.c
896
open_bchannel(struct fritzcard *fc, struct channel_req *rq)
drivers/isdn/hardware/mISDN/avmfritz.c
900
if (rq->adr.channel == 0 || rq->adr.channel > 2)
drivers/isdn/hardware/mISDN/avmfritz.c
902
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/avmfritz.c
904
bch = &fc->bch[rq->adr.channel - 1];
drivers/isdn/hardware/mISDN/avmfritz.c
907
bch->ch.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/avmfritz.c
908
rq->ch = &bch->ch;
drivers/isdn/hardware/mISDN/avmfritz.c
921
struct channel_req *rq;
drivers/isdn/hardware/mISDN/avmfritz.c
927
rq = arg;
drivers/isdn/hardware/mISDN/avmfritz.c
928
if (rq->protocol == ISDN_P_TE_S0)
drivers/isdn/hardware/mISDN/avmfritz.c
929
err = fc->isac.open(&fc->isac, rq);
drivers/isdn/hardware/mISDN/avmfritz.c
931
err = open_bchannel(fc, rq);
drivers/isdn/hardware/mISDN/hfcmulti.c
4018
struct channel_req *rq)
drivers/isdn/hardware/mISDN/hfcmulti.c
4026
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/hfcmulti.c
4029
(dch->dev.D.protocol != rq->protocol)) {
drivers/isdn/hardware/mISDN/hfcmulti.c
4032
__func__, dch->dev.D.protocol, rq->protocol);
drivers/isdn/hardware/mISDN/hfcmulti.c
4035
(rq->protocol != ISDN_P_TE_S0))
drivers/isdn/hardware/mISDN/hfcmulti.c
4037
if (dch->dev.D.protocol != rq->protocol) {
drivers/isdn/hardware/mISDN/hfcmulti.c
4038
if (rq->protocol == ISDN_P_TE_S0) {
drivers/isdn/hardware/mISDN/hfcmulti.c
4043
dch->dev.D.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcmulti.c
4051
rq->ch = &dch->dev.D;
drivers/isdn/hardware/mISDN/hfcmulti.c
4059
struct channel_req *rq)
drivers/isdn/hardware/mISDN/hfcmulti.c
4064
if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
drivers/isdn/hardware/mISDN/hfcmulti.c
4066
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/hfcmulti.c
4069
ch = rq->adr.channel;
drivers/isdn/hardware/mISDN/hfcmulti.c
4071
ch = (rq->adr.channel - 1) + (dch->slot - 2);
drivers/isdn/hardware/mISDN/hfcmulti.c
4080
bch->ch.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcmulti.c
4082
rq->ch = &bch->ch;
drivers/isdn/hardware/mISDN/hfcmulti.c
4148
struct channel_req *rq;
drivers/isdn/hardware/mISDN/hfcmulti.c
4157
rq = arg;
drivers/isdn/hardware/mISDN/hfcmulti.c
4158
switch (rq->protocol) {
drivers/isdn/hardware/mISDN/hfcmulti.c
4165
err = open_dchannel(hc, dch, rq); /* locked there */
drivers/isdn/hardware/mISDN/hfcmulti.c
4173
err = open_dchannel(hc, dch, rq); /* locked there */
drivers/isdn/hardware/mISDN/hfcmulti.c
4177
err = open_bchannel(hc, dch, rq);
drivers/isdn/hardware/mISDN/hfcpci.c
1881
struct channel_req *rq)
drivers/isdn/hardware/mISDN/hfcpci.c
1888
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/hfcpci.c
1890
if (rq->adr.channel == 1) {
drivers/isdn/hardware/mISDN/hfcpci.c
1895
if (rq->protocol == ISDN_P_TE_S0) {
drivers/isdn/hardware/mISDN/hfcpci.c
1900
hc->hw.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcpci.c
1901
ch->protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcpci.c
1906
if (rq->protocol != ch->protocol) {
drivers/isdn/hardware/mISDN/hfcpci.c
1909
if (rq->protocol == ISDN_P_TE_S0) {
drivers/isdn/hardware/mISDN/hfcpci.c
1914
hc->hw.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcpci.c
1915
ch->protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcpci.c
1925
rq->ch = ch;
drivers/isdn/hardware/mISDN/hfcpci.c
1932
open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
drivers/isdn/hardware/mISDN/hfcpci.c
1936
if (rq->adr.channel == 0 || rq->adr.channel > 2)
drivers/isdn/hardware/mISDN/hfcpci.c
1938
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/hfcpci.c
1940
bch = &hc->bch[rq->adr.channel - 1];
drivers/isdn/hardware/mISDN/hfcpci.c
1943
bch->ch.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcpci.c
1944
rq->ch = &bch->ch; /* TODO: E-channel */
drivers/isdn/hardware/mISDN/hfcpci.c
1959
struct channel_req *rq;
drivers/isdn/hardware/mISDN/hfcpci.c
1967
rq = arg;
drivers/isdn/hardware/mISDN/hfcpci.c
1968
if ((rq->protocol == ISDN_P_TE_S0) ||
drivers/isdn/hardware/mISDN/hfcpci.c
1969
(rq->protocol == ISDN_P_NT_S0))
drivers/isdn/hardware/mISDN/hfcpci.c
1970
err = open_dchannel(hc, ch, rq);
drivers/isdn/hardware/mISDN/hfcpci.c
1972
err = open_bchannel(hc, rq);
drivers/isdn/hardware/mISDN/hfcsusb.c
417
struct channel_req *rq)
drivers/isdn/hardware/mISDN/hfcsusb.c
423
hw->name, __func__, hw->dch.dev.id, rq->adr.channel,
drivers/isdn/hardware/mISDN/hfcsusb.c
425
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/hfcsusb.c
433
if (rq->adr.channel == 1) {
drivers/isdn/hardware/mISDN/hfcsusb.c
444
hw->protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcsusb.c
445
if (rq->protocol == ISDN_P_TE_S0) {
drivers/isdn/hardware/mISDN/hfcsusb.c
451
ch->protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcsusb.c
454
if (rq->protocol != ch->protocol)
drivers/isdn/hardware/mISDN/hfcsusb.c
462
rq->ch = ch;
drivers/isdn/hardware/mISDN/hfcsusb.c
470
open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
drivers/isdn/hardware/mISDN/hfcsusb.c
474
if (rq->adr.channel == 0 || rq->adr.channel > 2)
drivers/isdn/hardware/mISDN/hfcsusb.c
476
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/hfcsusb.c
481
hw->name, __func__, rq->adr.channel);
drivers/isdn/hardware/mISDN/hfcsusb.c
483
bch = &hw->bch[rq->adr.channel - 1];
drivers/isdn/hardware/mISDN/hfcsusb.c
486
bch->ch.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/hfcsusb.c
487
rq->ch = &bch->ch;
drivers/isdn/hardware/mISDN/hfcsusb.c
527
struct channel_req *rq;
drivers/isdn/hardware/mISDN/hfcsusb.c
535
rq = arg;
drivers/isdn/hardware/mISDN/hfcsusb.c
536
if ((rq->protocol == ISDN_P_TE_S0) ||
drivers/isdn/hardware/mISDN/hfcsusb.c
537
(rq->protocol == ISDN_P_NT_S0))
drivers/isdn/hardware/mISDN/hfcsusb.c
538
err = open_dchannel(hw, ch, rq);
drivers/isdn/hardware/mISDN/hfcsusb.c
540
err = open_bchannel(hw, rq);
drivers/isdn/hardware/mISDN/mISDNipac.c
1480
open_bchannel(struct ipac_hw *ipac, struct channel_req *rq)
drivers/isdn/hardware/mISDN/mISDNipac.c
1484
if (rq->adr.channel == 0 || rq->adr.channel > 2)
drivers/isdn/hardware/mISDN/mISDNipac.c
1486
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/mISDNipac.c
1488
bch = &ipac->hscx[rq->adr.channel - 1].bch;
drivers/isdn/hardware/mISDN/mISDNipac.c
1492
bch->ch.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/mISDNipac.c
1493
rq->ch = &bch->ch;
drivers/isdn/hardware/mISDN/mISDNipac.c
1532
struct channel_req *rq;
drivers/isdn/hardware/mISDN/mISDNipac.c
1538
rq = arg;
drivers/isdn/hardware/mISDN/mISDNipac.c
1539
if (rq->protocol == ISDN_P_TE_S0)
drivers/isdn/hardware/mISDN/mISDNipac.c
1540
err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
drivers/isdn/hardware/mISDN/mISDNipac.c
1542
err = open_bchannel(ipac, rq);
drivers/isdn/hardware/mISDN/mISDNipac.c
744
open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
drivers/isdn/hardware/mISDN/mISDNipac.c
748
if (rq->protocol != ISDN_P_TE_S0)
drivers/isdn/hardware/mISDN/mISDNipac.c
750
if (rq->adr.channel == 1)
drivers/isdn/hardware/mISDN/mISDNipac.c
753
rq->ch = &isac->dch.dev.D;
drivers/isdn/hardware/mISDN/mISDNipac.c
754
rq->ch->protocol = rq->protocol;
drivers/isdn/hardware/mISDN/mISDNipac.c
756
_queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
drivers/isdn/hardware/mISDN/mISDNipac.c
762
open_dchannel(struct isac_hw *isac, struct channel_req *rq)
drivers/isdn/hardware/mISDN/mISDNipac.c
764
return open_dchannel_caller(isac, rq, __builtin_return_address(0));
drivers/isdn/hardware/mISDN/mISDNisar.c
1636
isar_open(struct isar_hw *isar, struct channel_req *rq)
drivers/isdn/hardware/mISDN/mISDNisar.c
1640
if (rq->adr.channel == 0 || rq->adr.channel > 2)
drivers/isdn/hardware/mISDN/mISDNisar.c
1642
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/mISDNisar.c
1644
bch = &isar->ch[rq->adr.channel - 1].bch;
drivers/isdn/hardware/mISDN/mISDNisar.c
1647
bch->ch.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/mISDNisar.c
1648
rq->ch = &bch->ch;
drivers/isdn/hardware/mISDN/netjet.c
850
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
drivers/isdn/hardware/mISDN/netjet.c
854
if (rq->adr.channel == 0 || rq->adr.channel > 2)
drivers/isdn/hardware/mISDN/netjet.c
856
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/netjet.c
858
bch = &card->bc[rq->adr.channel - 1].bch;
drivers/isdn/hardware/mISDN/netjet.c
862
bch->ch.protocol = rq->protocol;
drivers/isdn/hardware/mISDN/netjet.c
863
rq->ch = &bch->ch;
drivers/isdn/hardware/mISDN/netjet.c
876
struct channel_req *rq;
drivers/isdn/hardware/mISDN/netjet.c
882
rq = arg;
drivers/isdn/hardware/mISDN/netjet.c
883
if (rq->protocol == ISDN_P_TE_S0)
drivers/isdn/hardware/mISDN/netjet.c
884
err = card->isac.open(&card->isac, rq);
drivers/isdn/hardware/mISDN/netjet.c
886
err = open_bchannel(card, rq);
drivers/isdn/hardware/mISDN/speedfax.c
242
struct channel_req *rq;
drivers/isdn/hardware/mISDN/speedfax.c
248
rq = arg;
drivers/isdn/hardware/mISDN/speedfax.c
249
if (rq->protocol == ISDN_P_TE_S0)
drivers/isdn/hardware/mISDN/speedfax.c
250
err = sf->isac.open(&sf->isac, rq);
drivers/isdn/hardware/mISDN/speedfax.c
252
err = sf->isar.open(&sf->isar, rq);
drivers/isdn/hardware/mISDN/w6692.c
1000
rq->ch = &bch->ch;
drivers/isdn/hardware/mISDN/w6692.c
1159
open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
drivers/isdn/hardware/mISDN/w6692.c
1163
if (rq->protocol != ISDN_P_TE_S0)
drivers/isdn/hardware/mISDN/w6692.c
1165
if (rq->adr.channel == 1)
drivers/isdn/hardware/mISDN/w6692.c
1168
rq->ch = &card->dch.dev.D;
drivers/isdn/hardware/mISDN/w6692.c
1169
rq->ch->protocol = rq->protocol;
drivers/isdn/hardware/mISDN/w6692.c
1171
_queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
drivers/isdn/hardware/mISDN/w6692.c
1182
struct channel_req *rq;
drivers/isdn/hardware/mISDN/w6692.c
1188
rq = arg;
drivers/isdn/hardware/mISDN/w6692.c
1189
if (rq->protocol == ISDN_P_TE_S0)
drivers/isdn/hardware/mISDN/w6692.c
1190
err = open_dchannel(card, rq, __builtin_return_address(0));
drivers/isdn/hardware/mISDN/w6692.c
1192
err = open_bchannel(card, rq);
drivers/isdn/hardware/mISDN/w6692.c
988
open_bchannel(struct w6692_hw *card, struct channel_req *rq)
drivers/isdn/hardware/mISDN/w6692.c
992
if (rq->adr.channel == 0 || rq->adr.channel > 2)
drivers/isdn/hardware/mISDN/w6692.c
994
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/hardware/mISDN/w6692.c
996
bch = &card->bc[rq->adr.channel - 1].bch;
drivers/isdn/hardware/mISDN/w6692.c
999
bch->ch.protocol = rq->protocol;
drivers/isdn/mISDN/l1oip_core.c
1006
open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
drivers/isdn/mISDN/l1oip_core.c
1011
if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
drivers/isdn/mISDN/l1oip_core.c
1013
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/mISDN/l1oip_core.c
1015
ch = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. */
drivers/isdn/mISDN/l1oip_core.c
1024
bch->ch.protocol = rq->protocol;
drivers/isdn/mISDN/l1oip_core.c
1025
rq->ch = &bch->ch;
drivers/isdn/mISDN/l1oip_core.c
1037
struct channel_req *rq;
drivers/isdn/mISDN/l1oip_core.c
1045
rq = arg;
drivers/isdn/mISDN/l1oip_core.c
1046
switch (rq->protocol) {
drivers/isdn/mISDN/l1oip_core.c
1053
err = open_dchannel(hc, dch, rq);
drivers/isdn/mISDN/l1oip_core.c
1061
err = open_dchannel(hc, dch, rq);
drivers/isdn/mISDN/l1oip_core.c
1064
err = open_bchannel(hc, dch, rq);
drivers/isdn/mISDN/l1oip_core.c
979
open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
drivers/isdn/mISDN/l1oip_core.c
984
if (rq->protocol == ISDN_P_NONE)
drivers/isdn/mISDN/l1oip_core.c
987
(dch->dev.D.protocol != rq->protocol)) {
drivers/isdn/mISDN/l1oip_core.c
990
__func__, dch->dev.D.protocol, rq->protocol);
drivers/isdn/mISDN/l1oip_core.c
992
if (dch->dev.D.protocol != rq->protocol)
drivers/isdn/mISDN/l1oip_core.c
993
dch->dev.D.protocol = rq->protocol;
drivers/isdn/mISDN/l1oip_core.c
999
rq->ch = &dch->dev.D;
drivers/isdn/mISDN/layer2.c
2112
struct channel_req rq;
drivers/isdn/mISDN/layer2.c
2145
rq.protocol = ISDN_P_NT_E1;
drivers/isdn/mISDN/layer2.c
2147
rq.protocol = ISDN_P_NT_S0;
drivers/isdn/mISDN/layer2.c
2148
rq.adr.channel = 0;
drivers/isdn/mISDN/layer2.c
2149
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
drivers/isdn/mISDN/layer2.c
2170
rq.protocol = ISDN_P_TE_E1;
drivers/isdn/mISDN/layer2.c
2172
rq.protocol = ISDN_P_TE_S0;
drivers/isdn/mISDN/layer2.c
2173
rq.adr.channel = 0;
drivers/isdn/mISDN/layer2.c
2174
l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
drivers/isdn/mISDN/stack.c
421
struct channel_req rq;
drivers/isdn/mISDN/stack.c
437
rq.protocol = protocol;
drivers/isdn/mISDN/stack.c
438
rq.adr.channel = adr->channel;
drivers/isdn/mISDN/stack.c
439
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
drivers/isdn/mISDN/stack.c
458
struct channel_req rq, rq2;
drivers/isdn/mISDN/stack.c
470
rq.protocol = protocol;
drivers/isdn/mISDN/stack.c
471
rq.adr = *adr;
drivers/isdn/mISDN/stack.c
472
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
drivers/isdn/mISDN/stack.c
475
ch->recv = rq.ch->send;
drivers/isdn/mISDN/stack.c
476
ch->peer = rq.ch;
drivers/isdn/mISDN/stack.c
477
rq.ch->recv = ch->send;
drivers/isdn/mISDN/stack.c
478
rq.ch->peer = ch;
drivers/isdn/mISDN/stack.c
479
rq.ch->st = dev->D.st;
drivers/isdn/mISDN/stack.c
493
rq.protocol = rq2.protocol;
drivers/isdn/mISDN/stack.c
494
rq.adr = *adr;
drivers/isdn/mISDN/stack.c
495
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
drivers/isdn/mISDN/stack.c
500
rq2.ch->recv = rq.ch->send;
drivers/isdn/mISDN/stack.c
501
rq2.ch->peer = rq.ch;
drivers/isdn/mISDN/stack.c
502
rq.ch->recv = rq2.ch->send;
drivers/isdn/mISDN/stack.c
503
rq.ch->peer = rq2.ch;
drivers/isdn/mISDN/stack.c
504
rq.ch->st = dev->D.st;
drivers/isdn/mISDN/stack.c
507
ch->nr = rq.ch->nr;
drivers/isdn/mISDN/stack.c
515
struct channel_req rq;
drivers/isdn/mISDN/stack.c
523
rq.protocol = ISDN_P_TE_S0;
drivers/isdn/mISDN/stack.c
525
rq.protocol = ISDN_P_TE_E1;
drivers/isdn/mISDN/stack.c
528
rq.protocol = ISDN_P_NT_S0;
drivers/isdn/mISDN/stack.c
530
rq.protocol = ISDN_P_NT_E1;
drivers/isdn/mISDN/stack.c
536
rq.adr.channel = 0;
drivers/isdn/mISDN/stack.c
537
err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
drivers/isdn/mISDN/stack.c
541
rq.protocol = protocol;
drivers/isdn/mISDN/stack.c
542
rq.adr = *adr;
drivers/isdn/mISDN/stack.c
543
rq.ch = ch;
drivers/isdn/mISDN/stack.c
544
err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
drivers/isdn/mISDN/stack.c
547
if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
drivers/isdn/mISDN/stack.c
549
add_layer2(rq.ch, dev->D.st);
drivers/isdn/mISDN/stack.c
550
rq.ch->recv = mISDN_queue_message;
drivers/isdn/mISDN/stack.c
551
rq.ch->peer = &dev->D.st->own;
drivers/isdn/mISDN/stack.c
552
rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
drivers/isdn/mISDN/tei.c
788
struct channel_req rq;
drivers/isdn/mISDN/tei.c
797
rq.protocol = ISDN_P_NT_E1;
drivers/isdn/mISDN/tei.c
799
rq.protocol = ISDN_P_NT_S0;
drivers/isdn/mISDN/tei.c
836
rq.adr.dev = mgr->ch.st->dev->id;
drivers/isdn/mISDN/tei.c
837
id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq);
drivers/md/dm-mpath.c
506
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
drivers/md/dm-mpath.c
511
size_t nr_bytes = blk_rq_bytes(rq);
drivers/md/dm-mpath.c
539
clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
drivers/md/dm-rq.c
129
static struct dm_rq_target_io *tio_from_request(struct request *rq)
drivers/md/dm-rq.c
131
return blk_mq_rq_to_pdu(rq);
drivers/md/dm-rq.c
168
struct request *rq = tio->orig;
drivers/md/dm-rq.c
173
rq_end_stats(md, rq);
drivers/md/dm-rq.c
174
blk_mq_end_request(rq, error);
drivers/md/dm-rq.c
189
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
drivers/md/dm-rq.c
191
blk_mq_requeue_request(rq, false);
drivers/md/dm-rq.c
192
__dm_mq_kick_requeue_list(rq->q, msecs);
drivers/md/dm-rq.c
198
struct request *rq = tio->orig;
drivers/md/dm-rq.c
201
rq_end_stats(md, rq);
drivers/md/dm-rq.c
207
dm_mq_delay_requeue_request(rq, delay_ms);
drivers/md/dm-rq.c
258
static void dm_softirq_done(struct request *rq)
drivers/md/dm-rq.c
261
struct dm_rq_target_io *tio = tio_from_request(rq);
drivers/md/dm-rq.c
267
rq_end_stats(md, rq);
drivers/md/dm-rq.c
268
blk_mq_end_request(rq, tio->error);
drivers/md/dm-rq.c
273
if (rq->rq_flags & RQF_FAILED)
drivers/md/dm-rq.c
283
static void dm_complete_request(struct request *rq, blk_status_t error)
drivers/md/dm-rq.c
285
struct dm_rq_target_io *tio = tio_from_request(rq);
drivers/md/dm-rq.c
288
blk_mq_complete_request(rq);
drivers/md/dm-rq.c
297
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
drivers/md/dm-rq.c
299
rq->rq_flags |= RQF_FAILED;
drivers/md/dm-rq.c
300
dm_complete_request(rq, error);
drivers/md/dm-rq.c
327
static int setup_clone(struct request *clone, struct request *rq,
drivers/md/dm-rq.c
332
r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
drivers/md/dm-rq.c
345
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
drivers/md/dm-rq.c
351
tio->orig = rq;
drivers/md/dm-rq.c
374
struct request *rq = tio->orig;
drivers/md/dm-rq.c
378
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
drivers/md/dm-rq.c
384
if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
drivers/md/dm-rq.c
392
blk_rq_pos(rq));
drivers/md/dm-rq.c
406
dm_complete_request(rq, ret);
drivers/md/dm-rq.c
418
dm_kill_unmapped_request(rq, BLK_STS_IOERR);
drivers/md/dm-rq.c
464
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
drivers/md/dm-rq.c
468
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
drivers/md/dm-rq.c
487
struct request *rq = bd->rq;
drivers/md/dm-rq.c
488
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
drivers/md/dm-rq.c
518
dm_start_request(md, rq);
drivers/md/dm-rq.c
521
init_tio(tio, rq, md);
drivers/md/dm-rq.c
531
rq_end_stats(md, rq);
drivers/md/dm-target.c
199
static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
drivers/memstick/core/ms_block.c
1979
struct request *req = bd->rq;
drivers/memstick/core/mspro_block.c
765
blk_mq_start_request(bd->rq);
drivers/memstick/core/mspro_block.c
769
msb->block_req = bd->rq;
drivers/memstick/core/mspro_block.c
770
blk_mq_start_request(bd->rq);
drivers/mmc/core/block.c
2841
struct request *rq;
drivers/mmc/core/block.c
2908
rq = blk_mq_alloc_request(md->queue.queue, REQ_OP_DRV_OUT, 0);
drivers/mmc/core/block.c
2909
if (IS_ERR(rq)) {
drivers/mmc/core/block.c
2910
ret = PTR_ERR(rq);
drivers/mmc/core/block.c
2914
mq_rq = req_to_mmc_queue_req(rq);
drivers/mmc/core/block.c
2919
blk_execute_rq(rq, false);
drivers/mmc/core/block.c
2920
ret = req_to_mmc_queue_req(rq)->drv_op_result;
drivers/mmc/core/block.c
2922
blk_mq_free_request(rq);
drivers/mmc/core/mmc_test.c
2361
struct mmc_test_req *rq = mmc_test_req_alloc();
drivers/mmc/core/mmc_test.c
2371
if (!rq)
drivers/mmc/core/mmc_test.c
2374
mrq = &rq->mrq;
drivers/mmc/core/mmc_test.c
2376
mrq->sbc = &rq->sbc;
drivers/mmc/core/mmc_test.c
2403
cmd_ret = mmc_test_send_status(test, &rq->status);
drivers/mmc/core/mmc_test.c
2407
status = rq->status.resp[0];
drivers/mmc/core/mmc_test.c
2466
kfree(rq);
drivers/mmc/core/mmc_test.c
757
static void mmc_test_req_reset(struct mmc_test_req *rq)
drivers/mmc/core/mmc_test.c
759
memset(rq, 0, sizeof(struct mmc_test_req));
drivers/mmc/core/mmc_test.c
761
rq->mrq.cmd = &rq->cmd;
drivers/mmc/core/mmc_test.c
762
rq->mrq.data = &rq->data;
drivers/mmc/core/mmc_test.c
763
rq->mrq.stop = &rq->stop;
drivers/mmc/core/mmc_test.c
768
struct mmc_test_req *rq = kmalloc_obj(*rq);
drivers/mmc/core/mmc_test.c
770
if (rq)
drivers/mmc/core/mmc_test.c
771
mmc_test_req_reset(rq);
drivers/mmc/core/mmc_test.c
773
return rq;
drivers/mmc/core/queue.c
232
struct request *req = bd->rq;
drivers/mmc/core/queue.h
25
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
drivers/mmc/core/queue.h
27
return blk_mq_rq_to_pdu(rq);
drivers/mmc/host/omap.c
254
struct mmc_request *rq;
drivers/mmc/host/omap.c
259
rq = next_slot->mrq;
drivers/mmc/host/omap.c
261
mmc_omap_start_request(host, rq);
drivers/mmc/host/sdhci-esdhc-mcf.c
227
int i, q, ri, rq;
drivers/mmc/host/sdhci-esdhc-mcf.c
264
rq = q;
drivers/mmc/host/sdhci-esdhc-mcf.c
272
temp = ((sdclkfs[ri] >> 1) << 8) | ((rq - 1) << 4) |
drivers/mtd/mtd_blkdevs.c
108
struct request *rq;
drivers/mtd/mtd_blkdevs.c
110
rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
drivers/mtd/mtd_blkdevs.c
111
if (rq) {
drivers/mtd/mtd_blkdevs.c
112
list_del_init(&rq->queuelist);
drivers/mtd/mtd_blkdevs.c
113
blk_mq_start_request(rq);
drivers/mtd/mtd_blkdevs.c
114
return rq;
drivers/mtd/mtd_blkdevs.c
173
blk_mq_start_request(bd->rq);
drivers/mtd/mtd_blkdevs.c
178
list_add_tail(&bd->rq->queuelist, &dev->rq_list);
drivers/mtd/mtd_blkdevs.c
350
new->rq = new->disk->queue;
drivers/mtd/mtd_blkdevs.c
377
gd->queue = new->rq;
drivers/mtd/mtd_blkdevs.c
420
old->rq->queuedata = NULL;
drivers/mtd/mtd_blkdevs.c
424
memflags = blk_mq_freeze_queue(old->rq);
drivers/mtd/mtd_blkdevs.c
425
blk_mq_quiesce_queue(old->rq);
drivers/mtd/mtd_blkdevs.c
426
blk_mq_unquiesce_queue(old->rq);
drivers/mtd/mtd_blkdevs.c
427
blk_mq_unfreeze_queue(old->rq, memflags);
drivers/mtd/ubi/block.c
305
switch (req_op(bd->rq)) {
drivers/mtd/ubi/block.c
307
return ubiblock_read(bd->rq);
drivers/mtd/ubi/block.c
422
dev->rq = gd->queue;
drivers/mtd/ubi/block.c
81
struct request_queue *rq;
drivers/net/dsa/hirschmann/hellcreek_ptp.c
228
struct ptp_clock_request *rq, int on)
drivers/net/dsa/mv88e6xxx/chip.h
731
struct ptp_clock_request *rq, int on);
drivers/net/dsa/mv88e6xxx/ptp.c
312
struct ptp_clock_request *rq, int on)
drivers/net/dsa/mv88e6xxx/ptp.c
314
int rising = (rq->extts.flags & PTP_RISING_EDGE);
drivers/net/dsa/mv88e6xxx/ptp.c
320
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
drivers/net/dsa/mv88e6xxx/ptp.c
321
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
drivers/net/dsa/mv88e6xxx/ptp.c
322
(rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
drivers/net/dsa/mv88e6xxx/ptp.c
325
pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
drivers/net/dsa/mv88e6xxx/ptp.c
358
struct ptp_clock_request *rq, int on)
drivers/net/dsa/mv88e6xxx/ptp.c
362
switch (rq->type) {
drivers/net/dsa/mv88e6xxx/ptp.c
364
return mv88e6352_ptp_enable_extts(chip, rq, on);
drivers/net/ethernet/3com/3c574_cs.c
1034
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/3com/3c574_cs.c
1038
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/3com/3c574_cs.c
1042
dev->name, rq->ifr_ifrn.ifrn_name, cmd,
drivers/net/ethernet/3com/3c574_cs.c
238
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/3com/3c59x.c
3029
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/3com/3c59x.c
3042
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
drivers/net/ethernet/3com/3c59x.c
777
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/8390/axnet_cs.c
608
static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/8390/axnet_cs.c
611
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/8390/axnet_cs.c
81
static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/8390/pcnet_cs.c
1108
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/8390/pcnet_cs.c
1111
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/8390/pcnet_cs.c
98
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/adaptec/starfire.c
1898
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/adaptec/starfire.c
1901
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/adaptec/starfire.c
574
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/adi/adin1110.c
786
static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
drivers/net/ethernet/adi/adin1110.c
791
return phy_do_ioctl(netdev, rq, cmd);
drivers/net/ethernet/amazon/ena/ena_phc.c
22
struct ptp_clock_request *rq,
drivers/net/ethernet/amd/pcnet32.c
2780
static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/amd/pcnet32.c
2789
rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
390
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
393
struct ptp_clock_time *t = &rq->perout.period;
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
394
struct ptp_clock_time *s = &rq->perout.start;
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
397
u32 pin_index = rq->perout.index;
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
427
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
461
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
465
u32 pin_index = rq->extts.index;
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
488
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
490
switch (rq->type) {
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
492
return aq_ptp_extts_pin_configure(ptp, rq, on);
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
494
return aq_ptp_perout_pin_configure(ptp, rq, on);
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
496
return aq_ptp_pps_pin_configure(ptp, rq, on);
drivers/net/ethernet/broadcom/bcm63xx_enet.c
1600
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/broadcom/bcm63xx_enet.c
1608
return phy_mii_ioctl(dev->phydev, rq, cmd);
drivers/net/ethernet/broadcom/bcm63xx_enet.c
1618
return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
drivers/net/ethernet/broadcom/bcm63xx_enet.c
2431
static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/broadcom/bcm63xx_enet.c
2441
return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
13789
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
441
struct ptp_clock_request *rq)
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
450
ts.tv_sec = rq->perout.start.sec;
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
451
ts.tv_nsec = rq->perout.start.nsec;
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
477
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
485
switch (rq->type) {
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
489
rq->extts.index);
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
504
rq->perout.index);
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
512
rc = bnxt_ptp_perout_cfg(ptp, rq);
drivers/net/ethernet/broadcom/cnic.c
1786
ictx->ustorm_st_context.ring.rq.pbl_base.lo =
drivers/net/ethernet/broadcom/cnic.c
1788
ictx->ustorm_st_context.ring.rq.pbl_base.hi =
drivers/net/ethernet/broadcom/cnic.c
1790
ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
drivers/net/ethernet/broadcom/cnic.c
1791
ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
drivers/net/ethernet/broadcom/cnic_defs.h
3891
struct ustorm_iscsi_rq_db rq;
drivers/net/ethernet/broadcom/sb1250-mac.c
2465
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/broadcom/sb1250-mac.c
2472
return phy_mii_ioctl(sc->phy_dev, rq, cmd);
drivers/net/ethernet/broadcom/sb1250-mac.c
299
static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/broadcom/tg3.c
6238
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/broadcom/tg3.c
6244
switch (rq->type) {
drivers/net/ethernet/broadcom/tg3.c
6247
if (rq->perout.flags)
drivers/net/ethernet/broadcom/tg3.c
6250
if (rq->perout.index != 0)
drivers/net/ethernet/broadcom/tg3.c
6260
nsec = rq->perout.start.sec * 1000000000ULL +
drivers/net/ethernet/broadcom/tg3.c
6261
rq->perout.start.nsec;
drivers/net/ethernet/broadcom/tg3.c
6263
if (rq->perout.period.sec || rq->perout.period.nsec) {
drivers/net/ethernet/cadence/macb_main.c
4050
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/cadence/macb_main.c
4057
return phylink_mii_ioctl(bp->phylink, rq, cmd);
drivers/net/ethernet/cadence/macb_ptp.c
186
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/cavium/common/cavium_ptp.c
207
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/cavium/liquidio/lio_main.c
1617
struct ptp_clock_request __maybe_unused *rq,
drivers/net/ethernet/cavium/thunder/nic.h
588
struct rq_cfg_msg rq;
drivers/net/ethernet/cavium/thunder/nic_main.c
1008
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
drivers/net/ethernet/cavium/thunder/nic_main.c
1009
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
drivers/net/ethernet/cavium/thunder/nic_main.c
1010
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
drivers/net/ethernet/cavium/thunder/nic_main.c
1017
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
drivers/net/ethernet/cavium/thunder/nic_main.c
1018
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
drivers/net/ethernet/cavium/thunder/nic_main.c
1019
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
drivers/net/ethernet/cavium/thunder/nic_main.c
993
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
drivers/net/ethernet/cavium/thunder/nic_main.c
994
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
drivers/net/ethernet/cavium/thunder/nic_main.c
995
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
309
*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
drivers/net/ethernet/cavium/thunder/nicvf_main.c
530
struct rcv_queue *rq, struct sk_buff **skb)
drivers/net/ethernet/cavium/thunder/nicvf_main.c
551
&rq->xdp_rxq);
drivers/net/ethernet/cavium/thunder/nicvf_main.c
773
struct snd_queue *sq, struct rcv_queue *rq)
drivers/net/ethernet/cavium/thunder/nicvf_main.c
799
if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
drivers/net/ethernet/cavium/thunder/nicvf_main.c
857
struct rcv_queue *rq = &qs->rq[cq_idx];
drivers/net/ethernet/cavium/thunder/nicvf_main.c
888
nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1813
struct rcv_queue *rq;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1819
rq = &nic->qs->rq[rq_idx];
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1820
rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1821
rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
748
struct rcv_queue *rq;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
751
rq = &qs->rq[qidx];
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
752
rq->enable = enable;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
757
if (!rq->enable) {
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
759
xdp_rxq_info_unreg(&rq->xdp_rxq);
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
763
rq->cq_qs = qs->vnic_id;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
764
rq->cq_idx = qidx;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
765
rq->start_rbdr_qs = qs->vnic_id;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
766
rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
767
rq->cont_rbdr_qs = qs->vnic_id;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
768
rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
770
rq->caching = 1;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
773
WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx, 0) < 0);
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
776
mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
777
mbx.rq.qs_num = qs->vnic_id;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
778
mbx.rq.rq_num = qidx;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
779
mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
780
(rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
781
(rq->cont_qs_rbdr_idx << 8) |
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
782
(rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
785
mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
786
mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
794
mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
795
mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
299
struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
drivers/net/ethernet/chelsio/cxgb3/sge.c
1957
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1965
if (rq->polling) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1970
rq->offload_bundles++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1973
offload_enqueue(rq, skb);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2084
static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
drivers/net/ethernet/chelsio/cxgb3/sge.c
2088
struct sge_qset *qs = rspq_to_qset(rq);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2106
if (rq->polling) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
2701
struct sge_rspq *rq)
drivers/net/ethernet/chelsio/cxgb3/sge.c
2705
work = process_responses(adap, rspq_to_qset(rq), -1);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2706
t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
drivers/net/ethernet/chelsio/cxgb3/sge.c
2707
V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2099
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1178
const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1180
c->rx_coalesce_usecs = qtimer_val(adap, rq);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1181
c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1182
adap->sge.counter_val[rq->pktcnt_idx] : 0;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5234
adap->vres.rq.start = val[2];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5235
adap->vres.rq.size = val[3] - val[2] + 1;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
360
struct cxgb4_range rq;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4851
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4857
adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4859
rq->cntxt_id, fl_id, 0xffff);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4860
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4861
rq->desc, rq->phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4862
netif_napi_del(&rq->napi);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4863
rq->netdev = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4864
rq->cntxt_id = rq->abs_id = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4865
rq->desc = NULL;
drivers/net/ethernet/cisco/enic/enic.h
226
struct enic_rq *rq;
drivers/net/ethernet/cisco/enic/enic.h
274
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
drivers/net/ethernet/cisco/enic/enic.h
276
return rq;
drivers/net/ethernet/cisco/enic/enic.h
285
unsigned int rq)
drivers/net/ethernet/cisco/enic/enic.h
287
return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
drivers/net/ethernet/cisco/enic/enic_clsf.c
21
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
drivers/net/ethernet/cisco/enic/enic_clsf.c
45
res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
drivers/net/ethernet/cisco/enic/enic_clsf.c
47
res = (res == 0) ? rq : res;
drivers/net/ethernet/cisco/enic/enic_clsf.h
10
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
drivers/net/ethernet/cisco/enic/enic_ethtool.c
340
struct enic_rq_stats *rqstats = &enic->rq[i].stats;
drivers/net/ethernet/cisco/enic/enic_main.c
1265
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/enic_main.c
1267
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
drivers/net/ethernet/cisco/enic/enic_main.c
1268
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
drivers/net/ethernet/cisco/enic/enic_main.c
1277
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/enic_main.c
1280
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
drivers/net/ethernet/cisco/enic/enic_main.c
1353
err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
drivers/net/ethernet/cisco/enic/enic_main.c
1365
enic_calc_int_moderation(enic, &enic->rq[0].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
1374
enic_set_int_moderation(enic, &enic->rq[0].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
1376
enic->rq[0].stats.napi_complete++;
drivers/net/ethernet/cisco/enic/enic_main.c
1378
enic->rq[0].stats.napi_repoll++;
drivers/net/ethernet/cisco/enic/enic_main.c
1456
unsigned int rq = (napi - &enic->napi[0]);
drivers/net/ethernet/cisco/enic/enic_main.c
1457
unsigned int cq = enic_cq_rq(enic, rq);
drivers/net/ethernet/cisco/enic/enic_main.c
1458
unsigned int intr = enic_msix_rq_intr(enic, rq);
drivers/net/ethernet/cisco/enic/enic_main.c
1480
err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
drivers/net/ethernet/cisco/enic/enic_main.c
1492
enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
1501
enic_set_int_moderation(enic, &enic->rq[rq].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
1503
enic->rq[rq].stats.napi_complete++;
drivers/net/ethernet/cisco/enic/enic_main.c
1505
enic->rq[rq].stats.napi_repoll++;
drivers/net/ethernet/cisco/enic/enic_main.c
1715
enic->rq[i].pool = page_pool_create(&pp_params);
drivers/net/ethernet/cisco/enic/enic_main.c
1716
if (IS_ERR(enic->rq[i].pool)) {
drivers/net/ethernet/cisco/enic/enic_main.c
1717
err = PTR_ERR(enic->rq[i].pool);
drivers/net/ethernet/cisco/enic/enic_main.c
1718
enic->rq[i].pool = NULL;
drivers/net/ethernet/cisco/enic/enic_main.c
1723
vnic_rq_enable(&enic->rq[i].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
1724
vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
drivers/net/ethernet/cisco/enic/enic_main.c
1726
if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
drivers/net/ethernet/cisco/enic/enic_main.c
1761
ret = vnic_rq_disable(&enic->rq[i].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
1763
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
drivers/net/ethernet/cisco/enic/enic_main.c
1764
page_pool_destroy(enic->rq[i].pool);
drivers/net/ethernet/cisco/enic/enic_main.c
1765
enic->rq[i].pool = NULL;
drivers/net/ethernet/cisco/enic/enic_main.c
1813
err = vnic_rq_disable(&enic->rq[i].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
1825
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
drivers/net/ethernet/cisco/enic/enic_main.c
1826
page_pool_destroy(enic->rq[i].pool);
drivers/net/ethernet/cisco/enic/enic_main.c
1827
enic->rq[i].pool = NULL;
drivers/net/ethernet/cisco/enic/enic_main.c
2333
struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
drivers/net/ethernet/cisco/enic/enic_main.c
2438
kfree(enic->rq);
drivers/net/ethernet/cisco/enic/enic_main.c
2439
enic->rq = NULL;
drivers/net/ethernet/cisco/enic/enic_main.c
2463
enic->rq = kzalloc_objs(struct enic_rq, enic->rq_avail);
drivers/net/ethernet/cisco/enic/enic_main.c
2464
if (!enic->rq)
drivers/net/ethernet/cisco/enic/enic_main.c
337
error_status = vnic_rq_error_status(&enic->rq[i].vrq);
drivers/net/ethernet/cisco/enic/enic_main.c
924
struct enic_rq_stats *rqs = &enic->rq[i].stats;
drivers/net/ethernet/cisco/enic/enic_main.c
926
if (!enic->rq[i].vrq.ctrl)
drivers/net/ethernet/cisco/enic/enic_res.c
188
vnic_rq_free(&enic->rq[i].vrq);
drivers/net/ethernet/cisco/enic/enic_res.c
251
vnic_rq_init(&enic->rq[i].vrq,
drivers/net/ethernet/cisco/enic/enic_res.c
367
err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
drivers/net/ethernet/cisco/enic/enic_res.h
112
static inline void enic_queue_rq_desc(struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/enic_res.h
116
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
drivers/net/ethernet/cisco/enic/enic_res.h
125
vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
drivers/net/ethernet/cisco/enic/enic_rq.c
106
struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
drivers/net/ethernet/cisco/enic/enic_rq.c
256
struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
drivers/net/ethernet/cisco/enic/enic_rq.c
270
int enic_rq_alloc_buf(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/enic_rq.c
272
struct enic *enic = vnic_dev_priv(rq->vdev);
drivers/net/ethernet/cisco/enic/enic_rq.c
274
struct enic_rq *erq = &enic->rq[rq->index];
drivers/net/ethernet/cisco/enic/enic_rq.c
280
struct vnic_rq_buf *buf = rq->to_use;
drivers/net/ethernet/cisco/enic/enic_rq.c
285
enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
drivers/net/ethernet/cisco/enic/enic_rq.c
299
enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
drivers/net/ethernet/cisco/enic/enic_rq.c
304
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
drivers/net/ethernet/cisco/enic/enic_rq.c
306
struct enic *enic = vnic_dev_priv(rq->vdev);
drivers/net/ethernet/cisco/enic/enic_rq.c
307
struct enic_rq *erq = &enic->rq[rq->index];
drivers/net/ethernet/cisco/enic/enic_rq.c
316
static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/enic_rq.c
321
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
drivers/net/ethernet/cisco/enic/enic_rq.c
322
struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
drivers/net/ethernet/cisco/enic/enic_rq.c
343
if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
drivers/net/ethernet/cisco/enic/enic_rq.c
350
napi = &enic->napi[rq->index];
drivers/net/ethernet/cisco/enic/enic_rq.c
354
enic->netdev->name, rq->index,
drivers/net/ethernet/cisco/enic/enic_rq.c
368
enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
drivers/net/ethernet/cisco/enic/enic_rq.c
390
struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
drivers/net/ethernet/cisco/enic/enic_rq.c
391
struct vnic_rq *vrq = &enic->rq[q_number].vrq;
drivers/net/ethernet/cisco/enic/enic_rq.h
7
int enic_rq_alloc_buf(struct vnic_rq *rq);
drivers/net/ethernet/cisco/enic/enic_rq.h
8
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
drivers/net/ethernet/cisco/enic/vnic_rq.c
101
static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
drivers/net/ethernet/cisco/enic/vnic_rq.c
107
unsigned int count = rq->ring.desc_count;
drivers/net/ethernet/cisco/enic/vnic_rq.c
109
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
drivers/net/ethernet/cisco/enic/vnic_rq.c
110
writeq(paddr, &rq->ctrl->ring_base);
drivers/net/ethernet/cisco/enic/vnic_rq.c
111
iowrite32(count, &rq->ctrl->ring_size);
drivers/net/ethernet/cisco/enic/vnic_rq.c
112
iowrite32(cq_index, &rq->ctrl->cq_index);
drivers/net/ethernet/cisco/enic/vnic_rq.c
113
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
drivers/net/ethernet/cisco/enic/vnic_rq.c
114
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
drivers/net/ethernet/cisco/enic/vnic_rq.c
115
iowrite32(0, &rq->ctrl->dropped_packet_count);
drivers/net/ethernet/cisco/enic/vnic_rq.c
116
iowrite32(0, &rq->ctrl->error_status);
drivers/net/ethernet/cisco/enic/vnic_rq.c
117
iowrite32(fetch_index, &rq->ctrl->fetch_index);
drivers/net/ethernet/cisco/enic/vnic_rq.c
118
iowrite32(posted_index, &rq->ctrl->posted_index);
drivers/net/ethernet/cisco/enic/vnic_rq.c
120
rq->to_use = rq->to_clean =
drivers/net/ethernet/cisco/enic/vnic_rq.c
121
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
drivers/net/ethernet/cisco/enic/vnic_rq.c
125
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
drivers/net/ethernet/cisco/enic/vnic_rq.c
129
vnic_rq_init_start(rq, cq_index, 0, 0, error_interrupt_enable,
drivers/net/ethernet/cisco/enic/vnic_rq.c
133
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.c
135
return ioread32(&rq->ctrl->error_status);
drivers/net/ethernet/cisco/enic/vnic_rq.c
138
void vnic_rq_enable(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.c
140
iowrite32(1, &rq->ctrl->enable);
drivers/net/ethernet/cisco/enic/vnic_rq.c
143
int vnic_rq_disable(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.c
146
struct vnic_dev *vdev = rq->vdev;
drivers/net/ethernet/cisco/enic/vnic_rq.c
154
iowrite32(0, &rq->ctrl->enable);
drivers/net/ethernet/cisco/enic/vnic_rq.c
158
if (!ioread32(&rq->ctrl->running))
drivers/net/ethernet/cisco/enic/vnic_rq.c
162
rq->index);
drivers/net/ethernet/cisco/enic/vnic_rq.c
171
void vnic_rq_clean(struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/vnic_rq.c
172
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
drivers/net/ethernet/cisco/enic/vnic_rq.c
176
unsigned int count = rq->ring.desc_count;
drivers/net/ethernet/cisco/enic/vnic_rq.c
179
buf = rq->to_clean;
drivers/net/ethernet/cisco/enic/vnic_rq.c
18
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.c
181
for (i = 0; i < rq->ring.desc_count; i++) {
drivers/net/ethernet/cisco/enic/vnic_rq.c
182
(*buf_clean)(rq, buf);
drivers/net/ethernet/cisco/enic/vnic_rq.c
185
rq->ring.desc_avail = rq->ring.desc_count - 1;
drivers/net/ethernet/cisco/enic/vnic_rq.c
188
fetch_index = ioread32(&rq->ctrl->fetch_index);
drivers/net/ethernet/cisco/enic/vnic_rq.c
194
rq->to_use = rq->to_clean =
drivers/net/ethernet/cisco/enic/vnic_rq.c
195
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
drivers/net/ethernet/cisco/enic/vnic_rq.c
197
iowrite32(fetch_index, &rq->ctrl->posted_index);
drivers/net/ethernet/cisco/enic/vnic_rq.c
202
iowrite32(0, &rq->ctrl->enable);
drivers/net/ethernet/cisco/enic/vnic_rq.c
204
vnic_dev_clear_desc_ring(&rq->ring);
drivers/net/ethernet/cisco/enic/vnic_rq.c
21
unsigned int i, j, count = rq->ring.desc_count;
drivers/net/ethernet/cisco/enic/vnic_rq.c
25
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
drivers/net/ethernet/cisco/enic/vnic_rq.c
26
if (!rq->bufs[i])
drivers/net/ethernet/cisco/enic/vnic_rq.c
31
buf = rq->bufs[i];
drivers/net/ethernet/cisco/enic/vnic_rq.c
34
buf->desc = (u8 *)rq->ring.descs +
drivers/net/ethernet/cisco/enic/vnic_rq.c
35
rq->ring.desc_size * buf->index;
drivers/net/ethernet/cisco/enic/vnic_rq.c
37
buf->next = rq->bufs[0];
drivers/net/ethernet/cisco/enic/vnic_rq.c
40
buf->next = rq->bufs[i + 1];
drivers/net/ethernet/cisco/enic/vnic_rq.c
48
rq->to_use = rq->to_clean = rq->bufs[0];
drivers/net/ethernet/cisco/enic/vnic_rq.c
53
void vnic_rq_free(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.c
58
vdev = rq->vdev;
drivers/net/ethernet/cisco/enic/vnic_rq.c
60
vnic_dev_free_desc_ring(vdev, &rq->ring);
drivers/net/ethernet/cisco/enic/vnic_rq.c
63
if (rq->bufs[i]) {
drivers/net/ethernet/cisco/enic/vnic_rq.c
64
kfree(rq->bufs[i]);
drivers/net/ethernet/cisco/enic/vnic_rq.c
65
rq->bufs[i] = NULL;
drivers/net/ethernet/cisco/enic/vnic_rq.c
69
rq->ctrl = NULL;
drivers/net/ethernet/cisco/enic/vnic_rq.c
72
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
drivers/net/ethernet/cisco/enic/vnic_rq.c
77
rq->index = index;
drivers/net/ethernet/cisco/enic/vnic_rq.c
78
rq->vdev = vdev;
drivers/net/ethernet/cisco/enic/vnic_rq.c
80
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
drivers/net/ethernet/cisco/enic/vnic_rq.c
81
if (!rq->ctrl) {
drivers/net/ethernet/cisco/enic/vnic_rq.c
86
vnic_rq_disable(rq);
drivers/net/ethernet/cisco/enic/vnic_rq.c
88
err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
drivers/net/ethernet/cisco/enic/vnic_rq.c
92
err = vnic_rq_alloc_bufs(rq);
drivers/net/ethernet/cisco/enic/vnic_rq.c
94
vnic_rq_free(rq);
drivers/net/ethernet/cisco/enic/vnic_rq.h
100
return rq->to_use->desc;
drivers/net/ethernet/cisco/enic/vnic_rq.h
103
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.h
105
return rq->to_use->index;
drivers/net/ethernet/cisco/enic/vnic_rq.h
108
static inline void vnic_rq_post(struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/vnic_rq.h
113
struct vnic_rq_buf *buf = rq->to_use;
drivers/net/ethernet/cisco/enic/vnic_rq.h
122
rq->to_use = buf;
drivers/net/ethernet/cisco/enic/vnic_rq.h
123
rq->ring.desc_avail--;
drivers/net/ethernet/cisco/enic/vnic_rq.h
139
iowrite32(buf->index, &rq->ctrl->posted_index);
drivers/net/ethernet/cisco/enic/vnic_rq.h
143
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
drivers/net/ethernet/cisco/enic/vnic_rq.h
145
rq->ring.desc_avail += count;
drivers/net/ethernet/cisco/enic/vnic_rq.h
153
static inline void vnic_rq_service(struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/vnic_rq.h
155
int desc_return, void (*buf_service)(struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/vnic_rq.h
162
buf = rq->to_clean;
drivers/net/ethernet/cisco/enic/vnic_rq.h
167
(*buf_service)(rq, cq_desc, buf, skipped, opaque);
drivers/net/ethernet/cisco/enic/vnic_rq.h
170
rq->ring.desc_avail++;
drivers/net/ethernet/cisco/enic/vnic_rq.h
172
rq->to_clean = buf->next;
drivers/net/ethernet/cisco/enic/vnic_rq.h
177
buf = rq->to_clean;
drivers/net/ethernet/cisco/enic/vnic_rq.h
181
static inline int vnic_rq_fill(struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/vnic_rq.h
182
int (*buf_fill)(struct vnic_rq *rq))
drivers/net/ethernet/cisco/enic/vnic_rq.h
186
while (vnic_rq_desc_avail(rq) > 0) {
drivers/net/ethernet/cisco/enic/vnic_rq.h
188
err = (*buf_fill)(rq);
drivers/net/ethernet/cisco/enic/vnic_rq.h
196
void vnic_rq_free(struct vnic_rq *rq);
drivers/net/ethernet/cisco/enic/vnic_rq.h
197
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
drivers/net/ethernet/cisco/enic/vnic_rq.h
199
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
drivers/net/ethernet/cisco/enic/vnic_rq.h
202
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
drivers/net/ethernet/cisco/enic/vnic_rq.h
203
void vnic_rq_enable(struct vnic_rq *rq);
drivers/net/ethernet/cisco/enic/vnic_rq.h
204
int vnic_rq_disable(struct vnic_rq *rq);
drivers/net/ethernet/cisco/enic/vnic_rq.h
205
void vnic_rq_clean(struct vnic_rq *rq,
drivers/net/ethernet/cisco/enic/vnic_rq.h
206
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
drivers/net/ethernet/cisco/enic/vnic_rq.h
86
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.h
89
return rq->ring.desc_avail;
drivers/net/ethernet/cisco/enic/vnic_rq.h
92
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
drivers/net/ethernet/cisco/enic/vnic_rq.h
95
return rq->ring.desc_count - rq->ring.desc_avail - 1;
drivers/net/ethernet/cisco/enic/vnic_rq.h
98
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
drivers/net/ethernet/dec/tulip/tulip_core.c
257
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/dec/tulip/tulip_core.c
897
static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/dec/tulip/tulip_core.c
901
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/dec/tulip/winbond-840.c
1438
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/dec/tulip/winbond-840.c
1440
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/dec/tulip/winbond-840.c
334
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/dlink/dl2k.c
1364
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/dlink/dl2k.c
1368
struct mii_ioctl_data *miidata = if_mii(rq);
drivers/net/ethernet/dlink/dl2k.c
74
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/dlink/sundance.c
1805
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/dlink/sundance.c
1814
rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
drivers/net/ethernet/dlink/sundance.c
442
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/fealnx.c
1873
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/fealnx.c
1882
rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
drivers/net/ethernet/fealnx.c
432
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3139
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
3143
return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2635
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
2643
err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
16
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
28
switch (rq->type) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
30
switch (rq->extts.index) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
41
extts_clean_up(ptp_qoriq, rq->extts.index, false);
drivers/net/ethernet/freescale/enetc/enetc.c
3449
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
drivers/net/ethernet/freescale/enetc/enetc.c
3456
return phylink_mii_ioctl(priv->phylink, rq, cmd);
drivers/net/ethernet/freescale/enetc/enetc.h
526
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
drivers/net/ethernet/freescale/fec_ptp.c
525
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/freescale/fec_ptp.c
535
if (rq->type == PTP_CLK_REQ_PPS) {
drivers/net/ethernet/freescale/fec_ptp.c
541
} else if (rq->type == PTP_CLK_REQ_PEROUT) {
drivers/net/ethernet/freescale/fec_ptp.c
545
if (rq->perout.flags)
drivers/net/ethernet/freescale/fec_ptp.c
548
period.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/freescale/fec_ptp.c
549
period.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/freescale/fec_ptp.c
565
start_time.tv_sec = rq->perout.start.sec;
drivers/net/ethernet/freescale/fec_ptp.c
566
start_time.tv_nsec = rq->perout.start.nsec;
drivers/net/ethernet/freescale/gianfar.c
1757
int i, rq = 0;
drivers/net/ethernet/freescale/gianfar.c
1762
rq = skb->queue_mapping;
drivers/net/ethernet/freescale/gianfar.c
1763
tx_queue = priv->tx_queue[rq];
drivers/net/ethernet/freescale/gianfar.c
1764
txq = netdev_get_tx_queue(dev, rq);
drivers/net/ethernet/freescale/gianfar.h
599
u8 rq; /* Receive Queue index */
drivers/net/ethernet/freescale/ucc_geth.c
3359
static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/freescale/ucc_geth.c
3366
return phylink_mii_ioctl(ugeth->phylink, rq, cmd);
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
236
struct hinic_rq *rq;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
240
rq = dev->rxqs[rq_id].rq;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
246
return create_dbg_files(dev, HINIC_DBG_RQ_INFO, rq, root, &rq->dbg, rq_fields,
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
250
void hinic_rq_debug_rem(struct hinic_rq *rq)
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
252
if (rq->dbg)
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
253
rem_dbg_files(rq->dbg);
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
53
static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx)
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
55
struct hinic_wq *wq = rq->wq;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
59
return nic_dev->hwdev->func_to_io.global_qpn + rq->qid;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
61
return be16_to_cpu(*(__be16 *)(rq->pi_virt_addr)) & wq->mask;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
67
return rq->msix_entry;
drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
87
void hinic_rq_debug_rem(struct hinic_rq *rq);
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
700
msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
1084
return &qp->rq;
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
175
hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
318
qp->rq.qid = q_id;
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
319
err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
352
hinic_clean_rq(&qp->rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
155
struct hinic_rq *rq, u16 global_qid)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
162
wq = rq->wq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
181
HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
202
rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
203
rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
247
static int alloc_rq_skb_arr(struct hinic_rq *rq)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
249
struct hinic_wq *wq = rq->wq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
252
skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
253
rq->saved_skb = vzalloc(skb_arr_size);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
254
if (!rq->saved_skb)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
264
static void free_rq_skb_arr(struct hinic_rq *rq)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
266
vfree(rq->saved_skb);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
316
static int alloc_rq_cqe(struct hinic_rq *rq)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
318
struct hinic_hwif *hwif = rq->hwif;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
321
struct hinic_wq *wq = rq->wq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
324
cqe_size = wq->q_depth * sizeof(*rq->cqe);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
325
rq->cqe = vzalloc(cqe_size);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
326
if (!rq->cqe)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
329
cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
330
rq->cqe_dma = vzalloc(cqe_dma_size);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
331
if (!rq->cqe_dma)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
335
rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
336
sizeof(*rq->cqe[i]),
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
337
&rq->cqe_dma[i], GFP_KERNEL);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
338
if (!rq->cqe[i])
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
346
dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
347
rq->cqe_dma[j]);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
349
vfree(rq->cqe_dma);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
352
vfree(rq->cqe);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
360
static void free_rq_cqe(struct hinic_rq *rq)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
362
struct hinic_hwif *hwif = rq->hwif;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
364
struct hinic_wq *wq = rq->wq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
368
dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
369
rq->cqe_dma[i]);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
371
vfree(rq->cqe_dma);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
372
vfree(rq->cqe);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
384
int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
391
rq->hwif = hwif;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
393
rq->wq = wq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
395
rq->irq = entry->vector;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
396
rq->msix_entry = entry->entry;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
398
rq->buf_sz = HINIC_RX_BUF_SZ;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
400
err = alloc_rq_skb_arr(rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
406
err = alloc_rq_cqe(rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
413
pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
414
rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
415
&rq->pi_dma_addr, GFP_KERNEL);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
416
if (!rq->pi_virt_addr) {
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
424
free_rq_cqe(rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
427
free_rq_skb_arr(rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
435
void hinic_clean_rq(struct hinic_rq *rq)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
437
struct hinic_hwif *hwif = rq->hwif;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
441
pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
442
dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
443
rq->pi_dma_addr);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
445
free_rq_cqe(rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
446
free_rq_skb_arr(rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
468
int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
470
struct hinic_wq *wq = rq->wq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
62
#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
796
struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
799
struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
815
void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
820
rq->saved_skb[prod_idx] = skb;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
825
hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
837
struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
846
hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
850
cqe = rq->cqe[*cons_idx];
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
858
*skb = rq->saved_skb[*cons_idx];
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
872
struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
877
struct hinic_wq *wq = rq->wq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
884
*cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
886
*skb = rq->saved_skb[*cons_idx];
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
899
void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
902
struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
912
hinic_put_wqe(rq->wq, wqe_size);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
922
void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
925
struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
940
void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
945
struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
947
dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
968
void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
970
*rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
123
struct hinic_rq rq;
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
136
struct hinic_rq *rq, u16 global_qid);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
144
int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
147
void hinic_clean_rq(struct hinic_rq *rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
151
int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
206
struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
209
void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
212
struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
216
struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
221
void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
224
void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
227
void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
230
void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx);
drivers/net/ethernet/huawei/hinic/hinic_main.c
242
struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
drivers/net/ethernet/huawei/hinic/hinic_main.c
244
err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
drivers/net/ethernet/huawei/hinic/hinic_main.c
265
hinic_rq_debug_rem(nic_dev->rxqs[j].rq);
drivers/net/ethernet/huawei/hinic/hinic_main.c
289
hinic_rq_debug_rem(nic_dev->rxqs[i].rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
138
skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
142
addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
170
dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
204
free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
217
rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
224
hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
226
hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
233
hinic_rq_update(rxq->rq, prod_idx);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
245
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
250
while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
drivers/net/ethernet/huawei/hinic/hinic_rx.c
254
hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
256
hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
258
rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
drivers/net/ethernet/huawei/hinic/hinic_rx.c
281
rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
286
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
356
struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
359
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
379
rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
387
cqe = rq->cqe[ci];
drivers/net/ethernet/huawei/hinic/hinic_rx.c
389
hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
407
hinic_rq_put_wqe(rq, ci,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
445
free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
461
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
472
rq->msix_entry,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
496
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
503
rq->msix_entry,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
507
hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
519
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
523
qp = container_of(rq, struct hinic_qp, rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
527
hinic_hwdev_msix_set(hwdev, rq->msix_entry,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
533
interrupt_info.msix_index = rq->msix_entry;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
545
err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
549
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
550
err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
557
free_irq(rq->irq, rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
565
struct hinic_rq *rq = rxq->rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.c
567
irq_update_affinity_hint(rq->irq, NULL);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
568
free_irq(rq->irq, rxq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
580
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic/hinic_rx.c
583
struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
drivers/net/ethernet/huawei/hinic/hinic_rx.c
587
rxq->rq = rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.h
33
struct hinic_rq *rq;
drivers/net/ethernet/huawei/hinic/hinic_rx.h
46
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
340
struct hinic3_io_queue *rq,
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
345
rq->q_id = q_id;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
346
rq->msix_entry_idx = rq_msix_idx;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
348
err = hinic3_wq_create(hwdev, &rq->wq, rq_depth,
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
362
struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth,
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
374
err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx);
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
391
struct hinic3_io_queue *rq)
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
394
hinic3_wq_destroy(hwdev, &rq->wq);
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
475
nic_io->rq = qp_params->rqs;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
493
qp_params->rqs = nic_io->rq;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
578
static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
586
wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq);
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
592
wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr);
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
597
static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
604
ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
605
pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
607
hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
616
RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR));
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
716
struct hinic3_io_queue *rq;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
740
rq = &nic_io->rq[curr_id];
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
741
hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
853
rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
108
struct hinic3_io_queue *rq;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
125
qpages = &rxq->rq->wq.qpages;
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
136
static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
141
rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
163
rq_wqe_buf_set(rxq->rq, rxq->next_to_update, dma_addr,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
169
hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ,
drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
524
rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];
drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
91
struct hinic3_io_queue *rq;
drivers/net/ethernet/ibm/ehea/ehea_main.c
617
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
drivers/net/ethernet/ibm/ehea/ehea_main.c
630
if (rq == 2) {
drivers/net/ethernet/ibm/ehea/ehea_main.c
634
} else if (rq == 3) {
drivers/net/ethernet/ibm/ehea/ehea_main.c
669
int wqe_index, last_wqe_index, rq, port_reset;
drivers/net/ethernet/ibm/ehea/ehea_main.c
684
if (!ehea_check_cqe(cqe, &rq)) {
drivers/net/ethernet/ibm/ehea/ehea_main.c
685
if (rq == 1) {
drivers/net/ethernet/ibm/ehea/ehea_main.c
702
} else if (rq == 2) {
drivers/net/ethernet/ibm/ehea/ehea_main.c
735
port_reset = ehea_treat_poll_error(pr, rq, cqe,
drivers/net/ethernet/ibm/emac/core.c
2293
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
drivers/net/ethernet/ibm/emac/core.c
2296
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/intel/i40e/i40e_ptp.c
491
struct ptp_clock_request *rq,
drivers/net/ethernet/intel/i40e/i40e_ptp.c
596
struct ptp_clock_request *rq,
drivers/net/ethernet/intel/i40e/i40e_ptp.c
605
switch (rq->type) {
drivers/net/ethernet/intel/i40e/i40e_ptp.c
608
chan = rq->extts.index;
drivers/net/ethernet/intel/i40e/i40e_ptp.c
612
chan = rq->perout.index;
drivers/net/ethernet/intel/i40e/i40e_ptp.c
615
return i40e_pps_configure(ptp, rq, on);
drivers/net/ethernet/intel/ice/ice_base.c
607
static int ice_rxq_pp_create(struct ice_rx_ring *rq)
drivers/net/ethernet/intel/ice/ice_base.c
610
.count = rq->count,
drivers/net/ethernet/intel/ice/ice_base.c
612
.hsplit = rq->vsi->hsplit,
drivers/net/ethernet/intel/ice/ice_base.c
613
.xdp = ice_is_xdp_ena_vsi(rq->vsi),
drivers/net/ethernet/intel/ice/ice_base.c
618
err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
drivers/net/ethernet/intel/ice/ice_base.c
622
rq->pp = fq.pp;
drivers/net/ethernet/intel/ice/ice_base.c
623
rq->rx_fqes = fq.fqes;
drivers/net/ethernet/intel/ice/ice_base.c
624
rq->truesize = fq.truesize;
drivers/net/ethernet/intel/ice/ice_base.c
625
rq->rx_buf_len = fq.buf_len;
drivers/net/ethernet/intel/ice/ice_base.c
631
.count = rq->count,
drivers/net/ethernet/intel/ice/ice_base.c
634
.xdp = ice_is_xdp_ena_vsi(rq->vsi),
drivers/net/ethernet/intel/ice/ice_base.c
637
err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
drivers/net/ethernet/intel/ice/ice_base.c
641
rq->hdr_pp = fq.pp;
drivers/net/ethernet/intel/ice/ice_base.c
642
rq->hdr_fqes = fq.fqes;
drivers/net/ethernet/intel/ice/ice_base.c
643
rq->hdr_truesize = fq.truesize;
drivers/net/ethernet/intel/ice/ice_base.c
644
rq->rx_hdr_len = fq.buf_len;
drivers/net/ethernet/intel/ice/ice_base.c
649
ice_rxq_pp_destroy(rq);
drivers/net/ethernet/intel/ice/ice_controlq.c
1131
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
drivers/net/ethernet/intel/ice/ice_controlq.c
115
cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
drivers/net/ethernet/intel/ice/ice_controlq.c
116
&cq->rq.desc_buf.pa,
drivers/net/ethernet/intel/ice/ice_controlq.c
1177
u16 ntc = cq->rq.next_to_clean;
drivers/net/ethernet/intel/ice/ice_controlq.c
118
if (!cq->rq.desc_buf.va)
drivers/net/ethernet/intel/ice/ice_controlq.c
1192
if (!cq->rq.count) {
drivers/net/ethernet/intel/ice/ice_controlq.c
1199
ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
drivers/net/ethernet/intel/ice/ice_controlq.c
120
cq->rq.desc_buf.size = size;
drivers/net/ethernet/intel/ice/ice_controlq.c
1208
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
drivers/net/ethernet/intel/ice/ice_controlq.c
1222
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
drivers/net/ethernet/intel/ice/ice_controlq.c
1231
bi = &cq->rq.r.rq_bi[ntc];
drivers/net/ethernet/intel/ice/ice_controlq.c
1242
wr32(hw, cq->rq.tail, ntc);
drivers/net/ethernet/intel/ice/ice_controlq.c
1247
cq->rq.next_to_clean = ntc;
drivers/net/ethernet/intel/ice/ice_controlq.c
1248
cq->rq.next_to_use = ntu;
drivers/net/ethernet/intel/ice/ice_controlq.c
1254
ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
drivers/net/ethernet/intel/ice/ice_controlq.c
1255
*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
drivers/net/ethernet/intel/ice/ice_controlq.c
154
cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
drivers/net/ethernet/intel/ice/ice_controlq.c
155
sizeof(cq->rq.desc_buf), GFP_KERNEL);
drivers/net/ethernet/intel/ice/ice_controlq.c
156
if (!cq->rq.dma_head)
drivers/net/ethernet/intel/ice/ice_controlq.c
158
cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
drivers/net/ethernet/intel/ice/ice_controlq.c
165
bi = &cq->rq.r.rq_bi[i];
drivers/net/ethernet/intel/ice/ice_controlq.c
17
(qinfo)->rq.head = prefix##_ARQH; \
drivers/net/ethernet/intel/ice/ice_controlq.c
174
desc = ICE_CTL_Q_DESC(cq->rq, i);
drivers/net/ethernet/intel/ice/ice_controlq.c
18
(qinfo)->rq.tail = prefix##_ARQT; \
drivers/net/ethernet/intel/ice/ice_controlq.c
19
(qinfo)->rq.len = prefix##_ARQLEN; \
drivers/net/ethernet/intel/ice/ice_controlq.c
20
(qinfo)->rq.bah = prefix##_ARQBAH; \
drivers/net/ethernet/intel/ice/ice_controlq.c
200
dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
drivers/net/ethernet/intel/ice/ice_controlq.c
201
cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
drivers/net/ethernet/intel/ice/ice_controlq.c
202
cq->rq.r.rq_bi[i].va = NULL;
drivers/net/ethernet/intel/ice/ice_controlq.c
203
cq->rq.r.rq_bi[i].pa = 0;
drivers/net/ethernet/intel/ice/ice_controlq.c
204
cq->rq.r.rq_bi[i].size = 0;
drivers/net/ethernet/intel/ice/ice_controlq.c
206
cq->rq.r.rq_bi = NULL;
drivers/net/ethernet/intel/ice/ice_controlq.c
207
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
drivers/net/ethernet/intel/ice/ice_controlq.c
208
cq->rq.dma_head = NULL;
drivers/net/ethernet/intel/ice/ice_controlq.c
21
(qinfo)->rq.bal = prefix##_ARQBAL; \
drivers/net/ethernet/intel/ice/ice_controlq.c
22
(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
drivers/net/ethernet/intel/ice/ice_controlq.c
23
(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
drivers/net/ethernet/intel/ice/ice_controlq.c
24
(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
drivers/net/ethernet/intel/ice/ice_controlq.c
25
(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
drivers/net/ethernet/intel/ice/ice_controlq.c
303
status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
drivers/net/ethernet/intel/ice/ice_controlq.c
308
wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
drivers/net/ethernet/intel/ice/ice_controlq.c
412
if (cq->rq.count > 0) {
drivers/net/ethernet/intel/ice/ice_controlq.c
424
cq->rq.next_to_use = 0;
drivers/net/ethernet/intel/ice/ice_controlq.c
425
cq->rq.next_to_clean = 0;
drivers/net/ethernet/intel/ice/ice_controlq.c
443
cq->rq.count = cq->num_rq_entries;
drivers/net/ethernet/intel/ice/ice_controlq.c
447
ICE_FREE_CQ_BUFS(hw, cq, rq);
drivers/net/ethernet/intel/ice/ice_controlq.c
448
ice_free_cq_ring(hw, &cq->rq);
drivers/net/ethernet/intel/ice/ice_controlq.c
542
if (!cq->rq.count) {
drivers/net/ethernet/intel/ice/ice_controlq.c
548
wr32(hw, cq->rq.head, 0);
drivers/net/ethernet/intel/ice/ice_controlq.c
549
wr32(hw, cq->rq.tail, 0);
drivers/net/ethernet/intel/ice/ice_controlq.c
550
wr32(hw, cq->rq.len, 0);
drivers/net/ethernet/intel/ice/ice_controlq.c
551
wr32(hw, cq->rq.bal, 0);
drivers/net/ethernet/intel/ice/ice_controlq.c
552
wr32(hw, cq->rq.bah, 0);
drivers/net/ethernet/intel/ice/ice_controlq.c
555
cq->rq.count = 0;
drivers/net/ethernet/intel/ice/ice_controlq.c
558
ICE_FREE_CQ_BUFS(hw, cq, rq);
drivers/net/ethernet/intel/ice/ice_controlq.c
559
ice_free_cq_ring(hw, &cq->rq);
drivers/net/ethernet/intel/ice/ice_controlq.h
93
struct ice_ctl_q_ring rq; /* receive queue */
drivers/net/ethernet/intel/ice/ice_main.c
1457
val = rd32(hw, cq->rq.len);
drivers/net/ethernet/intel/ice/ice_main.c
1474
wr32(hw, cq->rq.len, val);
drivers/net/ethernet/intel/ice/ice_main.c
1578
ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
drivers/net/ethernet/intel/ice/ice_main.c
1579
return cq->rq.next_to_clean != ntu;
drivers/net/ethernet/intel/ice/ice_ptp.c
1524
static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
drivers/net/ethernet/intel/ice/ice_ptp.c
1534
chan = rq->index;
drivers/net/ethernet/intel/ice/ice_ptp.c
1552
if (rq->flags & PTP_FALLING_EDGE)
drivers/net/ethernet/intel/ice/ice_ptp.c
1554
if (rq->flags & PTP_RISING_EDGE)
drivers/net/ethernet/intel/ice/ice_ptp.c
1695
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
drivers/net/ethernet/intel/ice/ice_ptp.c
1703
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
drivers/net/ethernet/intel/ice/ice_ptp.c
1709
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
drivers/net/ethernet/intel/ice/ice_ptp.c
1715
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
drivers/net/ethernet/intel/ice/ice_ptp.c
1728
start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec;
drivers/net/ethernet/intel/ice/ice_ptp.c
1731
if (rq->flags & PTP_PEROUT_PHASE)
drivers/net/ethernet/intel/ice/ice_ptp.c
1741
if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
drivers/net/ethernet/intel/ice/ice_ptp.c
1747
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
drivers/net/ethernet/intel/ice/ice_ptp.c
1830
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/intel/ice/ice_ptp.c
1835
switch (rq->type) {
drivers/net/ethernet/intel/ice/ice_ptp.c
1839
&pf->ptp.perout_rqs[rq->perout.index];
drivers/net/ethernet/intel/ice/ice_ptp.c
1841
err = ice_ptp_cfg_perout(pf, &rq->perout, on);
drivers/net/ethernet/intel/ice/ice_ptp.c
1843
*cached = rq->perout;
drivers/net/ethernet/intel/ice/ice_ptp.c
1853
&pf->ptp.extts_rqs[rq->extts.index];
drivers/net/ethernet/intel/ice/ice_ptp.c
1855
err = ice_ptp_cfg_extts(pf, &rq->extts, on);
drivers/net/ethernet/intel/ice/ice_ptp.c
1857
*cached = rq->extts;
drivers/net/ethernet/intel/ice/ice_txrx.c
511
void ice_rxq_pp_destroy(struct ice_rx_ring *rq)
drivers/net/ethernet/intel/ice/ice_txrx.c
514
.fqes = rq->rx_fqes,
drivers/net/ethernet/intel/ice/ice_txrx.c
515
.pp = rq->pp,
drivers/net/ethernet/intel/ice/ice_txrx.c
519
rq->rx_fqes = NULL;
drivers/net/ethernet/intel/ice/ice_txrx.c
520
rq->pp = NULL;
drivers/net/ethernet/intel/ice/ice_txrx.c
522
if (!rq->hdr_pp)
drivers/net/ethernet/intel/ice/ice_txrx.c
525
fq.fqes = rq->hdr_fqes;
drivers/net/ethernet/intel/ice/ice_txrx.c
526
fq.pp = rq->hdr_pp;
drivers/net/ethernet/intel/ice/ice_txrx.c
529
rq->hdr_fqes = NULL;
drivers/net/ethernet/intel/ice/ice_txrx.c
530
rq->hdr_pp = NULL;
drivers/net/ethernet/intel/ice/ice_txrx.h
454
void ice_rxq_pp_destroy(struct ice_rx_ring *rq);
drivers/net/ethernet/intel/ice/ice_vf_mbx.c
322
snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
drivers/net/ethernet/intel/ice/ice_vf_mbx.c
324
snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
drivers/net/ethernet/intel/idpf/idpf_ptp.c
562
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/intel/igb/igb_ptp.c
490
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/intel/igb/igb_ptp.c
503
switch (rq->type) {
drivers/net/ethernet/intel/igb/igb_ptp.c
506
if (rq->extts.flags & PTP_STRICT_FLAGS &&
drivers/net/ethernet/intel/igb/igb_ptp.c
507
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
drivers/net/ethernet/intel/igb/igb_ptp.c
508
(rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
drivers/net/ethernet/intel/igb/igb_ptp.c
513
rq->extts.index);
drivers/net/ethernet/intel/igb/igb_ptp.c
517
if (rq->extts.index == 1) {
drivers/net/ethernet/intel/igb/igb_ptp.c
528
igb_pin_extts(igb, rq->extts.index, pin);
drivers/net/ethernet/intel/igb/igb_ptp.c
542
if (rq->perout.flags)
drivers/net/ethernet/intel/igb/igb_ptp.c
547
rq->perout.index);
drivers/net/ethernet/intel/igb/igb_ptp.c
551
ts.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/intel/igb/igb_ptp.c
552
ts.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/intel/igb/igb_ptp.c
558
if (rq->perout.index == 1) {
drivers/net/ethernet/intel/igb/igb_ptp.c
572
if (rq->perout.index == 1) {
drivers/net/ethernet/intel/igb/igb_ptp.c
580
int i = rq->perout.index;
drivers/net/ethernet/intel/igb/igb_ptp.c
641
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/intel/igb/igb_ptp.c
652
switch (rq->type) {
drivers/net/ethernet/intel/igb/igb_ptp.c
655
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
drivers/net/ethernet/intel/igb/igb_ptp.c
656
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
drivers/net/ethernet/intel/igb/igb_ptp.c
657
(rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
drivers/net/ethernet/intel/igb/igb_ptp.c
662
rq->extts.index);
drivers/net/ethernet/intel/igb/igb_ptp.c
666
if (rq->extts.index == 1) {
drivers/net/ethernet/intel/igb/igb_ptp.c
677
igb_pin_extts(igb, rq->extts.index, pin);
drivers/net/ethernet/intel/igb/igb_ptp.c
691
if (rq->perout.flags)
drivers/net/ethernet/intel/igb/igb_ptp.c
696
rq->perout.index);
drivers/net/ethernet/intel/igb/igb_ptp.c
700
ts.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/intel/igb/igb_ptp.c
701
ts.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/intel/igb/igb_ptp.c
711
if (rq->perout.index == 1) {
drivers/net/ethernet/intel/igb/igb_ptp.c
737
if (rq->perout.index == 1) {
drivers/net/ethernet/intel/igb/igb_ptp.c
745
int i = rq->perout.index;
drivers/net/ethernet/intel/igb/igb_ptp.c
747
igb->perout[i].start.tv_sec = rq->perout.start.sec;
drivers/net/ethernet/intel/igb/igb_ptp.c
748
igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
drivers/net/ethernet/intel/igb/igb_ptp.c
751
wr32(trgttimh, rq->perout.start.sec);
drivers/net/ethernet/intel/igb/igb_ptp.c
752
wr32(trgttiml, rq->perout.start.nsec);
drivers/net/ethernet/intel/igb/igb_ptp.c
780
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/intel/igc/igc_ptp.c
247
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/intel/igc/igc_ptp.c
258
switch (rq->type) {
drivers/net/ethernet/intel/igc/igc_ptp.c
261
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
drivers/net/ethernet/intel/igc/igc_ptp.c
262
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
drivers/net/ethernet/intel/igc/igc_ptp.c
263
(rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
drivers/net/ethernet/intel/igc/igc_ptp.c
268
rq->extts.index);
drivers/net/ethernet/intel/igc/igc_ptp.c
272
if (rq->extts.index == 1) {
drivers/net/ethernet/intel/igc/igc_ptp.c
283
igc_pin_extts(igc, rq->extts.index, pin);
drivers/net/ethernet/intel/igc/igc_ptp.c
298
rq->perout.index);
drivers/net/ethernet/intel/igc/igc_ptp.c
302
ts.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/intel/igc/igc_ptp.c
303
ts.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/intel/igc/igc_ptp.c
313
if (rq->perout.index == 1) {
drivers/net/ethernet/intel/igc/igc_ptp.c
339
if (rq->perout.index == 1) {
drivers/net/ethernet/intel/igc/igc_ptp.c
350
int i = rq->perout.index;
drivers/net/ethernet/intel/igc/igc_ptp.c
367
if (rq->perout.start.sec < safe_start.tv_sec)
drivers/net/ethernet/intel/igc/igc_ptp.c
370
igc->perout[i].start.tv_sec = rq->perout.start.sec;
drivers/net/ethernet/intel/igc/igc_ptp.c
371
igc->perout[i].start.tv_nsec = rq->perout.start.nsec;
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
632
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
643
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
drivers/net/ethernet/jme.c
2612
jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
drivers/net/ethernet/jme.c
2616
struct mii_ioctl_data *mii_data = if_mii(rq);
drivers/net/ethernet/korina.c
921
static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/korina.c
924
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
1007
struct nix_cn20k_rq_ctx_s rq;
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
1023
struct nix_cn10k_rq_ctx_s rq;
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
1043
struct nix_cn10k_rq_ctx_s rq;
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
1059
struct nix_rq_ctx_s rq;
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
1079
struct nix_rq_ctx_s rq;
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
987
struct nix_cn20k_rq_ctx_s rq;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
2178
struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
1177
memcpy(ctx, &req->rq, NIX_MAX_CTX_SIZE);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
1209
if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
1219
ena = (req->rq.ena & req->rq_mask.ena) |
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
1228
ena = (req->rq.ena & req->sq_mask.ena) |
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
1237
ena = (req->rq.ena & req->cq_mask.ena) |
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
1251
memcpy(&rsp->rq, ctx,
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
1411
aq_req.rq.ena = 0;
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6074
if (!aq_rsp.rq.policer_ena)
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6078
leaf_prof = FIELD_PREP(NIX_RQ_PROF_HI_MASK, aq_rsp.rq.band_prof_id_h);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6079
leaf_prof |= aq_rsp.rq.band_prof_id;
drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
688
uint32_t rq : 20;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
336
aq->rq.policer_ena = 1;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
338
aq->rq.policer_ena = 0;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
341
aq->rq.band_prof_id = policer;
drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
347
aq->rq.band_prof_id_h = policer >> 10;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1624
int stack_pages, pool_id, rq;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1634
for (rq = 0; rq < hw->rx_queues; rq++) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
1635
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
356
aq->rss.rq = ind_tbl[idx];
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
86
struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
88
if (!pfvf->qset.rq)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
900
aq->rq.cq = qidx;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
901
aq->rq.ena = 1;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
902
aq->rq.pb_caching = 1;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
903
aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
904
aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
905
aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
906
aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
907
aq->rq.qint_idx = 0;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
908
aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
909
aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
91
otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
910
aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
911
aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
912
aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
913
aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
152
*((*data)++) = ((u64 *)&pfvf->qset.rq[qidx].stats)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1917
kfree(qset->rq);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1918
qset->rq = NULL;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1957
qset->rq = kzalloc_objs(struct otx2_rcv_queue, pf->hw.rx_queues);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
1958
if (!qset->rq)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
340
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
350
switch (rq->type) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
353
rq->extts.index);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
363
if (rq->perout.flags)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
366
if (rq->perout.index >= ptp_info->n_pins)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
369
period = rq->perout.period.sec * NSEC_PER_SEC +
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
370
rq->perout.period.nsec;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1210
__clear_bit(flow_node->rq, &nic->rq_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1217
err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1222
flow_node->rq, flow_node->leaf_profile);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1332
err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1337
new_node->rq, new_node->leaf_profile);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
1344
__clear_bit(new_node->rq, &nic->rq_bmap);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
311
rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
349
node->rq = rq_idx;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
46
u16 rq;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
169
struct otx2_rcv_queue *rq;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
48
cn10k_rq_aq->rq.ena = 0;
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
154
struct otx2_rcv_queue *rq;
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
159
rq = &priv->qset.rq[qidx];
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
166
stats->rx_bytes = rq->stats.bytes;
drivers/net/ethernet/marvell/octeontx2/nic/rep.c
167
stats->rx_packets = rq->stats.pkts;
drivers/net/ethernet/mellanox/mlx5/core/en.h
1065
struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1067
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1068
void mlx5e_close_rq(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1069
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1070
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1135
int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1136
void mlx5e_activate_rq(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en.h
1137
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en.h
527
struct mlx5e_rq *rq;
drivers/net/ethernet/mellanox/mlx5/core/en.h
608
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en.h
612
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
drivers/net/ethernet/mellanox/mlx5/core/en.h
613
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
drivers/net/ethernet/mellanox/mlx5/core/en.h
760
struct mlx5e_rq rq;
drivers/net/ethernet/mellanox/mlx5/core/en.h
832
struct mlx5e_rq_stats rq;
drivers/net/ethernet/mellanox/mlx5/core/en.h
843
struct mlx5e_rq_stats rq;
drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
101
if (chs->c[i]->rq.dim) {
drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
104
mlx5e_dim_rx_change(&chs->c[i]->rq, false);
drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
105
err = mlx5e_dim_rx_change(&chs->c[i]->rq, true);
drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
32
*rqn = c->rq.rqn;
drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
56
*rqn = c->rq.rqn;
drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
65
int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable);
drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
42
int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled);
drivers/net/ethernet/mellanox/mlx5/core/en/health.h
32
void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en/health.h
33
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
24
data->rx_packets = stats->rq.packets;
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
25
data->rx_bytes = stats->rq.bytes;
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
1230
err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
1234
icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
40
struct mlx5e_rq_param rq;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
277
struct mlx5e_rq *rq = &c->rq;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
293
work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
295
busy |= INDIRECT_CALL_2(rq->post_wqes,
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
298
rq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
318
mlx5e_cq_arm(&rq->cq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
618
struct mlx5e_cq *cq = &c->rq.cq;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
700
struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
706
rq->wq_type = params->rq_wq_type;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
707
rq->pdev = c->pdev;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
708
rq->netdev = priv->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
709
rq->priv = priv;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
710
rq->clock = mdev->clock;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
711
rq->hwtstamp_config = &priv->hwtstamp_config;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
712
rq->mdev = mdev;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
713
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
714
rq->stats = &c->priv->ptp_stats.rq;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
715
rq->ix = MLX5E_PTP_CHANNEL_IX;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
716
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
717
err = mlx5e_rq_set_handlers(rq, params, false);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
721
return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
731
err = mlx5e_init_ptp_rq(c, params, &c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
737
return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
767
mlx5e_close_cq(&c->rq.cq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
781
mlx5e_close_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
782
mlx5e_close_cq(&c->rq.cq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
951
mlx5e_activate_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
952
netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
962
netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
963
mlx5e_deactivate_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
979
*rqn = c->rq.rqn;
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
53
struct mlx5e_rq rq;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
700
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
755
napi_gro_receive(rq->cq.napi, skb);
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
39
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
68
mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
69
struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
108
mlx5e_deactivate_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
126
mlx5e_free_rx_missing_descs(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
133
mlx5e_activate_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
134
rq->stats->recover++;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
154
struct mlx5e_rq *rq = ctx;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
157
mlx5e_deactivate_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
158
err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
159
clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
163
mlx5e_activate_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
164
rq->stats->recover++;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
165
if (rq->channel)
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
166
mlx5e_trigger_napi_icosq(rq->channel);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
168
mlx5e_trigger_napi_sched(rq->cq.napi);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
176
struct mlx5e_rq *rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
179
rq = ctx;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
180
priv = rq->priv;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
187
while (!netdev_trylock(rq->netdev)) {
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
188
if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &rq->priv->state))
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
194
eq = rq->cq.mcq.eq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
196
err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
197
if (err && rq->icosq)
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
198
clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
201
netdev_unlock(rq->netdev);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
241
static void mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
251
test_bit(i, &rq->state));
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
257
mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
266
err = mlx5e_query_rq_state(rq->mdev, rq->rqn, &hw_state);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
270
wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
271
wq_head = mlx5e_rqwq_get_head(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
272
wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
274
devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
279
mlx5e_health_rq_put_sw_state(fmsg, rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
280
mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
281
mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
283
if (rq->icosq) {
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
284
struct mlx5e_icosq *icosq = rq->icosq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
288
err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
298
static void mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
302
devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
303
mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
307
static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
310
struct mlx5e_priv *priv = rq->priv;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
316
rq_sz = mlx5e_rqwq_get_size(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
317
real_time = mlx5_is_real_time_rq(rq->mdev);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
318
rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(rq->mdev, params, NULL));
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
325
mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
336
mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
343
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
353
static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
358
mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
460
struct mlx5e_rq *rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
462
rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
463
&c->xskrq : &c->rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
465
mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
468
mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
531
struct mlx5e_rq *rq = ctx;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
546
key.index1 = rq->rqn;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
579
struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
581
mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
585
mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
609
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
612
struct mlx5e_icosq *icosq = rq->icosq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
613
struct mlx5e_priv *priv = rq->priv;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
617
err_ctx.ctx = rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
625
rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
630
void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
633
struct mlx5e_priv *priv = rq->priv;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
636
err_ctx.ctx = rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
639
snprintf(err_str, sizeof(err_str), "ERR CQE on RQ: 0x%x", rq->rqn);
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
84
struct mlx5e_rq *rq;
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
93
rq = &icosq->channel->rq;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
100
mlx5e_close_cq(&rq->cq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
12
struct mlx5e_rq *rq = &trap_ctx->rq;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
158
err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
165
mlx5e_close_trap_rq(&t->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
175
mlx5e_close_trap_rq(&trap->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
183
mlx5e_activate_rq(&trap->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
191
mlx5e_deactivate_rq(&trap->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
20
work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
22
busy |= rq->post_wqes(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
32
mlx5e_cq_arm(&rq->cq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
40
struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
45
rq->wq_type = params->rq_wq_type;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
46
rq->pdev = t->pdev;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
47
rq->netdev = priv->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
48
rq->priv = priv;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
49
rq->clock = mdev->clock;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
50
rq->hwtstamp_config = &priv->hwtstamp_config;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
51
rq->mdev = mdev;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
52
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
53
rq->stats = &priv->trap_stats.rq;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
54
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
55
xdp_rxq_info_unused(&rq->xdp_rxq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
56
mlx5e_rq_set_trap_handlers(rq, params);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
65
struct mlx5e_rq *rq = &t->rq;
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
80
err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
84
mlx5e_init_trap_rq(t, &t->params, rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
85
err = mlx5e_open_rq(&t->params, rq_param, NULL, node, q_counter, rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
92
mlx5e_close_cq(&rq->cq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
97
static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
99
mlx5e_close_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
12
struct mlx5e_rq rq;
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
231
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
233
return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
247
struct mlx5e_rq *rq;
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
386
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
388
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
389
mlx5_wq_ll_reset(&rq->mpwqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
390
rq->mpwqe.actual_wq_head = 0;
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
392
mlx5_wq_cyc_reset(&rq->wqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
412
static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
414
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
416
return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
418
return mlx5_wq_cyc_get_size(&rq->wqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
422
static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
424
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
426
return rq->mpwqe.wq.cur_sz;
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
428
return rq->wqe.wq.cur_sz;
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
432
static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
434
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
436
return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
438
return mlx5_wq_cyc_get_head(&rq->wqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
442
static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
444
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
446
return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
448
return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
579
static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
581
size_t isz = struct_size(rq->mpwqe.info, alloc_units.frag_pages, rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
583
return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
88
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
89
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
91
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
92
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
182
if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->hwtstamp_config)))
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
185
*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
186
_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
311
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
323
if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
325
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
329
err = xdp_do_redirect(rq->netdev, xdp, prog);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
332
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
333
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
334
rq->stats->xdp_redirect++;
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
337
bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
341
trace_xdp_exception(rq->netdev, prog, act);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
344
rq->stats->xdp_drop++;
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
61
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
91
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
948
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
950
struct mlx5e_xdpsq *xdpsq = rq->xdpsq;
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
957
if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
959
__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
100
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
106
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
91
struct mlx5e_rq *rq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
137
mlx5e_deactivate_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
138
mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
180
mlx5e_activate_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
182
mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
100
mxbuf->rq = rq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
103
__be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
104
rq->xsk_pool->chunk_size);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
105
__be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
112
.key = rq->mkey_be,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
117
.key = rq->mkey_be,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
118
.va = cpu_to_be64(rq->wqe_overflow.addr),
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
121
mxbuf->rq = rq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
125
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
132
offset = ix * rq->mpwqe.mtts_per_wqe;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
133
if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
135
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_OVERSIZED))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
137
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
143
.num_wqebbs = rq->mpwqe.umr_wqebbs,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
144
.umr.rq = rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
147
icosq->pc += rq->mpwqe.umr_wqebbs;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
159
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
163
int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
165
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
173
buffs = rq->wqe.alloc_units->xsk_buffs;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
176
alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
178
alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
180
alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
19
int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
191
frag = &rq->wqe.frags[j];
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
194
wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
201
int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
203
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
21
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
214
frag = &rq->wqe.frags[j];
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
216
*frag->xskp = xsk_buff_alloc(rq->xsk_pool);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
22
struct mlx5e_icosq *icosq = rq->icosq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
221
wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
228
static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
234
skb = napi_alloc_skb(rq->cq.napi, totallen);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
236
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
250
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
261
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
262
rq->stats->oversize_pkts_sw_drop++;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
294
prog = rcu_dereference(rq->xdp_prog);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
295
if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
296
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
304
return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
307
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
31
if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
328
prog = rcu_dereference(rq->xdp_prog);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
329
if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
330
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
339
return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
36
batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
37
rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
45
for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
46
xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
52
pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
54
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
56
if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
64
mxbuf->rq = rq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
66
} else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
72
.key = rq->mkey_be,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
75
mxbuf->rq = rq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
77
} else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
78
u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
85
.key = rq->mkey_be,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
89
.key = rq->mkey_be,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
93
.key = rq->mkey_be,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
97
.key = rq->mkey_be,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
98
.va = cpu_to_be64(rq->wqe_overflow.addr),
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
11
int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
12
int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
13
int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
14
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
20
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
132
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
137
err = mlx5e_open_xsk_rq(c, params, &cparam->rq, pool, xsk);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
56
mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
64
struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
70
rq->wq_type = params->rq_wq_type;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
71
rq->pdev = c->pdev;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
72
rq->netdev = c->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
73
rq->priv = c->priv;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
74
rq->hwtstamp_config = &c->priv->hwtstamp_config;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
75
rq->clock = mdev->clock;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
76
rq->icosq = &c->icosq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
77
rq->ix = c->ix;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
78
rq->channel = c;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
79
rq->mdev = mdev;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
80
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
81
rq->xdpsq = &c->rq_xdpsq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
82
rq->xsk_pool = pool;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
83
rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
84
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
86
err = mlx5e_rq_set_handlers(rq, params, xsk);
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
90
return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, c->napi.napi_id);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
497
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
501
struct net_device *netdev = rq->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
548
rq->stats->tls_resync_req_start++;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
590
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
593
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
603
resync_update_sn(rq, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
680
priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
22
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
90
static inline void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
448
priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
527
priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
600
priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
622
priv->channel_stats[rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
646
priv->channel_stats[arfs_rule->rxq]->rq.arfs_add++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
666
priv->channel_stats[rxq]->rq.arfs_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
767
priv->channel_stats[rxq_index]->rq.arfs_request_in++;
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
768
priv->channel_stats[arfs_rule->rxq]->rq.arfs_request_out++;
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
103
struct mlx5e_channel *c = rq->channel;
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
106
dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu,
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
107
c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
111
rq->dim = dim;
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
113
__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
115
__clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
117
mlx5e_dim_disable(rq->dim);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
118
rq->dim = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
48
struct mlx5e_rq *rq = dim->priv;
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
52
mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
97
int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
99
if (enable == !!rq->dim)
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2334
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2336
__clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
652
coal->use_adaptive_rx_coalesce = !!c->rq.dim;
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
654
cur_moder = net_dim_get_rx_moderation(c->rq.dim->mode,
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
655
c->rq.dim->profile_ix);
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
732
mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts,
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
900
err = mlx5e_dim_rx_change(&c->rq, rx_dim_enabled);
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
914
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1000
mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1002
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1003
u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1004
rq->mpwqe.page_shift;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1005
u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1006
0 : rq->buff.headroom;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1010
wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1013
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1016
for (f = 0; f < rq->wqe.info.num_frags; f++) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1017
u32 frag_size = rq->wqe.info.arr[f].frag_size |
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1021
wqe->data[f].lkey = rq->mkey_be;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1024
if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1035
page_pool_destroy(rq->page_pool);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1037
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1039
mlx5e_rq_free_shampo(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1041
kvfree(rq->mpwqe.info);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1043
mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1045
mlx5e_free_mpwqe_rq_drop_page(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1048
mlx5e_free_wqe_alloc_info(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1051
mlx5_wq_destroy(&rq->wq_ctrl);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1059
static void mlx5e_free_rq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1061
kvfree(rq->dim);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1062
page_pool_destroy(rq->page_pool);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1064
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1066
mlx5e_rq_free_shampo(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1067
kvfree(rq->mpwqe.info);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1068
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1069
mlx5e_free_mpwqe_rq_drop_page(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1072
mlx5e_free_wqe_alloc_info(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1075
mlx5_wq_destroy(&rq->wq_ctrl);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1077
if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1080
old_prog = rcu_dereference_protected(rq->xdp_prog,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1081
lockdep_is_held(&rq->priv->state_lock));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1085
xdp_rxq_info_unreg(&rq->xdp_rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1088
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1090
struct mlx5_core_dev *mdev = rq->mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1099
sizeof(u64) * rq->wq_ctrl.buf.npages;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1112
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1116
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1118
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1120
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1122
order_base_2(rq->mpwqe.shampo->hd_per_wq));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1123
MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1126
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1129
err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1136
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1138
struct mlx5_core_dev *mdev = rq->mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1151
mlx5e_rqwq_reset(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1158
err = mlx5_core_modify_rq(mdev, rq->rqn, in);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1165
static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1167
struct mlx5_cqwq *cqwq = &rq->cq.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1170
if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1181
int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1183
struct net_device *dev = rq->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1186
err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1188
netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1192
mlx5e_free_rx_descs(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1193
mlx5e_flush_rq_cq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1195
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1197
netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1204
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1206
struct mlx5_core_dev *mdev = rq->mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1225
err = mlx5_core_modify_rq(mdev, rq->rqn, in);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1232
void mlx5e_destroy_rq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1234
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1237
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1241
u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1244
if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1250
netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1251
rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1253
queue_work(rq->priv->wq, &rq->rx_timeout_work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1258
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1264
if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1267
wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1276
rq->dealloc_wqe(rq, head);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1280
rq->mpwqe.actual_wq_head = wq->head;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1281
rq->mpwqe.umr_in_progress = 0;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1282
rq->mpwqe.umr_completed = 0;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1285
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1290
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1291
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1293
mlx5e_free_rx_missing_descs(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1301
rq->dealloc_wqe(rq, wqe_ix);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1306
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1312
rq->dealloc_wqe(rq, wqe_ix);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1320
rq->dealloc_wqe(rq, wqe_ix);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1328
struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1330
struct mlx5_core_dev *mdev = rq->mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1334
__set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1336
err = mlx5e_alloc_rq(params, xsk, param, node, rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1340
err = mlx5e_create_rq(rq, param, q_counter);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1344
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1349
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1351
if (rq->channel && !params->rx_dim_enabled) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1352
rq->channel->rx_cq_moder = params->rx_cq_moderation;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1353
} else if (rq->channel) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1359
mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1362
err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1372
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1379
__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1386
__set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1391
mlx5e_destroy_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1393
mlx5e_free_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1398
void mlx5e_activate_rq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1400
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1403
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1405
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1409
void mlx5e_close_rq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1411
if (rq->dim)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1412
cancel_work_sync(&rq->dim->work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1413
cancel_work_sync(&rq->recover_work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1414
cancel_work_sync(&rq->rx_timeout_work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1415
mlx5e_destroy_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1416
mlx5e_free_rx_descs(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1417
mlx5e_free_rq(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2515
err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2519
return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2592
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2593
&c->rq.cq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2622
err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2636
mlx5e_close_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2653
mlx5e_close_cq(&c->rq.cq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2674
mlx5e_close_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2682
mlx5e_close_cq(&c->rq.cq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2869
mlx5e_activate_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2883
mlx5e_deactivate_rq(&c->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2987
err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
320
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
329
ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
330
rq->mpwqe.umr_mode),
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
335
cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
338
octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
343
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
345
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
348
alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
350
rq->mpwqe.pages_per_wqe));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
352
rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
353
if (!rq->mpwqe.info)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3573
static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3575
mlx5_wq_destroy(&rq->wq_ctrl);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3579
struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3588
err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3589
&rq->wq_ctrl);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3594
xdp_rxq_info_unused(&rq->xdp_rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3596
rq->mdev = mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
360
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
362
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
365
mlx5e_build_umr_wqe(rq, rq->icosq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
366
container_of(&rq->mpwqe.umr_wqe,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3683
err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3688
return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
3988
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
4013
struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
4262
dim_enabled = !!chs->c[i]->rq.dim;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
495
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
497
u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
498
u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
503
max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
506
if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
510
__func__, wq_size, rq->mpwqe.mtts_per_wqe,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5129
static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
513
err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5133
old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5134
lockdep_is_held(&rq->priv->state_lock));
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
514
&umr_mkey, rq->wqe_overflow.addr,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
515
rq->mpwqe.umr_mode, xsk_chunk_size);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
516
rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5181
mlx5e_rq_replace_xdp_prog(&c->rq, prog);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
520
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
526
WARN_ON(rq->xsk_pool);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
528
next_frag.frag_page = &rq->wqe.alloc_units->frag_pages[0];
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
533
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
534
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
536
&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
539
for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5433
rq_stats = &channel_stats->rq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5487
struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
559
static void mlx5e_init_xsk_buffs(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
564
WARN_ON(rq->wqe.info.num_frags != 1);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
565
WARN_ON(rq->wqe.info.log_num_frags != 0);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
566
WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
571
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
572
rq->wqe.frags[i].xskp = &rq->wqe.alloc_units->xsk_buffs[i];
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
577
rq->wqe.frags[i].flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
581
static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
583
int wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
584
int len = wq_sz << rq->wqe.info.log_num_frags;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
589
if (rq->xsk_pool)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
604
rq->wqe.alloc_units = aus;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
605
rq->wqe.frags = frags;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
607
if (rq->xsk_pool)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
608
mlx5e_init_xsk_buffs(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
610
mlx5e_init_frags_partition(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
615
static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
617
kvfree(rq->wqe.frags);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
618
kvfree(rq->wqe.alloc_units);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
623
struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
625
mlx5e_reporter_rq_cqe_err(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
630
struct mlx5e_rq *rq = container_of(timeout_work,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
634
mlx5e_reporter_rx_timeout(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
637
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
639
rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
640
if (!rq->wqe_overflow.page)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
643
rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
644
PAGE_SIZE, rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
645
if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
646
__free_page(rq->wqe_overflow.page);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
652
static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
654
dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
655
rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
656
__free_page(rq->wqe_overflow.page);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
660
u32 xdp_frag_size, struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
665
rq->wq_type = params->rq_wq_type;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
666
rq->pdev = c->pdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
667
rq->netdev = c->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
668
rq->priv = c->priv;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
669
rq->hwtstamp_config = &c->priv->hwtstamp_config;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
670
rq->clock = mdev->clock;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
671
rq->icosq = &c->icosq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
672
rq->ix = c->ix;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
673
rq->channel = c;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
674
rq->mdev = mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
675
rq->hw_mtu =
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
677
rq->xdpsq = &c->rq_xdpsq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
678
rq->stats = &c->priv->channel_stats[c->ix]->rq;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
679
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
680
err = mlx5e_rq_set_handlers(rq, params, NULL);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
684
return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
688
static void mlx5e_release_rq_hd_pages(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
698
dma_unmap_page(rq->pdev, info->addr, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
699
rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
704
static int mlx5e_alloc_rq_hd_pages(struct mlx5e_rq *rq, int node,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
718
addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
719
rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
720
err = dma_mapping_error(rq->pdev, addr);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
733
mlx5e_release_rq_hd_pages(rq, shampo);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
784
struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
791
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
811
err = mlx5e_alloc_rq_hd_pages(rq, node, shampo);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
820
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
821
if (!rq->hw_gro_data) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
826
rq->mpwqe.shampo = shampo;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
833
mlx5e_release_rq_hd_pages(rq, shampo);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
840
static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
842
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
847
kvfree(rq->hw_gro_data);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
848
mlx5_core_destroy_mkey(rq->mdev, shampo->mkey);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
849
mlx5e_release_rq_hd_pages(rq, shampo);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
856
int node, struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
858
struct mlx5_core_dev *mdev = rq->mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
867
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
868
INIT_WORK(&rq->rx_timeout_work, mlx5e_rq_timeout_work);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
872
RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
874
rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
875
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
878
rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
880
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
882
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
883
&rq->wq_ctrl);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
887
err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
891
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
893
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
895
rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
896
rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
897
rq->mpwqe.pages_per_wqe =
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
898
mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
899
rq->mpwqe.umr_mode);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
900
rq->mpwqe.umr_wqebbs =
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
901
mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
902
rq->mpwqe.umr_mode);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
903
rq->mpwqe.mtts_per_wqe =
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
904
mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
905
rq->mpwqe.umr_mode);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
907
pool_size = rq->mpwqe.pages_per_wqe <<
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
913
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
914
rq->mpwqe.num_strides =
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
916
rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
918
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
920
err = mlx5e_create_rq_umr_mkey(mdev, rq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
924
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
928
err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
934
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
935
&rq->wq_ctrl);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
939
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
941
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
943
rq->wqe.info = rqp->frags_info;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
944
rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
946
err = mlx5e_init_wqe_alloc_info(rq, node);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
952
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
956
xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
965
pp_params.dev = rq->pdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
966
pp_params.napi = rq->cq.napi;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
967
pp_params.netdev = rq->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
968
pp_params.dma_dir = rq->buff.map_dir;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
970
pp_params.queue_idx = rq->ix;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
973
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
981
rq->page_pool = page_pool_create(&pp_params);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
982
if (IS_ERR(rq->page_pool)) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
983
err = PTR_ERR(rq->page_pool);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
984
rq->page_pool = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
987
if (!rq->hd_page_pool)
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
988
rq->hd_page_pool = rq->page_pool;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
989
if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
990
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
991
MEM_TYPE_PAGE_POOL, rq->page_pool);
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
998
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
101
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1034
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1036
int udp_off = rq->hw_gro_data->fk.control.thoff;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1037
struct sk_buff *skb = rq->hw_gro_data->skb;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1053
static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1055
int udp_off = rq->hw_gro_data->fk.control.thoff;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1056
struct sk_buff *skb = rq->hw_gro_data->skb;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
106
mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1072
static void mlx5e_shampo_get_hd_buf_info(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1077
u32 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1078
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1087
static void *mlx5e_shampo_get_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
109
static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1093
mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1095
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1096
len, rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1101
static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1104
int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1109
last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1115
static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1118
int tcp_off = rq->hw_gro_data->fk.control.thoff;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1119
struct sk_buff *skb = rq->hw_gro_data->skb;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1124
mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1129
if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
113
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1130
bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1143
static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1146
int tcp_off = rq->hw_gro_data->fk.control.thoff;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1147
struct sk_buff *skb = rq->hw_gro_data->skb;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1152
mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1164
static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1166
bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1167
struct sk_buff *skb = rq->hw_gro_data->skb;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1173
int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1181
mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1183
mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1185
int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
119
rq->stats->cqe_compress_blks++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1191
mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1193
mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1222
static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1236
rq->stats->ecn_mark += !!rc;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1324
struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1328
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1342
if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1370
if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1400
struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1404
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1405
struct net_device *netdev = rq->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1410
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1437
if (unlikely(mlx5e_rx_hw_stamp(rq->hwtstamp_config)))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1438
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1439
rq->clock, get_cqe_ts(cqe));
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1440
skb_record_rx_queue(skb, rq->ix);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1453
mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1456
mlx5e_enable_ecn(rq, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1466
static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1471
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1478
if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1482
if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1483
napi_gro_receive(rq->cq.napi, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1484
rq->hw_gro_data->skb = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1489
static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1494
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1498
return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1502
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1509
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1522
static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1526
xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1529
mxbuf->rq = rq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1533
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1537
u16 rx_headroom = rq->buff.headroom;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
154
static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1550
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1551
frag_size, rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1554
prog = rcu_dereference(rq->xdp_prog);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1556
struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1559
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1561
if (mlx5e_xdp_handle(rq, prog, mxbuf))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1569
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
158
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1581
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1584
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1585
struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1587
u16 rx_headroom = rq->buff.headroom;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1604
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1605
rq->buff.frame0_sz, rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1609
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1623
mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1633
prog = rcu_dereference(rq->xdp_prog);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1637
if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1639
rq->flags)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1654
rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1677
static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1680
struct mlx5e_priv *priv = rq->priv;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1683
!test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1684
mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1685
queue_work(priv->wq, &rq->recover_work);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1689
static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1691
trigger_report(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1692
rq->stats->wqe_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1695
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1697
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
170
if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1704
wi = get_frag(rq, ci);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1708
mlx5e_handle_rx_err_cqe(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1712
skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1716
rq, wi, cqe, cqe_bcnt);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1719
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1724
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1733
napi_gro_receive(rq->cq.napi, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1740
static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1742
struct net_device *netdev = rq->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1746
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1753
wi = get_frag(rq, ci);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1757
mlx5e_handle_rx_err_cqe(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1761
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1764
rq, wi, cqe, cqe_bcnt);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1767
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
177
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1772
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1778
mlx5e_rep_tc_receive(cqe, rq, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1784
static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1788
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1790
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1791
u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1792
u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1801
mlx5e_handle_rx_err_cqe(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1806
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
181
mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1815
skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1818
rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1822
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1825
mlx5e_rep_tc_receive(cqe, rq, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1828
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1831
wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
184
static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1843
mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1854
mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1864
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1871
struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
188
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1884
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1888
rq->stats->oversize_pkts_sw_drop++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1893
prog = rcu_dereference(rq->xdp_prog);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1898
if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
190
mlx5e_decompress_cqe(rq, wq, cqcc);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1900
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1910
skb = napi_alloc_skb(rq->cq.napi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1913
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1932
mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1942
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1945
truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1947
mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
195
static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1960
if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1961
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1969
mlx5e_page_release_fragmented(rq->page_pool,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1979
BIT(rq->mpwqe.log_stride_sz));
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1984
rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1988
mlx5e_page_release_fragmented(rq->page_pool,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1995
mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
200
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2033
mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2044
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2049
u16 rx_headroom = rq->buff.headroom;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2058
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2059
rq->stats->oversize_pkts_sw_drop++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2068
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2069
frag_size, rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2072
prog = rcu_dereference(rq->xdp_prog);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2074
struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2077
mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2079
if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2080
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2090
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2102
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2112
skb = napi_alloc_skb(rq->cq.napi, len);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2114
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2120
mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2121
mlx5e_copy_skb_header(rq, skb, page_to_netmem(di->page), di->addr,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2143
mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2145
struct sk_buff *skb = rq->hw_gro_data->skb;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2146
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2150
mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2156
mlx5e_shampo_update_hdr(rq, cqe, match);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
216
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2160
napi_gro_receive(rq->cq.napi, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2161
rq->hw_gro_data->skb = NULL;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
217
INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2175
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2178
u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2186
struct sk_buff **skb = &rq->hw_gro_data->skb;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2189
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
219
rq, &cqd->title);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2194
wi = mlx5e_get_mpw_info(rq, wqe_id);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2198
mlx5e_handle_rx_err_cqe(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2210
mlx5e_shampo_flush_skb(rq, cqe, match);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2215
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
222
rq->stats->cqe_compress_pkts += left;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2225
*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2240
rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2241
int len = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2246
last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2248
rq->hw_gro_data->second_ip_id = ntohs(iph->id);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2257
mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2267
if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
227
static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2271
if (flush && rq->hw_gro_data->skb)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2272
mlx5e_shampo_flush_skb(rq, cqe, match);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2274
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2280
wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2285
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2289
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2291
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2292
u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2293
u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2302
mlx5e_handle_rx_err_cqe(rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2307
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2316
skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
232
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2320
rq, wi, cqe, cqe_bcnt, head_offset,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2325
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2334
napi_gro_receive(rq->cq.napi, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2337
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2340
wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2345
static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2350
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2359
rq->stats->cqe_compress_blks++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2366
mlx5e_read_enhanced_title_slot(rq, title_cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2368
rq->stats->cqe_compress_blks++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2371
mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2378
INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2380
rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2387
mlx5e_read_enhanced_title_slot(rq, title_cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2394
static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2401
if (rq->cqd.left)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2402
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2407
mlx5e_decompress_cqes_start(rq, cqwq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2413
INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2415
rq, cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2424
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2428
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2431
if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2432
work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2435
work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
244
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2441
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2442
mlx5e_shampo_flush_skb(rq, NULL, false);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2444
if (rcu_access_pointer(rq->xdp_prog))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2445
mlx5e_xdp_rx_poll_complete(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
245
INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2461
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
247
rq, &cqd->title);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2476
netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2489
stats = &priv->channel_stats[rq->ix]->rq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
252
rq->stats->cqe_compress_pkts += cqe_count;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2525
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2526
rq->clock, get_cqe_ts(cqe));
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2527
skb_record_rx_queue(skb, rq->ix);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2544
static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2546
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2553
wi = get_frag(rq, ci);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2557
rq->stats->wqe_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2561
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2564
rq, wi, cqe, cqe_bcnt);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2568
mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
257
static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2573
napi_gro_receive(rq->cq.napi, skb);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2585
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2587
struct net_device *netdev = rq->netdev;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2588
struct mlx5_core_dev *mdev = rq->mdev;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2589
struct mlx5e_priv *priv = rq->priv;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2591
switch (rq->wq_type) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2593
rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2598
rq->post_wqes = mlx5e_post_rx_mpwqes;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2599
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2602
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2603
if (!rq->handle_rx_cqe) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2608
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2609
if (!rq->handle_rx_cqe) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
261
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2617
rq->wqe.skb_from_cqe = xsk ?
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2622
rq->post_wqes = mlx5e_post_rx_wqes;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2623
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2624
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
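
The mlx5e_rq_set_handlers entries around this point show that the RQ stores per-type function pointers (rq->handle_rx_cqe, rq->post_wqes, rq->dealloc_wqe) chosen by rq->wq_type, which the CQ poll loop later dispatches through (the INDIRECT_CALL_3() sites listed under en_rx.c). Below is a hedged, simplified sketch of that dispatch idea only; the toy structs and handler bodies are illustrative, not the driver's actual types.

    /* Toy function-pointer dispatch keyed on work-queue type. */
    #include <stdio.h>

    struct toy_cqe { int byte_cnt; };
    struct toy_rq {
        int wq_type;                        /* 0: cyclic, 1: striding (example encoding) */
        void (*handle_rx_cqe)(struct toy_rq *rq, struct toy_cqe *cqe);
    };

    static void toy_handle_rx_cqe(struct toy_rq *rq, struct toy_cqe *cqe)
    {
        (void)rq;
        printf("legacy RQ: %d bytes\n", cqe->byte_cnt);
    }

    static void toy_handle_rx_cqe_mpwrq(struct toy_rq *rq, struct toy_cqe *cqe)
    {
        (void)rq;
        printf("striding RQ: %d bytes\n", cqe->byte_cnt);
    }

    static void toy_set_handlers(struct toy_rq *rq)
    {
        rq->handle_rx_cqe = rq->wq_type ? toy_handle_rx_cqe_mpwrq
                                        : toy_handle_rx_cqe;
    }

    int main(void)
    {
        struct toy_cqe cqe = { .byte_cnt = 1500 };
        struct toy_rq rq = { .wq_type = 1 };

        toy_set_handlers(&rq);
        rq.handle_rx_cqe(&rq, &cqe);        /* indirect dispatch per CQE */
        return 0;
    }
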
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2625
if (!rq->handle_rx_cqe) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2634
static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2636
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
264
mlx5e_read_title_slot(rq, wq, cc);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2645
wi = get_frag(rq, ci);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2649
rq->stats->wqe_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2653
skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2657
if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
266
mlx5e_decompress_cqe(rq, wq, cc);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2661
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2662
rq->netdev->devlink_port);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2669
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
267
INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2671
rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2674
rq->post_wqes = mlx5e_post_rx_wqes;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2675
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2676
rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
269
rq, &cqd->title);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
272
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
305
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
316
err = mlx5e_page_alloc_fragmented(rq->page_pool,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
332
static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
336
mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
339
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
341
return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
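
get_frag() above, together with the loops over rq->wqe.info.num_frags listed nearby, shows the fragment layout: the frags array holds a power-of-two number of entries per WQE, so WQE index ix owns the slice starting at ix << log_num_frags. A small standalone sketch of that indexing arithmetic, with example values assumed for the sizes:

    #include <stdio.h>

    int main(void)
    {
        const unsigned log_num_frags = 2;   /* 4 frags per WQE (example value) */
        const unsigned num_frags = 1u << log_num_frags;
        const unsigned wq_sz = 4;           /* example ring size */

        for (unsigned ix = 0; ix < wq_sz; ix++) {
            unsigned base = ix << log_num_frags;   /* same shift as get_frag() */
            printf("wqe %u -> frags[%u..%u]\n", ix, base, base + num_frags - 1);
        }
        return 0;
    }
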
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
344
static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
347
struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
351
for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
355
err = mlx5e_get_rx_frag(rq, frag);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
361
headroom = i == 0 ? rq->buff.headroom : 0;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
370
mlx5e_put_rx_frag(rq, --frag);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
375
static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
380
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
381
mlx5e_put_rx_frag(rq, wi);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
390
static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
392
struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
394
if (rq->xsk_pool) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
397
mlx5e_free_rx_wqe(rq, wi);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
403
for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
408
static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
410
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
417
wi = get_frag(rq, j);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
426
static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
428
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
435
wi = get_frag(rq, j);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
436
mlx5e_free_rx_wqe(rq, wi);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
440
static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
442
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
451
if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
458
static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
470
refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
472
mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
473
refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
484
mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
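
The mlx5e_refill_rx_wqes entries here (458, 470, 472, 473, 484) outline a chunked refill: work proceeds in units of rq->wqe.info.refill_unit, and on a partial allocation failure the driver frees everything allocated so far. The sketch below only models that chunk-and-roll-back shape with assumed sizes and a pretend allocator, not the driver's bookkeeping.

    #include <stdbool.h>
    #include <stdio.h>

    static bool toy_alloc_wqe(int ix) { return ix != 13; }  /* pretend index 13 fails */

    static int toy_refill(int ix, int wqe_bulk, int refill_unit)
    {
        int total_alloc = 0, remaining = wqe_bulk;

        while (remaining) {
            int refill = remaining < refill_unit ? remaining : refill_unit;
            int done = 0;

            for (int i = 0; i < refill; i++, done++)
                if (!toy_alloc_wqe(ix + total_alloc + i))
                    break;
            total_alloc += done;
            remaining -= done;
            if (done < refill) {
                /* the driver frees everything allocated so far at this point;
                 * the toy just reports it */
                printf("failed at %d, rolling back %d WQEs\n",
                       ix + total_alloc, total_alloc);
                return 0;
            }
        }
        return total_alloc;
    }

    int main(void)
    {
        printf("refilled %d\n", toy_refill(0, 8, 4));   /* succeeds: 8 */
        printf("refilled %d\n", toy_refill(10, 8, 4));  /* hits the toy failure at 13 */
        return 0;
    }
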
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
487
int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
490
frag = get_frag(rq, j);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
491
for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
499
mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
508
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
527
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
536
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
537
rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
550
mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
558
dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
559
rq->buff.map_dir);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
564
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
570
if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
573
no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
575
if (rq->xsk_pool) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
582
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
586
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
591
mlx5e_page_release_fragmented(rq->page_pool,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
598
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
600
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
614
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
616
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
617
struct mlx5e_icosq *sq = rq->icosq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
628
pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
630
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
634
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
637
err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
65
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
650
if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
651
int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
652
rq->mpwqe.pages_per_wqe;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
654
memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
658
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
665
offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
670
.num_wqebbs = rq->mpwqe.umr_wqebbs,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
671
.umr.rq = rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
674
sq->pc += rq->mpwqe.umr_wqebbs;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
685
mlx5e_page_release_fragmented(rq->page_pool, frag_page);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
688
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
69
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
690
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
695
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
697
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
699
mlx5e_free_rx_mpwqe(rq, wi);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
704
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
707
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
709
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
714
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
717
if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
72
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
720
if (rq->page_pool)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
721
page_pool_nid_changed(rq->page_pool, numa_mem_id());
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
729
wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
73
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
731
if (!rq->xsk_pool) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
732
count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
733
} else if (likely(!dma_dev_need_sync(rq->pdev))) {
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
734
mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
735
count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
737
mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
74
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
743
count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
748
rq->stats->buff_alloc_err++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
843
wi->umr.rq->mpwqe.umr_completed++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
872
static void mlx5e_reclaim_mpwqe_pages(struct mlx5e_rq *rq, int head,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
875
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
880
mlx5e_dealloc_rx_mpwqe(rq, head);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
884
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
886
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
887
u8 umr_completed = rq->mpwqe.umr_completed;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
888
struct mlx5e_icosq *sq = rq->icosq;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
894
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
898
mlx5e_post_rx_mpwqe(rq, umr_completed);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
899
rq->mpwqe.umr_in_progress -= umr_completed;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
90
static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
900
rq->mpwqe.umr_completed = 0;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
903
missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
905
if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
906
rq->stats->congst_umr++;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
908
if (likely(missing < rq->mpwqe.min_wqe_bulk))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
911
if (rq->page_pool)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
912
page_pool_nid_changed(rq->page_pool, numa_mem_id());
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
913
if (rq->hd_page_pool)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
914
page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
916
head = rq->mpwqe.actual_wq_head;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
919
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
922
mlx5e_free_rx_mpwqe(rq, wi);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
925
alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
926
mlx5e_alloc_rx_mpwqe(rq, head);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
93
struct mlx5e_cq_decomp *cqd = &rq->cqd;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
933
mlx5e_reclaim_mpwqe_pages(rq, head, reclaim);
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
941
rq->mpwqe.umr_last_bulk = missing - i;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
947
rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
948
rq->mpwqe.actual_wq_head = head;
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
956
if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
98
if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2579
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
2649
&priv->channel_stats[i]->rq,
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
467
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
495
struct mlx5e_rq_stats *rq_stats = c->rq.stats;
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
496
struct page_pool *pool = c->rq.page_pool;
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
532
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
133
struct mlx5e_rq *rq = &c->rq;
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
179
work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
197
busy |= INDIRECT_CALL_2(rq->post_wqes,
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
200
rq);
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
239
mlx5e_handle_rx_dim(rq);
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
241
mlx5e_cq_arm(&rq->cq);
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
61
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
63
struct mlx5e_rq_stats *stats = rq->stats;
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
66
if (unlikely(!test_bit(MLX5E_RQ_STATE_DIM, &rq->state)))
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
69
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
70
net_dim(rq->dim, &dim_sample);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
110
if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
115
ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
116
data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
121
conn->qp.rq.pc++;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
122
conn->qp.rq.bufs[ix] = buf;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
126
*conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
256
ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
257
buf = conn->qp.rq.bufs[ix];
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
258
conn->qp.rq.bufs[ix] = NULL;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
259
conn->qp.rq.cc++;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
520
conn->qp.rq.pc = 0;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
521
conn->qp.rq.cc = 0;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
522
conn->qp.rq.size = roundup_pow_of_two(rx_size);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
528
MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
534
conn->qp.rq.bufs = kvzalloc_objs(conn->qp.rq.bufs[0], conn->qp.rq.size);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
535
if (!conn->qp.rq.bufs) {
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
565
MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
591
kvfree(conn->qp.rq.bufs);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
603
for (ix = 0; ix < conn->qp.rq.size; ix++) {
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
604
if (!conn->qp.rq.bufs[ix])
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
606
mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
607
kfree(conn->qp.rq.bufs[ix]);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
608
conn->qp.rq.bufs[ix] = NULL;
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
647
kvfree(conn->qp.rq.bufs);
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h
82
} rq;
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
146
rq_stats = &channel_stats->rq;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
709
struct ptp_clock_request *rq,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
723
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
724
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
725
(rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
728
if (rq->extts.index >= clock->ptp_info.n_pins)
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
731
pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
737
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
812
static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
818
ts.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
819
ts.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
825
*time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
826
perout_conf_internal_timer(mdev, rq->perout.start.sec);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
833
struct ptp_clock_request *rq,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
840
if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
841
ts.tv_sec = rq->perout.on.sec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
842
ts.tv_nsec = rq->perout.on.nsec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
848
ts.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
849
ts.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
865
static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
870
struct ptp_clock_time *time = &rq->perout.start;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
873
ts.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
874
ts.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
880
*period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
882
if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
893
struct ptp_clock_request *rq,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
910
if (rq->perout.index >= clock->ptp_info.n_pins)
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
914
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
931
if (rt_mode && rq->perout.start.sec > U32_MAX) {
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
941
err = perout_conf_npps_real_time(mdev, rq, &field_select,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
945
err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
973
struct ptp_clock_request *rq,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
984
struct ptp_clock_request *rq,
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
987
switch (rq->type) {
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
989
return mlx5_extts_configure(ptp, rq, on);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
991
return mlx5_perout_configure(ptp, rq, on);
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
993
return mlx5_pps_configure(ptp, rq, on);
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
268
dr_qp->rq.pc = 0;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
269
dr_qp->rq.cc = 0;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
270
dr_qp->rq.wqe_cnt = 256;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
277
MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
317
MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
1433
} rq;
drivers/net/ethernet/mellanox/mlx5/core/wq.c
128
mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
drivers/net/ethernet/mellanox/mlx5/core/wq.c
146
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
drivers/net/ethernet/mellanox/mlx5/core/wq.h
60
struct mlx5_wq_cyc rq;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
834
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
844
pin = ptp_find_pin(phc->clock, PTP_PF_PEROUT, rq->perout.index);
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
861
if (rq->perout.period.sec == 1 &&
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
862
rq->perout.period.nsec == 0)
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
865
if (rq->perout.flags & PTP_PEROUT_PHASE) {
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
866
ts_phase.tv_sec = rq->perout.phase.sec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
867
ts_phase.tv_nsec = rq->perout.phase.nsec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
869
ts_phase.tv_sec = rq->perout.start.sec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
870
ts_phase.tv_nsec = rq->perout.start.nsec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
879
if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
882
ts_on.tv_sec = rq->perout.on.sec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
883
ts_on.tv_nsec = rq->perout.on.nsec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
907
ts_period.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
908
ts_period.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
931
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
942
pin = ptp_find_pin(phc->clock, PTP_PF_EXTTS, rq->extts.index);
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
970
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
972
switch (rq->type) {
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
974
return lan966x_ptp_perout(ptp, rq, on);
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
976
return lan966x_ptp_extts(ptp, rq, on);
drivers/net/ethernet/microsoft/mana/gdma_main.c
354
e.rq.id = qid;
drivers/net/ethernet/microsoft/mana/gdma_main.c
355
e.rq.tail_ptr = tail_ptr;
drivers/net/ethernet/microsoft/mana/gdma_main.c
356
e.rq.wqe_cnt = num_req;
drivers/net/ethernet/microsoft/mana/hw_channel.c
250
struct gdma_queue *rq;
drivers/net/ethernet/microsoft/mana/hw_channel.c
259
rq = hwc_rxq->gdma_wq;
drivers/net/ethernet/microsoft/mana/hw_channel.c
260
wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
drivers/net/ethernet/microsoft/mana/hw_channel.c
664
struct gdma_queue *rq = hwc->rxq->gdma_wq;
drivers/net/ethernet/microsoft/mana/hw_channel.c
675
rq->mem_info.dma_handle,
drivers/net/ethernet/mscc/ocelot_ptp.c
200
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/mscc/ocelot_ptp.c
212
switch (rq->type) {
drivers/net/ethernet/mscc/ocelot_ptp.c
215
rq->perout.index);
drivers/net/ethernet/mscc/ocelot_ptp.c
227
ts_period.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/mscc/ocelot_ptp.c
228
ts_period.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/mscc/ocelot_ptp.c
242
if (rq->perout.flags & PTP_PEROUT_PHASE) {
drivers/net/ethernet/mscc/ocelot_ptp.c
243
ts_phase.tv_sec = rq->perout.phase.sec;
drivers/net/ethernet/mscc/ocelot_ptp.c
244
ts_phase.tv_nsec = rq->perout.phase.nsec;
drivers/net/ethernet/mscc/ocelot_ptp.c
247
ts_phase.tv_sec = rq->perout.start.sec;
drivers/net/ethernet/mscc/ocelot_ptp.c
248
ts_phase.tv_nsec = rq->perout.start.nsec;
drivers/net/ethernet/mscc/ocelot_ptp.c
259
if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
drivers/net/ethernet/mscc/ocelot_ptp.c
262
ts_on.tv_sec = rq->perout.on.sec;
drivers/net/ethernet/mscc/ocelot_ptp.c
263
ts_on.tv_nsec = rq->perout.on.nsec;
drivers/net/ethernet/natsemi/natsemi.c
3072
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/natsemi/natsemi.c
3074
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/natsemi/natsemi.c
638
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/packetengines/hamachi.c
1875
static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq,
drivers/net/ethernet/packetengines/hamachi.c
1879
u32 *d = (u32 *)&rq->ifr_ifru;
drivers/net/ethernet/packetengines/hamachi.c
1903
static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/packetengines/hamachi.c
1906
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/packetengines/hamachi.c
549
static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/packetengines/hamachi.c
550
static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq,
drivers/net/ethernet/packetengines/yellowfin.c
1352
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/packetengines/yellowfin.c
1356
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/packetengines/yellowfin.c
344
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/qlogic/qede/qede_ptp.c
123
struct ptp_clock_request *rq,
drivers/net/ethernet/realtek/8139cp.c
1606
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/realtek/8139cp.c
1616
rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
drivers/net/ethernet/realtek/8139too.c
2486
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/realtek/8139too.c
2495
rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL);
drivers/net/ethernet/realtek/8139too.c
651
static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/renesas/rcar_gen4_ptp.c
127
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1941
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
1952
ret = phy_do_ioctl(dev, rq, cmd);
drivers/net/ethernet/sgi/ioc3-eth.c
104
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/sgi/ioc3-eth.c
1222
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/sgi/ioc3-eth.c
1228
rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
drivers/net/ethernet/sgi/meth.c
765
static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/sis/sis900.c
2217
static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/sis/sis900.c
2220
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/sis/sis900.c
230
static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/smsc/epic100.c
1472
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/smsc/epic100.c
1476
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/smsc/epic100.c
294
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/smsc/smc91c92_cs.c
1997
static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/smsc/smc91c92_cs.c
2000
struct mii_ioctl_data *mii = if_mii(rq);
drivers/net/ethernet/smsc/smc91c92_cs.c
273
static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
566
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
574
switch (rq->type) {
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
6311
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
6323
ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
165
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
175
switch (rq->type) {
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
182
if (rq->perout.flags)
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
185
cfg = &priv->pps[rq->perout.index];
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
187
cfg->start.tv_sec = rq->perout.start.sec;
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
188
cfg->start.tv_nsec = rq->perout.start.nsec;
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
214
cfg->period.tv_sec = rq->perout.period.sec;
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
215
cfg->period.tv_nsec = rq->perout.period.nsec;
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
219
rq->perout.index, cfg, on,
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
237
rq->extts.index, channel);
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
245
acr_value |= PTP_ACR_ATSEN(rq->extts.index);
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
251
rq->extts.index, on ? "enabled" : "disabled");
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
102
struct ptp_clock_request *rq, int on);
drivers/net/ethernet/ti/am65-cpts.c
720
struct ptp_clock_request rq;
drivers/net/ethernet/ti/am65-cpts.c
736
rq.perout.period.sec = 1;
drivers/net/ethernet/ti/am65-cpts.c
737
rq.perout.period.nsec = 0;
drivers/net/ethernet/ti/am65-cpts.c
738
rq.perout.start.sec = ts.tv_sec + 2;
drivers/net/ethernet/ti/am65-cpts.c
739
rq.perout.start.nsec = 0;
drivers/net/ethernet/ti/am65-cpts.c
740
rq.perout.index = cpts->pps_genf_idx;
drivers/net/ethernet/ti/am65-cpts.c
742
am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
drivers/net/ethernet/ti/am65-cpts.c
745
rq.perout.index = cpts->pps_genf_idx;
drivers/net/ethernet/ti/am65-cpts.c
746
am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
drivers/net/ethernet/ti/am65-cpts.c
759
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/ti/am65-cpts.c
763
switch (rq->type) {
drivers/net/ethernet/ti/am65-cpts.c
765
return am65_cpts_extts_enable(cpts, rq->extts.index, on);
drivers/net/ethernet/ti/am65-cpts.c
767
return am65_cpts_perout_enable(cpts, &rq->perout, on);
drivers/net/ethernet/ti/cpts.c
300
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/ti/cpts.c
304
switch (rq->type) {
drivers/net/ethernet/ti/cpts.c
306
return cpts_extts_enable(cpts, rq->extts.index, on);
drivers/net/ethernet/ti/icssg/icss_iep.c
580
struct ptp_clock_request rq;
drivers/net/ethernet/ti/icssg/icss_iep.c
595
rq.perout.index = 0;
drivers/net/ethernet/ti/icssg/icss_iep.c
599
rq.perout.flags = 0;
drivers/net/ethernet/ti/icssg/icss_iep.c
600
rq.perout.period.sec = 1;
drivers/net/ethernet/ti/icssg/icss_iep.c
601
rq.perout.period.nsec = 0;
drivers/net/ethernet/ti/icssg/icss_iep.c
602
rq.perout.start.sec = ts.tv_sec + 2;
drivers/net/ethernet/ti/icssg/icss_iep.c
603
rq.perout.start.nsec = 0;
drivers/net/ethernet/ti/icssg/icss_iep.c
604
rq.perout.on.sec = 0;
drivers/net/ethernet/ti/icssg/icss_iep.c
605
rq.perout.on.nsec = NSEC_PER_MSEC;
drivers/net/ethernet/ti/icssg/icss_iep.c
606
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
drivers/net/ethernet/ti/icssg/icss_iep.c
608
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
drivers/net/ethernet/ti/icssg/icss_iep.c
655
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/ti/icssg/icss_iep.c
659
switch (rq->type) {
drivers/net/ethernet/ti/icssg/icss_iep.c
661
return icss_iep_perout_enable(iep, &rq->perout, on);
drivers/net/ethernet/ti/icssg/icss_iep.c
665
return icss_iep_extts_enable(iep, rq->extts.index, on);
drivers/net/ethernet/ti/tlan.c
161
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/ti/tlan.c
936
static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/ti/tlan.c
939
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/tundra/tsi108_eth.c
1512
static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/tundra/tsi108_eth.c
1517
return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
drivers/net/ethernet/via/via-rhine.c
2393
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/via/via-rhine.c
2402
rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
drivers/net/ethernet/via/via-rhine.c
512
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/via/via-velocity.c
2428
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/via/via-velocity.c
2443
ret = velocity_mii_ioctl(dev, rq, cmd);
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
412
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
422
if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp)
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
426
if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
430
if (rq->perout.phase.sec || rq->perout.phase.nsec) {
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
435
if (rq->perout.period.sec != 1 || rq->perout.period.nsec) {
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
440
if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
443
ts_on.tv_sec = rq->perout.on.sec;
drivers/net/ethernet/wangxun/libwx/wx_ptp.c
444
ts_on.tv_nsec = rq->perout.on.nsec;
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1827
static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
1834
return phylink_mii_ioctl(lp->phylink, rq, cmd);
drivers/net/ethernet/xilinx/xilinx_emaclite.c
1216
static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/xilinx/xilinx_emaclite.c
1225
return phy_mii_ioctl(dev->phydev, rq, cmd);
drivers/net/ethernet/xircom/xirc2ps_cs.c
1418
do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/net/ethernet/xircom/xirc2ps_cs.c
1422
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/ethernet/xircom/xirc2ps_cs.c
1425
dev->name, rq->ifr_ifrn.ifrn_name, cmd,
drivers/net/ethernet/xircom/xirc2ps_cs.c
298
static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/net/ethernet/xscale/ptp_ixp46x.c
191
struct ptp_clock_request *rq, int on)
drivers/net/ethernet/xscale/ptp_ixp46x.c
195
switch (rq->type) {
drivers/net/ethernet/xscale/ptp_ixp46x.c
197
switch (rq->extts.index) {
drivers/net/fddi/skfp/skfddi.c
108
static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq,
drivers/net/fddi/skfp/skfddi.c
961
static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
drivers/net/ifb.c
100
skb_queue_splice_tail_init(&txp->rq, &txp->tq);
drivers/net/ifb.c
136
skb = skb_peek(&txp->rq);
drivers/net/ifb.c
197
__skb_queue_head_init(&txp->rq);
drivers/net/ifb.c
304
__skb_queue_purge(&txp->rq);
drivers/net/ifb.c
352
if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
drivers/net/ifb.c
355
__skb_queue_tail(&txp->rq, skb);
drivers/net/ifb.c
52
struct sk_buff_head rq;
drivers/net/netdevsim/netdev.c
1003
ns->rq = kzalloc_objs(*ns->rq, dev->num_rx_queues, GFP_KERNEL_ACCOUNT);
drivers/net/netdevsim/netdev.c
1004
if (!ns->rq)
drivers/net/netdevsim/netdev.c
1008
ns->rq[i] = nsim_queue_alloc();
drivers/net/netdevsim/netdev.c
1009
if (!ns->rq[i])
drivers/net/netdevsim/netdev.c
1017
kfree(ns->rq[i]);
drivers/net/netdevsim/netdev.c
1018
kfree(ns->rq);
drivers/net/netdevsim/netdev.c
1028
nsim_queue_free(dev, ns->rq[i]);
drivers/net/netdevsim/netdev.c
1030
kfree(ns->rq);
drivers/net/netdevsim/netdev.c
1031
ns->rq = NULL;
drivers/net/netdevsim/netdev.c
106
struct nsim_rq *rq,
drivers/net/netdevsim/netdev.c
120
return nsim_napi_rx(tx_dev, rx_dev, rq, skb);
drivers/net/netdevsim/netdev.c
131
struct nsim_rq *rq;
drivers/net/netdevsim/netdev.c
157
rq = peer_ns->rq[rxq];
drivers/net/netdevsim/netdev.c
168
skb, rq, psp_ext) == NET_RX_DROP))
drivers/net/netdevsim/netdev.c
171
if (!hrtimer_active(&rq->napi_timer))
drivers/net/netdevsim/netdev.c
172
hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);
drivers/net/netdevsim/netdev.c
40
static void nsim_start_peer_tx_queue(struct net_device *dev, struct nsim_rq *rq)
drivers/net/netdevsim/netdev.c
413
static int nsim_rcv(struct nsim_rq *rq, int budget)
drivers/net/netdevsim/netdev.c
415
struct net_device *dev = rq->napi.dev;
drivers/net/netdevsim/netdev.c
426
if (skb_queue_empty(&rq->skb_queue))
drivers/net/netdevsim/netdev.c
429
skb = skb_dequeue(&rq->skb_queue);
drivers/net/netdevsim/netdev.c
446
napi_gro_receive(&rq->napi, skb);
drivers/net/netdevsim/netdev.c
449
nsim_start_peer_tx_queue(dev, rq);
drivers/net/netdevsim/netdev.c
455
struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi);
drivers/net/netdevsim/netdev.c
458
done = nsim_rcv(rq, budget);
drivers/net/netdevsim/netdev.c
48
idx = rq->napi.index;
drivers/net/netdevsim/netdev.c
489
struct nsim_rq *rq;
drivers/net/netdevsim/netdev.c
493
rq = ns->rq[i];
drivers/net/netdevsim/netdev.c
495
netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i);
drivers/net/netdevsim/netdev.c
499
rq = ns->rq[i];
drivers/net/netdevsim/netdev.c
501
err = nsim_create_page_pool(&rq->page_pool, &rq->napi);
drivers/net/netdevsim/netdev.c
510
page_pool_destroy(ns->rq[i]->page_pool);
drivers/net/netdevsim/netdev.c
511
ns->rq[i]->page_pool = NULL;
drivers/net/netdevsim/netdev.c
515
__netif_napi_del_locked(&ns->rq[i]->napi);
drivers/net/netdevsim/netdev.c
522
struct nsim_rq *rq;
drivers/net/netdevsim/netdev.c
524
rq = container_of(timer, struct nsim_rq, napi_timer);
drivers/net/netdevsim/netdev.c
525
napi_schedule(&rq->napi);
drivers/net/netdevsim/netdev.c
530
static void nsim_rq_timer_init(struct nsim_rq *rq)
drivers/net/netdevsim/netdev.c
532
hrtimer_setup(&rq->napi_timer, nsim_napi_schedule, CLOCK_MONOTONIC,
drivers/net/netdevsim/netdev.c
542
struct nsim_rq *rq = ns->rq[i];
drivers/net/netdevsim/netdev.c
544
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
drivers/net/netdevsim/netdev.c
545
napi_enable_locked(&rq->napi);
drivers/net/netdevsim/netdev.c
578
struct nsim_rq *rq = ns->rq[i];
drivers/net/netdevsim/netdev.c
580
napi_disable_locked(&rq->napi);
drivers/net/netdevsim/netdev.c
581
__netif_napi_del_locked(&rq->napi);
drivers/net/netdevsim/netdev.c
586
page_pool_destroy(ns->rq[i]->page_pool);
drivers/net/netdevsim/netdev.c
587
ns->rq[i]->page_pool = NULL;
drivers/net/netdevsim/netdev.c
70
struct nsim_rq *rq,
drivers/net/netdevsim/netdev.c
727
struct nsim_rq *rq;
drivers/net/netdevsim/netdev.c
729
rq = kzalloc_obj(*rq, GFP_KERNEL_ACCOUNT);
drivers/net/netdevsim/netdev.c
730
if (!rq)
drivers/net/netdevsim/netdev.c
733
skb_queue_head_init(&rq->skb_queue);
drivers/net/netdevsim/netdev.c
734
nsim_rq_timer_init(rq);
drivers/net/netdevsim/netdev.c
735
return rq;
drivers/net/netdevsim/netdev.c
738
static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
drivers/net/netdevsim/netdev.c
740
hrtimer_cancel(&rq->napi_timer);
drivers/net/netdevsim/netdev.c
742
if (rq->skb_queue.qlen) {
drivers/net/netdevsim/netdev.c
744
dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
drivers/net/netdevsim/netdev.c
748
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
drivers/net/netdevsim/netdev.c
749
kfree(rq);
drivers/net/netdevsim/netdev.c
759
struct nsim_rq *rq;
drivers/net/netdevsim/netdev.c
778
return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
drivers/net/netdevsim/netdev.c
781
qmem->rq = nsim_queue_alloc();
drivers/net/netdevsim/netdev.c
782
if (!qmem->rq)
drivers/net/netdevsim/netdev.c
785
err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi);
drivers/net/netdevsim/netdev.c
790
netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
drivers/net/netdevsim/netdev.c
796
nsim_queue_free(dev, qmem->rq);
drivers/net/netdevsim/netdev.c
806
if (qmem->rq) {
drivers/net/netdevsim/netdev.c
808
netif_napi_del_locked(&qmem->rq->napi);
drivers/net/netdevsim/netdev.c
809
page_pool_destroy(qmem->rq->page_pool);
drivers/net/netdevsim/netdev.c
81
NSIM_RING_SIZE - skb_queue_len(&rq->skb_queue),
drivers/net/netdevsim/netdev.c
810
nsim_queue_free(dev, qmem->rq);
drivers/net/netdevsim/netdev.c
824
ns->rq[idx]->page_pool = qmem->pp;
drivers/net/netdevsim/netdev.c
825
napi_enable_locked(&ns->rq[idx]->napi);
drivers/net/netdevsim/netdev.c
833
netif_napi_del_locked(&ns->rq[idx]->napi);
drivers/net/netdevsim/netdev.c
834
netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
drivers/net/netdevsim/netdev.c
837
netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
drivers/net/netdevsim/netdev.c
839
netif_napi_del_locked(&ns->rq[idx]->napi);
drivers/net/netdevsim/netdev.c
842
ns->rq[idx] = qmem->rq;
drivers/net/netdevsim/netdev.c
843
napi_enable_locked(&ns->rq[idx]->napi);
drivers/net/netdevsim/netdev.c
855
napi_disable_locked(&ns->rq[idx]->napi);
drivers/net/netdevsim/netdev.c
858
qmem->pp = ns->rq[idx]->page_pool;
drivers/net/netdevsim/netdev.c
86
struct nsim_rq *rq, struct sk_buff *skb)
drivers/net/netdevsim/netdev.c
861
qmem->rq = ns->rq[idx];
drivers/net/netdevsim/netdev.c
88
if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
drivers/net/netdevsim/netdev.c
93
skb_queue_tail(&rq->skb_queue, skb);
drivers/net/netdevsim/netdev.c
951
ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool);
drivers/net/netdevsim/netdev.c
96
if (skb_queue_len(&rq->skb_queue) >= NSIM_RING_SIZE)
drivers/net/netdevsim/netdev.c
97
nsim_stop_tx_queue(tx_dev, rx_dev, rq,
drivers/net/netdevsim/netdevsim.h
107
struct nsim_rq **rq;
drivers/net/phy/bcm-phy-ptp.c
686
struct ptp_clock_request *rq, int on)
drivers/net/phy/bcm-phy-ptp.c
693
switch (rq->type) {
drivers/net/phy/bcm-phy-ptp.c
696
err = bcm_ptp_perout_locked(priv, &rq->perout, on);
drivers/net/phy/dp83640.c
471
struct ptp_clock_request *rq, int on)
drivers/net/phy/dp83640.c
479
switch (rq->type) {
drivers/net/phy/dp83640.c
482
if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
drivers/net/phy/dp83640.c
483
(rq->extts.flags & PTP_ENABLE_FEATURE) &&
drivers/net/phy/dp83640.c
484
(rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
drivers/net/phy/dp83640.c
487
index = rq->extts.index;
drivers/net/phy/dp83640.c
498
if (rq->extts.flags & PTP_FALLING_EDGE)
drivers/net/phy/dp83640.c
509
if (rq->perout.index >= N_PER_OUT)
drivers/net/phy/dp83640.c
511
return periodic_output(clock, rq, on, rq->perout.index);
drivers/net/phy/micrel.c
3756
struct ptp_clock_request *rq, int on)
drivers/net/phy/micrel.c
3767
event = rq->perout.index;
drivers/net/phy/micrel.c
3781
ts_on.tv_sec = rq->perout.on.sec;
drivers/net/phy/micrel.c
3782
ts_on.tv_nsec = rq->perout.on.nsec;
drivers/net/phy/micrel.c
3785
ts_period.tv_sec = rq->perout.period.sec;
drivers/net/phy/micrel.c
3786
ts_period.tv_nsec = rq->perout.period.nsec;
drivers/net/phy/micrel.c
3855
lan8814_ptp_set_target(phydev, event, rq->perout.start.sec,
drivers/net/phy/micrel.c
3856
rq->perout.start.nsec);
drivers/net/phy/micrel.c
3857
lan8814_ptp_set_reload(phydev, event, rq->perout.period.sec,
drivers/net/phy/micrel.c
3858
rq->perout.period.nsec);
drivers/net/phy/micrel.c
3928
struct ptp_clock_request *rq, int on)
drivers/net/phy/micrel.c
3936
rq->extts.index);
drivers/net/phy/micrel.c
3942
lan8814_ptp_extts_on(phydev, pin, rq->extts.flags);
drivers/net/phy/micrel.c
3952
struct ptp_clock_request *rq, int on)
drivers/net/phy/micrel.c
3954
switch (rq->type) {
drivers/net/phy/micrel.c
3956
return lan8814_ptp_perout(ptpci, rq, on);
drivers/net/phy/micrel.c
3958
return lan8814_ptp_extts(ptpci, rq, on);
drivers/net/phy/micrel.c
5661
struct ptp_clock_request *rq, int on)
drivers/net/phy/micrel.c
5672
pin = ptp_find_pin(ptp_priv->ptp_clock, PTP_PF_PEROUT, rq->perout.index);
drivers/net/phy/micrel.c
5684
ts_on.tv_sec = rq->perout.on.sec;
drivers/net/phy/micrel.c
5685
ts_on.tv_nsec = rq->perout.on.nsec;
drivers/net/phy/micrel.c
5688
ts_period.tv_sec = rq->perout.period.sec;
drivers/net/phy/micrel.c
5689
ts_period.tv_nsec = rq->perout.period.nsec;
drivers/net/phy/micrel.c
5755
ret = lan8841_ptp_set_target(ptp_priv, LAN8841_EVENT_A, rq->perout.start.sec,
drivers/net/phy/micrel.c
5756
rq->perout.start.nsec);
drivers/net/phy/micrel.c
5761
ret = lan8841_ptp_set_reload(ptp_priv, LAN8841_EVENT_A, rq->perout.period.sec,
drivers/net/phy/micrel.c
5762
rq->perout.period.nsec);
drivers/net/phy/micrel.c
5843
struct ptp_clock_request *rq, int on)
drivers/net/phy/micrel.c
5851
if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
drivers/net/phy/micrel.c
5856
pin = ptp_find_pin(ptp_priv->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
drivers/net/phy/micrel.c
5862
ret = lan8841_ptp_extts_on(ptp_priv, pin, rq->extts.flags);
drivers/net/phy/micrel.c
5871
struct ptp_clock_request *rq, int on)
drivers/net/phy/micrel.c
5873
switch (rq->type) {
drivers/net/phy/micrel.c
5875
return lan8841_ptp_extts(ptp, rq, on);
drivers/net/phy/micrel.c
5877
return lan8841_ptp_perout(ptp, rq, on);
drivers/net/plip/plip.c
1216
plip_siocdevprivate(struct net_device *dev, struct ifreq *rq,
drivers/net/plip/plip.c
1220
struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
drivers/net/ppp/ppp_generic.c
1868
if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
drivers/net/ppp/ppp_generic.c
1870
skb_queue_tail(&ppp->file.rq, skb);
drivers/net/ppp/ppp_generic.c
2352
skb_queue_tail(&pch->file.rq, skb);
drivers/net/ppp/ppp_generic.c
2354
while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
drivers/net/ppp/ppp_generic.c
2355
(skb = skb_dequeue(&pch->file.rq)))
drivers/net/ppp/ppp_generic.c
2506
skb_queue_tail(&ppp->file.rq, skb);
drivers/net/ppp/ppp_generic.c
2508
while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
drivers/net/ppp/ppp_generic.c
2509
(skb = skb_dequeue(&ppp->file.rq)))
drivers/net/ppp/ppp_generic.c
3419
skb_queue_head_init(&pf->rq);
drivers/net/ppp/ppp_generic.c
3447
skb_queue_purge(&ppp->file.rq);
drivers/net/ppp/ppp_generic.c
3606
skb_queue_purge(&pch->file.rq);
drivers/net/ppp/ppp_generic.c
449
skb = skb_dequeue(&pf->rq);
drivers/net/ppp/ppp_generic.c
565
if (skb_peek(&pf->rq))
drivers/net/ppp/ppp_generic.c
98
struct sk_buff_head rq; /* receive queue for pppd */
drivers/net/slip/slip.c
112
static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd);
drivers/net/slip/slip.c
1186
static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
drivers/net/slip/slip.c
1190
unsigned long *p = (unsigned long *)&rq->ifr_ifru;
drivers/net/usb/pegasus.c
1025
static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq,
drivers/net/usb/pegasus.c
1028
__u16 *data = (__u16 *) &rq->ifr_ifru;
drivers/net/usb/r8152.c
9313
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
drivers/net/usb/r8152.c
9316
struct mii_ioctl_data *data = if_mii(rq);
drivers/net/usb/rtl8150.c
838
static int rtl8150_siocdevprivate(struct net_device *netdev, struct ifreq *rq,
drivers/net/usb/rtl8150.c
842
u16 *data = (u16 *) & rq->ifr_ifru;
drivers/net/usb/smsc75xx.c
744
static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
drivers/net/usb/smsc75xx.c
749
return usbnet_mii_ioctl(netdev, rq, cmd);
drivers/net/usb/usbnet.c
1089
int usbnet_mii_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
drivers/net/usb/usbnet.c
1093
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
drivers/net/veth.c
1007
static int veth_create_page_pool(struct veth_rq *rq)
drivers/net/veth.c
1013
.dev = &rq->dev->dev,
drivers/net/veth.c
1016
rq->page_pool = page_pool_create(&pp_params);
drivers/net/veth.c
1017
if (IS_ERR(rq->page_pool)) {
drivers/net/veth.c
1018
int err = PTR_ERR(rq->page_pool);
drivers/net/veth.c
1020
rq->page_pool = NULL;
drivers/net/veth.c
1033
err = veth_create_page_pool(&priv->rq[i]);
drivers/net/veth.c
1039
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1041
err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
drivers/net/veth.c
1047
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1049
napi_enable(&rq->xdp_napi);
drivers/net/veth.c
1050
rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
drivers/net/veth.c
1057
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
drivers/net/veth.c
1061
page_pool_destroy(priv->rq[i].page_pool);
drivers/net/veth.c
1062
priv->rq[i].page_pool = NULL;
drivers/net/veth.c
1079
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1081
rcu_assign_pointer(priv->rq[i].napi, NULL);
drivers/net/veth.c
1082
napi_disable(&rq->xdp_napi);
drivers/net/veth.c
1083
__netif_napi_del(&rq->xdp_napi);
drivers/net/veth.c
1088
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1090
rq->rx_notify_masked = false;
drivers/net/veth.c
1091
ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
drivers/net/veth.c
1095
page_pool_destroy(priv->rq[i].page_pool);
drivers/net/veth.c
1096
priv->rq[i].page_pool = NULL;
drivers/net/veth.c
1117
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1120
netif_napi_add(dev, &rq->xdp_napi, veth_poll);
drivers/net/veth.c
1121
err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
drivers/net/veth.c
1125
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
drivers/net/veth.c
1132
rq->xdp_mem = rq->xdp_rxq.mem;
drivers/net/veth.c
1137
xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
drivers/net/veth.c
1140
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1142
xdp_rxq_info_unreg(&rq->xdp_rxq);
drivers/net/veth.c
1144
netif_napi_del(&rq->xdp_napi);
drivers/net/veth.c
1157
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1159
rq->xdp_rxq.mem = rq->xdp_mem;
drivers/net/veth.c
1160
xdp_rxq_info_unreg(&rq->xdp_rxq);
drivers/net/veth.c
1163
netif_napi_del(&rq->xdp_napi);
drivers/net/veth.c
1173
if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
drivers/net/veth.c
1188
rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
drivers/net/veth.c
1189
rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
drivers/net/veth.c
1201
rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
drivers/net/veth.c
1215
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1217
netif_napi_add(dev, &rq->xdp_napi, veth_poll);
drivers/net/veth.c
1223
struct veth_rq *rq = &priv->rq[i];
drivers/net/veth.c
1225
netif_napi_del(&rq->xdp_napi);
drivers/net/veth.c
1437
priv->rq = kvzalloc_objs(*priv->rq, dev->num_rx_queues,
drivers/net/veth.c
1439
if (!priv->rq)
drivers/net/veth.c
1443
priv->rq[i].dev = dev;
drivers/net/veth.c
1444
u64_stats_init(&priv->rq[i].stats.syncp);
drivers/net/veth.c
1454
kvfree(priv->rq);
drivers/net/veth.c
189
if (!priv->rq[i].page_pool)
drivers/net/veth.c
191
page_pool_get_stats(priv->rq[i].page_pool, &pp_stats);
drivers/net/veth.c
207
const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
drivers/net/veth.c
228
const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
drivers/net/veth.c
301
static void __veth_xdp_flush(struct veth_rq *rq)
drivers/net/veth.c
305
if (!READ_ONCE(rq->rx_notify_masked) &&
drivers/net/veth.c
306
napi_schedule_prep(&rq->xdp_napi)) {
drivers/net/veth.c
307
WRITE_ONCE(rq->rx_notify_masked, true);
drivers/net/veth.c
308
__napi_schedule(&rq->xdp_napi);
drivers/net/veth.c
312
static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
drivers/net/veth.c
314
if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb)))
drivers/net/veth.c
321
struct veth_rq *rq, bool xdp)
drivers/net/veth.c
324
veth_xdp_rx(rq, skb) :
drivers/net/veth.c
350
struct veth_rq *rq = NULL;
drivers/net/veth.c
367
rq = &rcv_priv->rq[rxq];
drivers/net/veth.c
373
use_napi = rcu_access_pointer(rq->napi) &&
drivers/net/veth.c
379
ret = veth_forward_skb(rcv, skb, rq, use_napi);
drivers/net/veth.c
385
__veth_xdp_flush(rq);
drivers/net/veth.c
404
__veth_xdp_flush(rq);
drivers/net/veth.c
432
struct veth_rq_stats *stats = &priv->rq[i].stats;
drivers/net/veth.c
511
struct veth_rq *rq;
drivers/net/veth.c
522
rq = &rcv_priv->rq[veth_select_rxq(rcv)];
drivers/net/veth.c
526
if (!rcu_access_pointer(rq->napi))
drivers/net/veth.c
531
spin_lock(&rq->xdp_ring.producer_lock);
drivers/net/veth.c
537
__ptr_ring_produce(&rq->xdp_ring, ptr)))
drivers/net/veth.c
541
spin_unlock(&rq->xdp_ring.producer_lock);
drivers/net/veth.c
544
__veth_xdp_flush(rq);
drivers/net/veth.c
548
u64_stats_update_begin(&rq->stats.syncp);
drivers/net/veth.c
549
rq->stats.vs.peer_tq_xdp_xmit += nxmit;
drivers/net/veth.c
550
rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
drivers/net/veth.c
551
u64_stats_update_end(&rq->stats.syncp);
drivers/net/veth.c
575
static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
drivers/net/veth.c
579
sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
drivers/net/veth.c
589
trace_xdp_bulk_tx(rq->dev, sent, drops, err);
drivers/net/veth.c
591
u64_stats_update_begin(&rq->stats.syncp);
drivers/net/veth.c
592
rq->stats.vs.xdp_tx += sent;
drivers/net/veth.c
593
rq->stats.vs.xdp_tx_err += drops;
drivers/net/veth.c
594
u64_stats_update_end(&rq->stats.syncp);
drivers/net/veth.c
599
static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
drivers/net/veth.c
601
struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
drivers/net/veth.c
606
veth_xdp_flush_bq(rq, bq);
drivers/net/veth.c
612
rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
drivers/net/veth.c
622
static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
drivers/net/veth.c
631
veth_xdp_flush_bq(rq, bq);
drivers/net/veth.c
638
static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
drivers/net/veth.c
647
xdp_prog = rcu_dereference(rq->xdp_prog);
drivers/net/veth.c
654
xdp->rxq = &rq->xdp_rxq;
drivers/net/veth.c
667
if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
drivers/net/veth.c
668
trace_xdp_exception(rq->dev, xdp_prog, act);
drivers/net/veth.c
679
if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
drivers/net/veth.c
688
bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
drivers/net/veth.c
691
trace_xdp_exception(rq->dev, xdp_prog, act);
drivers/net/veth.c
709
static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
drivers/net/veth.c
728
rq->dev);
drivers/net/veth.c
734
napi_gro_receive(&rq->xdp_napi, skb);
drivers/net/veth.c
751
static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
drivers/net/veth.c
761
if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
drivers/net/veth.c
770
xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
drivers/net/veth.c
78
struct veth_rq *rq;
drivers/net/veth.c
790
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
drivers/net/veth.c
805
xdp_prog = rcu_dereference(rq->xdp_prog);
drivers/net/veth.c
812
if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
drivers/net/veth.c
827
xdp->rxq->mem = rq->xdp_mem;
drivers/net/veth.c
828
if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
drivers/net/veth.c
829
trace_xdp_exception(rq->dev, xdp_prog, act);
drivers/net/veth.c
839
xdp->rxq->mem = rq->xdp_mem;
drivers/net/veth.c
840
if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
drivers/net/veth.c
848
bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
drivers/net/veth.c
851
trace_xdp_exception(rq->dev, xdp_prog, act);
drivers/net/veth.c
881
skb->protocol = eth_type_trans(skb, rq->dev);
drivers/net/veth.c
901
static int veth_xdp_rcv(struct veth_rq *rq, int budget,
drivers/net/veth.c
909
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
drivers/net/veth.c
919
frame = veth_xdp_rcv_one(rq, frame, bq, stats);
drivers/net/veth.c
924
veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
drivers/net/veth.c
934
skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
drivers/net/veth.c
939
napi_gro_receive(&rq->xdp_napi, skb);
drivers/net/veth.c
946
veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
drivers/net/veth.c
948
u64_stats_update_begin(&rq->stats.syncp);
drivers/net/veth.c
949
rq->stats.vs.xdp_redirect += stats->xdp_redirect;
drivers/net/veth.c
950
rq->stats.vs.xdp_bytes += stats->xdp_bytes;
drivers/net/veth.c
951
rq->stats.vs.xdp_drops += stats->xdp_drops;
drivers/net/veth.c
952
rq->stats.vs.rx_drops += stats->rx_drops;
drivers/net/veth.c
953
rq->stats.vs.xdp_packets += done;
drivers/net/veth.c
954
u64_stats_update_end(&rq->stats.syncp);
drivers/net/veth.c
961
struct veth_rq *rq =
drivers/net/veth.c
963
struct veth_priv *priv = netdev_priv(rq->dev);
drivers/net/veth.c
964
int queue_idx = rq->xdp_rxq.queue_index;
drivers/net/veth.c
978
done = veth_xdp_rcv(rq, budget, &bq, &stats);
drivers/net/veth.c
983
veth_xdp_flush(rq, &bq);
drivers/net/veth.c
988
smp_store_mb(rq->rx_notify_masked, false);
drivers/net/veth.c
989
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
drivers/net/veth.c
990
if (napi_schedule_prep(&rq->xdp_napi)) {
drivers/net/veth.c
991
WRITE_ONCE(rq->rx_notify_masked, true);
drivers/net/veth.c
992
__napi_schedule(&rq->xdp_napi);
drivers/net/virtio_net.c
1001
struct page_frag *alloc_frag = &rq->alloc_frag;
drivers/net/virtio_net.c
1002
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
1015
if (rq->last_dma) {
drivers/net/virtio_net.c
1020
virtnet_rq_unmap(rq, rq->last_dma, 0);
drivers/net/virtio_net.c
1021
rq->last_dma = NULL;
drivers/net/virtio_net.c
1026
addr = virtqueue_map_single_attrs(rq->vq, dma + 1,
drivers/net/virtio_net.c
1028
if (virtqueue_map_mapping_error(rq->vq, addr))
drivers/net/virtio_net.c
1032
dma->need_sync = virtqueue_map_need_sync(rq->vq, addr);
drivers/net/virtio_net.c
1042
rq->last_dma = dma;
drivers/net/virtio_net.c
1058
struct receive_queue *rq;
drivers/net/virtio_net.c
1061
rq = &vi->rq[i];
drivers/net/virtio_net.c
1063
if (rq->xsk_pool) {
drivers/net/virtio_net.c
1069
virtnet_rq_unmap(rq, buf, 0);
drivers/net/virtio_net.c
1071
virtnet_rq_free_buf(vi, rq, buf);
drivers/net/virtio_net.c
1166
struct receive_queue *rq, void *buf,
drivers/net/virtio_net.c
1184
bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool);
drivers/net/virtio_net.c
1209
static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
drivers/net/virtio_net.c
1217
skb = napi_alloc_skb(&rq->napi, size);
drivers/net/virtio_net.c
1239
struct receive_queue *rq, struct xdp_buff *xdp,
drivers/net/virtio_net.c
1248
prog = rcu_dereference(rq->xdp_prog);
drivers/net/virtio_net.c
1255
return xsk_construct_skb(rq, xdp);
drivers/net/virtio_net.c
1270
struct receive_queue *rq,
drivers/net/virtio_net.c
1278
xdp = virtqueue_get_buf(rq->vq, &len);
drivers/net/virtio_net.c
1291
struct receive_queue *rq,
drivers/net/virtio_net.c
1306
buf = virtqueue_get_buf(rq->vq, &len);
drivers/net/virtio_net.c
1318
xdp = buf_to_xdp(vi, rq, buf, len, false);
drivers/net/virtio_net.c
1347
xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
drivers/net/virtio_net.c
1352
struct receive_queue *rq, struct xdp_buff *xdp,
drivers/net/virtio_net.c
1366
prog = rcu_dereference(rq->xdp_prog);
drivers/net/virtio_net.c
1379
skb = xsk_construct_skb(rq, xdp);
drivers/net/virtio_net.c
1383
if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
drivers/net/virtio_net.c
1400
xsk_drop_follow_bufs(dev, rq, num_buf, stats);
drivers/net/virtio_net.c
1407
static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
1421
xdp = buf_to_xdp(vi, rq, buf, len, true);
drivers/net/virtio_net.c
1435
skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
drivers/net/virtio_net.c
1437
skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
drivers/net/virtio_net.c
1440
virtnet_receive_done(vi, rq, skb, flags);
drivers/net/virtio_net.c
1443
static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
1452
xsk_buffs = rq->xsk_buffs;
drivers/net/virtio_net.c
1454
num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
drivers/net/virtio_net.c
1467
sg_init_table(rq->sg, 1);
drivers/net/virtio_net.c
1468
sg_fill_dma(rq->sg, addr, len);
drivers/net/virtio_net.c
1470
err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
drivers/net/virtio_net.c
1717
struct receive_queue *rq = vi->rq;
drivers/net/virtio_net.c
1728
xdp_prog = rcu_access_pointer(rq->xdp_prog);
drivers/net/virtio_net.c
1861
struct receive_queue *rq,
drivers/net/virtio_net.c
1890
buf = virtnet_rq_get_buf(rq, &buflen, &ctx);
drivers/net/virtio_net.c
1951
struct receive_queue *rq,
drivers/net/virtio_net.c
1990
xdp_page = xdp_linearize_page(dev, rq, &num_buf, page,
drivers/net/virtio_net.c
2001
xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
drivers/net/virtio_net.c
2042
struct receive_queue *rq,
drivers/net/virtio_net.c
2071
xdp_prog = rcu_dereference(rq->xdp_prog);
drivers/net/virtio_net.c
2073
skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
drivers/net/virtio_net.c
2094
struct receive_queue *rq,
drivers/net/virtio_net.c
2112
skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
drivers/net/virtio_net.c
2121
give_pages(rq, page);
drivers/net/virtio_net.c
2125
static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
drivers/net/virtio_net.c
2134
buf = virtnet_rq_get_buf(rq, &len, NULL);
drivers/net/virtio_net.c
2198
struct receive_queue *rq,
drivers/net/virtio_net.c
2216
xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
drivers/net/virtio_net.c
2240
buf = virtnet_rq_get_buf(rq, &len, &ctx);
drivers/net/virtio_net.c
2278
struct receive_queue *rq,
drivers/net/virtio_net.c
2326
xdp_page = xdp_linearize_page(vi->dev, rq, num_buf,
drivers/net/virtio_net.c
2357
struct receive_queue *rq,
drivers/net/virtio_net.c
2377
data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
drivers/net/virtio_net.c
2382
err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
drivers/net/virtio_net.c
2408
mergeable_buf_free(rq, num_buf, dev, stats);
drivers/net/virtio_net.c
2460
struct receive_queue *rq,
drivers/net/virtio_net.c
2485
xdp_prog = rcu_dereference(rq->xdp_prog);
drivers/net/virtio_net.c
2487
head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
drivers/net/virtio_net.c
2495
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
drivers/net/virtio_net.c
2501
buf = virtnet_rq_get_buf(rq, &len, &ctx);
drivers/net/virtio_net.c
2524
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
drivers/net/virtio_net.c
2529
mergeable_buf_free(rq, num_buf, dev, stats);
drivers/net/virtio_net.c
2573
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
2601
skb_record_rx_queue(skb, vq2rxq(rq->vq));
drivers/net/virtio_net.c
2606
napi_gro_receive(&rq->napi, skb);
drivers/net/virtio_net.c
2614
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
2626
virtnet_rq_free_buf(vi, rq, buf);
drivers/net/virtio_net.c
2641
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
drivers/net/virtio_net.c
2647
skb = receive_big(dev, vi, rq, buf, len, stats);
drivers/net/virtio_net.c
2650
skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
drivers/net/virtio_net.c
2656
virtnet_receive_done(vi, rq, skb, flags);
drivers/net/virtio_net.c
2664
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
2676
if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
drivers/net/virtio_net.c
2679
buf = virtnet_rq_alloc(rq, len, gfp);
drivers/net/virtio_net.c
2685
virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
drivers/net/virtio_net.c
2687
err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
drivers/net/virtio_net.c
2689
virtnet_rq_unmap(rq, buf, 0);
drivers/net/virtio_net.c
2696
static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
2703
sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
drivers/net/virtio_net.c
2707
first = get_a_page(rq, gfp);
drivers/net/virtio_net.c
2710
give_pages(rq, list);
drivers/net/virtio_net.c
2713
sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
drivers/net/virtio_net.c
2720
first = get_a_page(rq, gfp);
drivers/net/virtio_net.c
2722
give_pages(rq, list);
drivers/net/virtio_net.c
2729
sg_set_buf(&rq->sg[0], p, vi->hdr_len);
drivers/net/virtio_net.c
2733
sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
drivers/net/virtio_net.c
2737
err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
drivers/net/virtio_net.c
2740
give_pages(rq, first);
drivers/net/virtio_net.c
2745
static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
drivers/net/virtio_net.c
2749
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
2757
rq->min_buf_len, PAGE_SIZE - hdr_len);
drivers/net/virtio_net.c
2763
struct receive_queue *rq, gfp_t gfp)
drivers/net/virtio_net.c
2765
struct page_frag *alloc_frag = &rq->alloc_frag;
drivers/net/virtio_net.c
2778
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
drivers/net/virtio_net.c
2786
buf = virtnet_rq_alloc(rq, len + room, gfp);
drivers/net/virtio_net.c
2804
virtnet_rq_init_one_sg(rq, buf, len);
drivers/net/virtio_net.c
2807
err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
drivers/net/virtio_net.c
2809
virtnet_rq_unmap(rq, buf, 0);
drivers/net/virtio_net.c
2823
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
2828
if (rq->xsk_pool) {
drivers/net/virtio_net.c
2829
err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
drivers/net/virtio_net.c
2835
err = add_recvbuf_mergeable(vi, rq, gfp);
drivers/net/virtio_net.c
2837
err = add_recvbuf_big(vi, rq, gfp);
drivers/net/virtio_net.c
2839
err = add_recvbuf_small(vi, rq, gfp);
drivers/net/virtio_net.c
2843
} while (rq->vq->num_free);
drivers/net/virtio_net.c
2846
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
drivers/net/virtio_net.c
2849
flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
drivers/net/virtio_net.c
2850
u64_stats_inc(&rq->stats.kicks);
drivers/net/virtio_net.c
2851
u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
drivers/net/virtio_net.c
2860
struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
drivers/net/virtio_net.c
2862
rq->calls++;
drivers/net/virtio_net.c
2863
virtqueue_napi_schedule(&rq->napi, rvq);
drivers/net/virtio_net.c
2880
static void virtnet_napi_enable(struct receive_queue *rq)
drivers/net/virtio_net.c
2882
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
2883
int qidx = vq2rxq(rq->vq);
drivers/net/virtio_net.c
2885
virtnet_napi_do_enable(rq->vq, &rq->napi);
drivers/net/virtio_net.c
2886
netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
drivers/net/virtio_net.c
2922
static void virtnet_napi_disable(struct receive_queue *rq)
drivers/net/virtio_net.c
2924
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
2925
struct napi_struct *napi = &rq->napi;
drivers/net/virtio_net.c
2926
int qidx = vq2rxq(rq->vq);
drivers/net/virtio_net.c
2933
struct receive_queue *rq,
drivers/net/virtio_net.c
2943
buf = virtqueue_get_buf(rq->vq, &len);
drivers/net/virtio_net.c
2947
virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
drivers/net/virtio_net.c
2955
struct receive_queue *rq,
drivers/net/virtio_net.c
2967
(buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
drivers/net/virtio_net.c
2968
receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
drivers/net/virtio_net.c
2973
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
drivers/net/virtio_net.c
2974
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
drivers/net/virtio_net.c
2982
static int virtnet_receive(struct receive_queue *rq, int budget,
drivers/net/virtio_net.c
2985
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
2989
if (rq->xsk_pool)
drivers/net/virtio_net.c
2990
packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
drivers/net/virtio_net.c
2992
packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
drivers/net/virtio_net.c
2995
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
drivers/net/virtio_net.c
2996
if (!try_fill_recv(vi, rq, GFP_ATOMIC))
drivers/net/virtio_net.c
3004
u64_stats_update_begin(&rq->stats.syncp);
drivers/net/virtio_net.c
3009
item = (u64_stats_t *)((u8 *)&rq->stats + offset);
drivers/net/virtio_net.c
3014
u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
drivers/net/virtio_net.c
3015
u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
drivers/net/virtio_net.c
3017
u64_stats_update_end(&rq->stats.syncp);
drivers/net/virtio_net.c
3022
static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
drivers/net/virtio_net.c
3024
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
3025
unsigned int index = vq2rxq(rq->vq);
drivers/net/virtio_net.c
3050
static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
drivers/net/virtio_net.c
3054
if (!rq->packets_in_napi)
drivers/net/virtio_net.c
3060
dim_update_sample(rq->calls,
drivers/net/virtio_net.c
3061
u64_stats_read(&rq->stats.packets),
drivers/net/virtio_net.c
3062
u64_stats_read(&rq->stats.bytes),
drivers/net/virtio_net.c
3065
net_dim(&rq->dim, &cur_sample);
drivers/net/virtio_net.c
3066
rq->packets_in_napi = 0;
drivers/net/virtio_net.c
3071
struct receive_queue *rq =
drivers/net/virtio_net.c
3073
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
3079
virtnet_poll_cleantx(rq, budget);
drivers/net/virtio_net.c
3081
received = virtnet_receive(rq, budget, &xdp_xmit);
drivers/net/virtio_net.c
3082
rq->packets_in_napi += received;
drivers/net/virtio_net.c
3089
napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
drivers/net/virtio_net.c
3094
if (napi_complete && rq->dim_enabled)
drivers/net/virtio_net.c
3095
virtnet_rx_dim_update(vi, rq);
drivers/net/virtio_net.c
3114
virtnet_napi_disable(&vi->rq[qp_index]);
drivers/net/virtio_net.c
3115
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
drivers/net/virtio_net.c
3123
err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
drivers/net/virtio_net.c
3124
vi->rq[qp_index].napi.napi_id);
drivers/net/virtio_net.c
3128
err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
drivers/net/virtio_net.c
3133
virtnet_napi_enable(&vi->rq[qp_index]);
drivers/net/virtio_net.c
3139
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
drivers/net/virtio_net.c
3179
try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
drivers/net/virtio_net.c
3200
virtnet_cancel_dim(vi, &vi->rq[i].dim);
drivers/net/virtio_net.c
3382
struct receive_queue *rq)
drivers/net/virtio_net.c
3387
virtnet_napi_disable(rq);
drivers/net/virtio_net.c
3388
virtnet_cancel_dim(vi, &rq->dim);
drivers/net/virtio_net.c
3397
virtnet_rx_pause(vi, &vi->rq[i]);
drivers/net/virtio_net.c
3401
struct receive_queue *rq,
drivers/net/virtio_net.c
3409
try_fill_recv(vi, rq, GFP_KERNEL);
drivers/net/virtio_net.c
3411
virtnet_napi_enable(rq);
drivers/net/virtio_net.c
3421
virtnet_rx_resume(vi, &vi->rq[i], true);
drivers/net/virtio_net.c
3423
virtnet_rx_resume(vi, &vi->rq[i], false);
drivers/net/virtio_net.c
3428
struct receive_queue *rq, u32 ring_num)
drivers/net/virtio_net.c
3432
qindex = rq - vi->rq;
drivers/net/virtio_net.c
3434
virtnet_rx_pause(vi, rq);
drivers/net/virtio_net.c
3436
err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
drivers/net/virtio_net.c
3440
virtnet_rx_resume(vi, rq, true);
drivers/net/virtio_net.c
3642
struct receive_queue *rq = &vi->rq[i];
drivers/net/virtio_net.c
3653
start = u64_stats_fetch_begin(&rq->stats.syncp);
drivers/net/virtio_net.c
3654
rpackets = u64_stats_read(&rq->stats.packets);
drivers/net/virtio_net.c
3655
rbytes = u64_stats_read(&rq->stats.bytes);
drivers/net/virtio_net.c
3656
rdrops = u64_stats_read(&rq->stats.drops);
drivers/net/virtio_net.c
3657
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
drivers/net/virtio_net.c
3756
virtqueue_napi_schedule(&vi->rq[i].napi, vi->rq[i].vq);
drivers/net/virtio_net.c
3779
virtnet_cancel_dim(vi, &vi->rq[i].dim);
drivers/net/virtio_net.c
3930
virtqueue_set_affinity(vi->rq[i].vq, NULL);
drivers/net/virtio_net.c
395
struct receive_queue *rq;
drivers/net/virtio_net.c
3969
virtqueue_set_affinity(vi->rq[i].vq, mask);
drivers/net/virtio_net.c
4065
vi->rq[queue].intr_coal.max_usecs = max_usecs;
drivers/net/virtio_net.c
4066
vi->rq[queue].intr_coal.max_packets = max_packets;
drivers/net/virtio_net.c
4098
ring->rx_max_pending = vi->rq[0].vq->num_max;
drivers/net/virtio_net.c
4100
ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
drivers/net/virtio_net.c
4111
struct receive_queue *rq;
drivers/net/virtio_net.c
4118
rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
drivers/net/virtio_net.c
4125
if (ring->rx_pending > vi->rq[0].vq->num_max)
drivers/net/virtio_net.c
4132
rq = vi->rq + i;
drivers/net/virtio_net.c
4157
err = virtnet_rx_resize(vi, rq, ring->rx_pending);
drivers/net/virtio_net.c
4162
mutex_lock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
4166
mutex_unlock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
4371
if (vi->rq[0].xdp_prog)
drivers/net/virtio_net.c
5073
struct receive_queue *rq = &vi->rq[i];
drivers/net/virtio_net.c
5076
stats_base = (const u8 *)&rq->stats;
drivers/net/virtio_net.c
5078
start = u64_stats_fetch_begin(&rq->stats.syncp);
drivers/net/virtio_net.c
5080
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
drivers/net/virtio_net.c
5174
mutex_lock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
5175
vi->rq[i].dim_enabled = true;
drivers/net/virtio_net.c
5176
mutex_unlock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
5188
mutex_lock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
5189
vi->rq[i].dim_enabled = false;
drivers/net/virtio_net.c
5190
mutex_unlock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
520
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
5210
mutex_lock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
5211
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
drivers/net/virtio_net.c
5212
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
drivers/net/virtio_net.c
5213
mutex_unlock(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
5244
mutex_lock(&vi->rq[queue].dim_lock);
drivers/net/virtio_net.c
5245
cur_rx_dim = vi->rq[queue].dim_enabled;
drivers/net/virtio_net.c
5246
max_usecs = vi->rq[queue].intr_coal.max_usecs;
drivers/net/virtio_net.c
5247
max_packets = vi->rq[queue].intr_coal.max_packets;
drivers/net/virtio_net.c
5251
mutex_unlock(&vi->rq[queue].dim_lock);
drivers/net/virtio_net.c
5256
vi->rq[queue].dim_enabled = true;
drivers/net/virtio_net.c
5257
mutex_unlock(&vi->rq[queue].dim_lock);
drivers/net/virtio_net.c
5262
vi->rq[queue].dim_enabled = false;
drivers/net/virtio_net.c
5270
mutex_unlock(&vi->rq[queue].dim_lock);
drivers/net/virtio_net.c
5296
struct receive_queue *rq = container_of(dim,
drivers/net/virtio_net.c
5298
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
5303
qnum = rq - vi->rq;
drivers/net/virtio_net.c
5305
mutex_lock(&rq->dim_lock);
drivers/net/virtio_net.c
5306
if (!rq->dim_enabled)
drivers/net/virtio_net.c
5310
if (update_moder.usec != rq->intr_coal.max_usecs ||
drivers/net/virtio_net.c
5311
update_moder.pkts != rq->intr_coal.max_packets) {
drivers/net/virtio_net.c
5321
mutex_unlock(&rq->dim_lock);
drivers/net/virtio_net.c
5467
mutex_lock(&vi->rq[queue].dim_lock);
drivers/net/virtio_net.c
5468
ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
drivers/net/virtio_net.c
5471
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
drivers/net/virtio_net.c
5472
ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
drivers/net/virtio_net.c
5473
mutex_unlock(&vi->rq[queue].dim_lock);
drivers/net/virtio_net.c
5599
struct receive_queue *rq = &vi->rq[i];
drivers/net/virtio_net.c
5605
virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
drivers/net/virtio_net.c
5781
static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
drivers/net/virtio_net.c
5786
qindex = rq - vi->rq;
drivers/net/virtio_net.c
5789
err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
drivers/net/virtio_net.c
5793
err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
drivers/net/virtio_net.c
5798
xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
drivers/net/virtio_net.c
5801
virtnet_rx_pause(vi, rq);
drivers/net/virtio_net.c
5803
err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
drivers/net/virtio_net.c
5810
rq->xsk_pool = pool;
drivers/net/virtio_net.c
5812
virtnet_rx_resume(vi, rq, true);
drivers/net/virtio_net.c
5818
xdp_rxq_info_unreg(&rq->xsk_rxq_info);
drivers/net/virtio_net.c
5851
struct receive_queue *rq;
drivers/net/virtio_net.c
5870
rq = &vi->rq[qid];
drivers/net/virtio_net.c
5879
if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
drivers/net/virtio_net.c
5882
dma_dev = virtqueue_dma_dev(rq->vq);
drivers/net/virtio_net.c
5886
size = virtqueue_get_vring_size(rq->vq);
drivers/net/virtio_net.c
5888
rq->xsk_buffs = kvzalloc_objs(*rq->xsk_buffs, size);
drivers/net/virtio_net.c
5889
if (!rq->xsk_buffs)
drivers/net/virtio_net.c
5903
err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
drivers/net/virtio_net.c
5919
virtnet_rq_bind_xsk_pool(vi, rq, NULL);
drivers/net/virtio_net.c
5923
virtqueue_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
drivers/net/virtio_net.c
5926
kvfree(rq->xsk_buffs);
drivers/net/virtio_net.c
5934
struct receive_queue *rq;
drivers/net/virtio_net.c
5942
rq = &vi->rq[qid];
drivers/net/virtio_net.c
5944
pool = rq->xsk_pool;
drivers/net/virtio_net.c
5946
err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
drivers/net/virtio_net.c
5953
kvfree(rq->xsk_buffs);
drivers/net/virtio_net.c
6012
old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
drivers/net/virtio_net.c
6029
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
drivers/net/virtio_net.c
6045
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
drivers/net/virtio_net.c
6070
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
drivers/net/virtio_net.c
6177
net_dim_setting(vi->dev, &vi->rq[i].dim, false);
drivers/net/virtio_net.c
6256
__netif_napi_del(&vi->rq[i].napi);
drivers/net/virtio_net.c
6265
kfree(vi->rq);
drivers/net/virtio_net.c
6276
while (vi->rq[i].pages)
drivers/net/virtio_net.c
6277
__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
drivers/net/virtio_net.c
6279
old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
drivers/net/virtio_net.c
6280
RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
drivers/net/virtio_net.c
6297
if (vi->rq[i].alloc_frag.page) {
drivers/net/virtio_net.c
6298
if (vi->rq[i].last_dma)
drivers/net/virtio_net.c
6299
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
drivers/net/virtio_net.c
6300
put_page(vi->rq[i].alloc_frag.page);
drivers/net/virtio_net.c
6349
struct virtqueue *vq = vi->rq[i].vq;
drivers/net/virtio_net.c
6424
sprintf(vi->rq[i].name, "input.%u", i);
drivers/net/virtio_net.c
6426
vqs_info[rxq2vq(i)].name = vi->rq[i].name;
drivers/net/virtio_net.c
6443
vi->rq[i].vq = vqs[rxq2vq(i)];
drivers/net/virtio_net.c
6444
vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
drivers/net/virtio_net.c
6475
vi->rq = kzalloc_objs(*vi->rq, vi->max_queue_pairs);
drivers/net/virtio_net.c
6476
if (!vi->rq)
drivers/net/virtio_net.c
6480
vi->rq[i].pages = NULL;
drivers/net/virtio_net.c
6481
netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
drivers/net/virtio_net.c
6483
vi->rq[i].napi.weight = napi_weight;
drivers/net/virtio_net.c
6488
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
drivers/net/virtio_net.c
6489
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
drivers/net/virtio_net.c
6492
u64_stats_init(&vi->rq[i].stats.syncp);
drivers/net/virtio_net.c
6494
mutex_init(&vi->rq[i].dim_lock);
drivers/net/virtio_net.c
6543
avg = &vi->rq[queue_index].mrg_avg_pkt_len;
drivers/net/virtio_net.c
6545
get_mergeable_buf_len(&vi->rq[queue_index], avg,
drivers/net/virtio_net.c
684
static void give_pages(struct receive_queue *rq, struct page *page)
drivers/net/virtio_net.c
690
end->private = (unsigned long)rq->pages;
drivers/net/virtio_net.c
691
rq->pages = page;
drivers/net/virtio_net.c
694
static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
drivers/net/virtio_net.c
696
struct page *p = rq->pages;
drivers/net/virtio_net.c
699
rq->pages = (struct page *)p->private;
drivers/net/virtio_net.c
708
struct receive_queue *rq, void *buf)
drivers/net/virtio_net.c
713
give_pages(rq, buf);
drivers/net/virtio_net.c
844
struct receive_queue *rq,
drivers/net/virtio_net.c
880
give_pages(rq, page);
drivers/net/virtio_net.c
885
skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
drivers/net/virtio_net.c
920
give_pages(rq, page);
drivers/net/virtio_net.c
931
static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
drivers/net/virtio_net.c
933
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
950
virtqueue_map_sync_single_range_for_cpu(rq->vq, dma->addr,
drivers/net/virtio_net.c
958
virtqueue_unmap_single_attrs(rq->vq, dma->addr, dma->len,
drivers/net/virtio_net.c
963
static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
drivers/net/virtio_net.c
965
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
970
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
drivers/net/virtio_net.c
972
virtnet_rq_unmap(rq, buf, *len);
drivers/net/virtio_net.c
977
static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
drivers/net/virtio_net.c
979
struct virtnet_info *vi = rq->vq->vdev->priv;
drivers/net/virtio_net.c
987
head = page_address(rq->alloc_frag.page);
drivers/net/virtio_net.c
995
sg_init_table(rq->sg, 1);
drivers/net/virtio_net.c
996
sg_fill_dma(rq->sg, addr, len);
drivers/net/virtio_net.c
999
static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
drivers/net/vmxnet3/vmxnet3_drv.c
1378
struct vmxnet3_rx_queue *rq, int size)
drivers/net/vmxnet3/vmxnet3_drv.c
1398
err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
drivers/net/vmxnet3/vmxnet3_drv.c
1399
rq->napi.napi_id);
drivers/net/vmxnet3/vmxnet3_drv.c
1403
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
drivers/net/vmxnet3/vmxnet3_drv.c
1407
rq->page_pool = pp;
drivers/net/vmxnet3/vmxnet3_drv.c
1412
xdp_rxq_info_unreg(&rq->xdp_rxq);
drivers/net/vmxnet3/vmxnet3_drv.c
1494
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
drivers/net/vmxnet3/vmxnet3_drv.c
1497
rq->stats.drop_err++;
drivers/net/vmxnet3/vmxnet3_drv.c
1499
rq->stats.drop_fcs++;
drivers/net/vmxnet3/vmxnet3_drv.c
1501
rq->stats.drop_total++;
drivers/net/vmxnet3/vmxnet3_drv.c
1600
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
drivers/net/vmxnet3/vmxnet3_drv.c
1610
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
drivers/net/vmxnet3/vmxnet3_drv.c
1619
vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
drivers/net/vmxnet3/vmxnet3_drv.c
1621
while (rcd->gen == rq->comp_ring.gen) {
drivers/net/vmxnet3/vmxnet3_drv.c
1642
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
drivers/net/vmxnet3/vmxnet3_drv.c
1643
rcd->rqID != rq->dataRingQid);
drivers/net/vmxnet3/vmxnet3_drv.c
1646
ring = rq->rx_ring + ring_idx;
drivers/net/vmxnet3/vmxnet3_drv.c
1647
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
drivers/net/vmxnet3/vmxnet3_drv.c
1649
rbi = rq->buf_info[ring_idx] + idx;
drivers/net/vmxnet3/vmxnet3_drv.c
1655
vmxnet3_rx_error(rq, rcd, ctx, adapter);
drivers/net/vmxnet3/vmxnet3_drv.c
1671
act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
drivers/net/vmxnet3/vmxnet3_drv.c
1689
(rcd->rqID != rq->qid &&
drivers/net/vmxnet3/vmxnet3_drv.c
1690
rcd->rqID != rq->dataRingQid));
drivers/net/vmxnet3/vmxnet3_drv.c
1708
if (rq->rx_ts_desc_size != 0 && rcd->ext2) {
drivers/net/vmxnet3/vmxnet3_drv.c
1711
ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base +
drivers/net/vmxnet3/vmxnet3_drv.c
1712
idx * rq->rx_ts_desc_size);
drivers/net/vmxnet3/vmxnet3_drv.c
1726
sz = rcd->rxdIdx * rq->data_ring.desc_size;
drivers/net/vmxnet3/vmxnet3_drv.c
1727
act = vmxnet3_process_xdp_small(adapter, rq,
drivers/net/vmxnet3/vmxnet3_drv.c
1728
&rq->data_ring.base[sz],
drivers/net/vmxnet3/vmxnet3_drv.c
1745
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
1747
rq->stats.drop_total++;
drivers/net/vmxnet3/vmxnet3_drv.c
1755
BUG_ON(rcd->len > rq->data_ring.desc_size);
drivers/net/vmxnet3/vmxnet3_drv.c
1758
sz = rcd->rxdIdx * rq->data_ring.desc_size;
drivers/net/vmxnet3/vmxnet3_drv.c
1760
&rq->data_ring.base[sz], rcd->len);
drivers/net/vmxnet3/vmxnet3_drv.c
1775
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
1777
rq->stats.drop_total++;
drivers/net/vmxnet3/vmxnet3_drv.c
1794
skb_record_rx_queue(ctx->skb, rq->qid);
drivers/net/vmxnet3/vmxnet3_drv.c
1836
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
1849
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
1941
!rq->shared->updateRxProd)
drivers/net/vmxnet3/vmxnet3_drv.c
1944
napi_gro_receive(&rq->napi, skb);
drivers/net/vmxnet3/vmxnet3_drv.c
1953
ring = rq->rx_ring + ring_idx;
drivers/net/vmxnet3/vmxnet3_drv.c
1969
rbi = rq->buf_info[ring_idx] + ring->next2fill;
drivers/net/vmxnet3/vmxnet3_drv.c
2000
if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
drivers/net/vmxnet3/vmxnet3_drv.c
2002
rxprod_reg[ring_idx] + rq->qid * 8,
drivers/net/vmxnet3/vmxnet3_drv.c
2006
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
drivers/net/vmxnet3/vmxnet3_drv.c
2008
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
drivers/net/vmxnet3/vmxnet3_drv.c
2018
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
drivers/net/vmxnet3/vmxnet3_drv.c
2025
if (!rq->rx_ring[0].base)
drivers/net/vmxnet3/vmxnet3_drv.c
2029
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
drivers/net/vmxnet3/vmxnet3_drv.c
2035
rbi = &rq->buf_info[ring_idx][i];
drivers/net/vmxnet3/vmxnet3_drv.c
2037
&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
drivers/net/vmxnet3/vmxnet3_drv.c
2041
page_pool_recycle_direct(rq->page_pool,
drivers/net/vmxnet3/vmxnet3_drv.c
2059
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
drivers/net/vmxnet3/vmxnet3_drv.c
2060
rq->rx_ring[ring_idx].next2fill =
drivers/net/vmxnet3/vmxnet3_drv.c
2061
rq->rx_ring[ring_idx].next2comp = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2064
rq->comp_ring.gen = VMXNET3_INIT_GEN;
drivers/net/vmxnet3/vmxnet3_drv.c
2065
rq->comp_ring.next2proc = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2067
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
drivers/net/vmxnet3/vmxnet3_drv.c
2068
xdp_rxq_info_unreg(&rq->xdp_rxq);
drivers/net/vmxnet3/vmxnet3_drv.c
2069
page_pool_destroy(rq->page_pool);
drivers/net/vmxnet3/vmxnet3_drv.c
2070
rq->page_pool = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2085
static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
drivers/net/vmxnet3/vmxnet3_drv.c
2093
if (rq->buf_info[i]) {
drivers/net/vmxnet3/vmxnet3_drv.c
2094
for (j = 0; j < rq->rx_ring[i].size; j++)
drivers/net/vmxnet3/vmxnet3_drv.c
2095
BUG_ON(rq->buf_info[i][j].page != NULL);
drivers/net/vmxnet3/vmxnet3_drv.c
2101
if (rq->rx_ring[i].base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2103
rq->rx_ring[i].size
drivers/net/vmxnet3/vmxnet3_drv.c
2105
rq->rx_ring[i].base,
drivers/net/vmxnet3/vmxnet3_drv.c
2106
rq->rx_ring[i].basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2107
rq->rx_ring[i].base = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2111
if (rq->data_ring.base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2113
rq->rx_ring[0].size * rq->data_ring.desc_size,
drivers/net/vmxnet3/vmxnet3_drv.c
2114
rq->data_ring.base, rq->data_ring.basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2115
rq->data_ring.base = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2118
if (rq->ts_ring.base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2120
rq->rx_ring[0].size * rq->rx_ts_desc_size,
drivers/net/vmxnet3/vmxnet3_drv.c
2121
rq->ts_ring.base, rq->ts_ring.basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2122
rq->ts_ring.base = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2125
if (rq->comp_ring.base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2126
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
drivers/net/vmxnet3/vmxnet3_drv.c
2128
rq->comp_ring.base, rq->comp_ring.basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2129
rq->comp_ring.base = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2132
kfree(rq->buf_info[0]);
drivers/net/vmxnet3/vmxnet3_drv.c
2133
rq->buf_info[0] = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2134
rq->buf_info[1] = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2143
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
drivers/net/vmxnet3/vmxnet3_drv.c
2145
if (rq->data_ring.base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2147
(rq->rx_ring[0].size *
drivers/net/vmxnet3/vmxnet3_drv.c
2148
rq->data_ring.desc_size),
drivers/net/vmxnet3/vmxnet3_drv.c
2149
rq->data_ring.base,
drivers/net/vmxnet3/vmxnet3_drv.c
2150
rq->data_ring.basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2151
rq->data_ring.base = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2153
rq->data_ring.desc_size = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2158
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
drivers/net/vmxnet3/vmxnet3_drv.c
2164
for (i = 0; i < rq->rx_ring[0].size; i++) {
drivers/net/vmxnet3/vmxnet3_drv.c
2168
rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
drivers/net/vmxnet3/vmxnet3_drv.c
2171
rq->buf_info[0][i].len = adapter->skb_buf_size;
drivers/net/vmxnet3/vmxnet3_drv.c
2173
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
drivers/net/vmxnet3/vmxnet3_drv.c
2174
rq->buf_info[0][i].len = PAGE_SIZE;
drivers/net/vmxnet3/vmxnet3_drv.c
2177
for (i = 0; i < rq->rx_ring[1].size; i++) {
drivers/net/vmxnet3/vmxnet3_drv.c
2178
rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
drivers/net/vmxnet3/vmxnet3_drv.c
2179
rq->buf_info[1][i].len = PAGE_SIZE;
drivers/net/vmxnet3/vmxnet3_drv.c
2184
rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2186
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
drivers/net/vmxnet3/vmxnet3_drv.c
2188
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
drivers/net/vmxnet3/vmxnet3_drv.c
2189
rq->rx_ring[i].isOutOfOrder = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2192
err = vmxnet3_create_pp(adapter, rq,
drivers/net/vmxnet3/vmxnet3_drv.c
2193
rq->rx_ring[0].size + rq->rx_ring[1].size);
drivers/net/vmxnet3/vmxnet3_drv.c
2197
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
drivers/net/vmxnet3/vmxnet3_drv.c
2199
xdp_rxq_info_unreg(&rq->xdp_rxq);
drivers/net/vmxnet3/vmxnet3_drv.c
2200
page_pool_destroy(rq->page_pool);
drivers/net/vmxnet3/vmxnet3_drv.c
2201
rq->page_pool = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2206
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
drivers/net/vmxnet3/vmxnet3_drv.c
2208
if (rq->ts_ring.base)
drivers/net/vmxnet3/vmxnet3_drv.c
2209
memset(rq->ts_ring.base, 0,
drivers/net/vmxnet3/vmxnet3_drv.c
2210
rq->rx_ring[0].size * rq->rx_ts_desc_size);
drivers/net/vmxnet3/vmxnet3_drv.c
2213
rq->comp_ring.next2proc = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2214
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
drivers/net/vmxnet3/vmxnet3_drv.c
2216
rq->comp_ring.gen = VMXNET3_INIT_GEN;
drivers/net/vmxnet3/vmxnet3_drv.c
2219
rq->rx_ctx.skb = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2246
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
drivers/net/vmxnet3/vmxnet3_drv.c
2254
sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
drivers/net/vmxnet3/vmxnet3_drv.c
2255
rq->rx_ring[i].base = dma_alloc_coherent(
drivers/net/vmxnet3/vmxnet3_drv.c
2257
&rq->rx_ring[i].basePA,
drivers/net/vmxnet3/vmxnet3_drv.c
2259
if (!rq->rx_ring[i].base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2266
if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
drivers/net/vmxnet3/vmxnet3_drv.c
2267
sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
drivers/net/vmxnet3/vmxnet3_drv.c
2268
rq->data_ring.base =
drivers/net/vmxnet3/vmxnet3_drv.c
2270
&rq->data_ring.basePA,
drivers/net/vmxnet3/vmxnet3_drv.c
2272
if (!rq->data_ring.base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2278
rq->data_ring.base = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2279
rq->data_ring.desc_size = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2282
if (rq->rx_ts_desc_size != 0) {
drivers/net/vmxnet3/vmxnet3_drv.c
2283
sz = rq->rx_ring[0].size * rq->rx_ts_desc_size;
drivers/net/vmxnet3/vmxnet3_drv.c
2284
rq->ts_ring.base =
drivers/net/vmxnet3/vmxnet3_drv.c
2286
&rq->ts_ring.basePA,
drivers/net/vmxnet3/vmxnet3_drv.c
2288
if (!rq->ts_ring.base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2291
rq->rx_ts_desc_size = 0;
drivers/net/vmxnet3/vmxnet3_drv.c
2294
rq->ts_ring.base = NULL;
drivers/net/vmxnet3/vmxnet3_drv.c
2297
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
drivers/net/vmxnet3/vmxnet3_drv.c
2298
rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
drivers/net/vmxnet3/vmxnet3_drv.c
2299
&rq->comp_ring.basePA,
drivers/net/vmxnet3/vmxnet3_drv.c
2301
if (!rq->comp_ring.base) {
drivers/net/vmxnet3/vmxnet3_drv.c
2306
bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
drivers/net/vmxnet3/vmxnet3_drv.c
2307
sizeof(rq->buf_info[0][0]), GFP_KERNEL,
drivers/net/vmxnet3/vmxnet3_drv.c
2312
rq->buf_info[0] = bi;
drivers/net/vmxnet3/vmxnet3_drv.c
2313
rq->buf_info[1] = bi + rq->rx_ring[0].size;
drivers/net/vmxnet3/vmxnet3_drv.c
2318
vmxnet3_rq_destroy(rq, adapter);
drivers/net/vmxnet3/vmxnet3_drv.c
2392
struct vmxnet3_rx_queue *rq = container_of(napi,
drivers/net/vmxnet3/vmxnet3_drv.c
2394
struct vmxnet3_adapter *adapter = rq->adapter;
drivers/net/vmxnet3/vmxnet3_drv.c
2402
&adapter->tx_queue[rq - adapter->rx_queue];
drivers/net/vmxnet3/vmxnet3_drv.c
2406
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
drivers/net/vmxnet3/vmxnet3_drv.c
2410
vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
drivers/net/vmxnet3/vmxnet3_drv.c
2456
struct vmxnet3_rx_queue *rq = data;
drivers/net/vmxnet3/vmxnet3_drv.c
2457
struct vmxnet3_adapter *adapter = rq->adapter;
drivers/net/vmxnet3/vmxnet3_drv.c
2461
vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
drivers/net/vmxnet3/vmxnet3_drv.c
2462
napi_schedule(&rq->napi);
drivers/net/vmxnet3/vmxnet3_drv.c
2647
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
drivers/net/vmxnet3/vmxnet3_drv.c
2648
rq->qid = i;
drivers/net/vmxnet3/vmxnet3_drv.c
2649
rq->qid2 = i + adapter->num_rx_queues;
drivers/net/vmxnet3/vmxnet3_drv.c
2650
rq->dataRingQid = i + 2 * adapter->num_rx_queues;
drivers/net/vmxnet3/vmxnet3_drv.c
2964
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
drivers/net/vmxnet3/vmxnet3_drv.c
2966
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2967
rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2968
rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2970
rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
drivers/net/vmxnet3/vmxnet3_drv.c
2971
rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
drivers/net/vmxnet3/vmxnet3_drv.c
2972
rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
drivers/net/vmxnet3/vmxnet3_drv.c
2974
rqc->intrIdx = rq->comp_ring.intr_idx;
drivers/net/vmxnet3/vmxnet3_drv.c
2977
cpu_to_le64(rq->data_ring.basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2979
cpu_to_le16(rq->data_ring.desc_size);
drivers/net/vmxnet3/vmxnet3_drv.c
2983
rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA);
drivers/net/vmxnet3/vmxnet3_drv.c
2984
rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size);
drivers/net/vmxnet3/vmxnet3_drv.c
3422
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
drivers/net/vmxnet3/vmxnet3_drv.c
3424
rq->rx_ring[0].size = ring0_size;
drivers/net/vmxnet3/vmxnet3_drv.c
3425
rq->rx_ring[1].size = ring1_size;
drivers/net/vmxnet3/vmxnet3_drv.c
3426
rq->comp_ring.size = comp_size;
drivers/net/vmxnet3/vmxnet3_drv.c
3465
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
drivers/net/vmxnet3/vmxnet3_drv.c
3468
rq->shared = &adapter->rqd_start[i].ctrl;
drivers/net/vmxnet3/vmxnet3_drv.c
3469
rq->adapter = adapter;
drivers/net/vmxnet3/vmxnet3_drv.c
3470
rq->data_ring.desc_size = rxdata_desc_size;
drivers/net/vmxnet3/vmxnet3_drv.c
3471
rq->rx_ts_desc_size = adapter->rx_ts_desc_size;
drivers/net/vmxnet3/vmxnet3_drv.c
3472
err = vmxnet3_rq_create(rq, adapter);
drivers/net/vmxnet3/vmxnet3_drv.c
673
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
drivers/net/vmxnet3/vmxnet3_drv.c
677
struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
drivers/net/vmxnet3/vmxnet3_drv.c
678
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
drivers/net/vmxnet3/vmxnet3_drv.c
690
void *data = vmxnet3_pp_get_buff(rq->page_pool,
drivers/net/vmxnet3/vmxnet3_drv.c
694
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
705
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
717
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
731
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_drv.c
742
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_ethtool.c
578
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
drivers/net/vmxnet3/vmxnet3_ethtool.c
585
buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
586
buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
587
buf[j++] = rq->rx_ring[0].size;
drivers/net/vmxnet3/vmxnet3_ethtool.c
588
buf[j++] = rq->rx_ring[0].next2fill;
drivers/net/vmxnet3/vmxnet3_ethtool.c
589
buf[j++] = rq->rx_ring[0].next2comp;
drivers/net/vmxnet3/vmxnet3_ethtool.c
590
buf[j++] = rq->rx_ring[0].gen;
drivers/net/vmxnet3/vmxnet3_ethtool.c
592
buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
593
buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
594
buf[j++] = rq->rx_ring[1].size;
drivers/net/vmxnet3/vmxnet3_ethtool.c
595
buf[j++] = rq->rx_ring[1].next2fill;
drivers/net/vmxnet3/vmxnet3_ethtool.c
596
buf[j++] = rq->rx_ring[1].next2comp;
drivers/net/vmxnet3/vmxnet3_ethtool.c
597
buf[j++] = rq->rx_ring[1].gen;
drivers/net/vmxnet3/vmxnet3_ethtool.c
599
buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
600
buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
601
buf[j++] = rq->rx_ring[0].size;
drivers/net/vmxnet3/vmxnet3_ethtool.c
602
buf[j++] = rq->data_ring.desc_size;
drivers/net/vmxnet3/vmxnet3_ethtool.c
604
buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
605
buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
drivers/net/vmxnet3/vmxnet3_ethtool.c
606
buf[j++] = rq->comp_ring.size;
drivers/net/vmxnet3/vmxnet3_ethtool.c
607
buf[j++] = rq->comp_ring.next2proc;
drivers/net/vmxnet3/vmxnet3_ethtool.c
608
buf[j++] = rq->comp_ring.gen;
drivers/net/vmxnet3/vmxnet3_int.h
471
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
drivers/net/vmxnet3/vmxnet3_int.h
472
((rq)->rx_ring[ring_idx].size >> 3)
drivers/net/vmxnet3/vmxnet3_xdp.c
261
vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
drivers/net/vmxnet3/vmxnet3_xdp.c
269
rq->stats.xdp_packets++;
drivers/net/vmxnet3/vmxnet3_xdp.c
277
err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
drivers/net/vmxnet3/vmxnet3_xdp.c
279
rq->stats.xdp_redirects++;
drivers/net/vmxnet3/vmxnet3_xdp.c
281
rq->stats.xdp_drops++;
drivers/net/vmxnet3/vmxnet3_xdp.c
282
page_pool_recycle_direct(rq->page_pool, page);
drivers/net/vmxnet3/vmxnet3_xdp.c
288
vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
drivers/net/vmxnet3/vmxnet3_xdp.c
289
rq->stats.xdp_drops++;
drivers/net/vmxnet3/vmxnet3_xdp.c
290
page_pool_recycle_direct(rq->page_pool, page);
drivers/net/vmxnet3/vmxnet3_xdp.c
292
rq->stats.xdp_tx++;
drivers/net/vmxnet3/vmxnet3_xdp.c
296
bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act);
drivers/net/vmxnet3/vmxnet3_xdp.c
299
trace_xdp_exception(rq->adapter->netdev, prog, act);
drivers/net/vmxnet3/vmxnet3_xdp.c
300
rq->stats.xdp_aborted++;
drivers/net/vmxnet3/vmxnet3_xdp.c
303
rq->stats.xdp_drops++;
drivers/net/vmxnet3/vmxnet3_xdp.c
307
page_pool_recycle_direct(rq->page_pool, page);
drivers/net/vmxnet3/vmxnet3_xdp.c
313
vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page,
drivers/net/vmxnet3/vmxnet3_xdp.c
320
page_pool_recycle_direct(rq->page_pool, page);
drivers/net/vmxnet3/vmxnet3_xdp.c
321
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_xdp.c
336
struct vmxnet3_rx_queue *rq,
drivers/net/vmxnet3/vmxnet3_xdp.c
345
page = page_pool_alloc_pages(rq->page_pool, GFP_ATOMIC);
drivers/net/vmxnet3/vmxnet3_xdp.c
347
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_xdp.c
351
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
drivers/net/vmxnet3/vmxnet3_xdp.c
352
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
drivers/net/vmxnet3/vmxnet3_xdp.c
359
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
drivers/net/vmxnet3/vmxnet3_xdp.c
364
act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
drivers/net/vmxnet3/vmxnet3_xdp.c
369
*skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
drivers/net/vmxnet3/vmxnet3_xdp.c
379
struct vmxnet3_rx_queue *rq,
drivers/net/vmxnet3/vmxnet3_xdp.c
395
rq->page_pool->p.offset, rbi->len,
drivers/net/vmxnet3/vmxnet3_xdp.c
396
page_pool_get_dma_dir(rq->page_pool));
drivers/net/vmxnet3/vmxnet3_xdp.c
398
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
drivers/net/vmxnet3/vmxnet3_xdp.c
399
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
drivers/net/vmxnet3/vmxnet3_xdp.c
403
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
drivers/net/vmxnet3/vmxnet3_xdp.c
408
act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
drivers/net/vmxnet3/vmxnet3_xdp.c
412
*skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
drivers/net/vmxnet3/vmxnet3_xdp.c
417
new_data = vmxnet3_pp_get_buff(rq->page_pool, &new_dma_addr,
drivers/net/vmxnet3/vmxnet3_xdp.c
420
rq->stats.rx_buf_alloc_failure++;
drivers/net/vmxnet3/vmxnet3_xdp.h
30
struct vmxnet3_rx_queue *rq,
drivers/net/vmxnet3/vmxnet3_xdp.h
36
struct vmxnet3_rx_queue *rq,
drivers/nvme/host/apple.c
771
struct request *req = bd->rq;
drivers/nvme/host/core.c
1142
int nvme_execute_rq(struct request *rq, bool at_head)
drivers/nvme/host/core.c
1146
status = blk_execute_rq(rq, at_head);
drivers/nvme/host/core.c
1147
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
drivers/nvme/host/core.c
1149
if (nvme_req(rq)->status)
drivers/nvme/host/core.c
1150
return nvme_req(rq)->status;
drivers/nvme/host/core.c
1335
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
drivers/nvme/host/core.c
1339
struct nvme_ctrl *ctrl = rq->end_io_data;
drivers/nvme/host/core.c
1340
unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
drivers/nvme/host/core.c
1356
blk_mq_free_request(rq);
drivers/nvme/host/core.c
1377
struct request *rq;
drivers/nvme/host/core.c
1389
rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
drivers/nvme/host/core.c
1391
if (IS_ERR(rq)) {
drivers/nvme/host/core.c
1393
dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
drivers/nvme/host/core.c
1397
nvme_init_request(rq, &ctrl->ka_cmd);
drivers/nvme/host/core.c
1399
rq->timeout = ctrl->kato * HZ;
drivers/nvme/host/core.c
1400
rq->end_io = nvme_keep_alive_end_io;
drivers/nvme/host/core.c
1401
rq->end_io_data = ctrl;
drivers/nvme/host/core.c
1402
blk_execute_rq_nowait(rq, false);
drivers/nvme/host/core.c
757
struct request *rq)
drivers/nvme/host/core.c
765
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
drivers/nvme/host/core.c
768
if (!(rq->rq_flags & RQF_DONTPREP))
drivers/nvme/host/core.c
769
nvme_clear_nvme_request(rq);
drivers/nvme/host/core.c
771
return nvme_host_path_error(rq);
drivers/nvme/host/core.c
775
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
drivers/nvme/host/core.c
778
struct nvme_request *req = nvme_req(rq);
drivers/nvme/host/core.c
788
if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
drivers/nvme/host/core.c
799
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
drivers/nvme/host/fabrics.h
205
static inline void nvmf_complete_timed_out_request(struct request *rq)
drivers/nvme/host/fabrics.h
207
if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
drivers/nvme/host/fabrics.h
208
nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
drivers/nvme/host/fabrics.h
209
blk_mq_complete_request(rq);
drivers/nvme/host/fc.c
101
struct request *rq;
drivers/nvme/host/fc.c
1802
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
drivers/nvme/host/fc.c
1805
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/fc.c
1887
struct request *rq = op->rq;
drivers/nvme/host/fc.c
1889
if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
drivers/nvme/host/fc.c
1891
return blkcg_get_fc_appid(rq->bio);
drivers/nvme/host/fc.c
1899
struct request *rq = op->rq;
drivers/nvme/host/fc.c
2048
if (!nvme_try_complete_req(rq, status, result))
drivers/nvme/host/fc.c
2049
nvme_fc_complete_rq(rq);
drivers/nvme/host/fc.c
2060
struct request *rq, u32 rqno)
drivers/nvme/host/fc.c
2075
op->rq = rq;
drivers/nvme/host/fc.c
2111
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
drivers/nvme/host/fc.c
2115
struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/fc.c
2120
res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
drivers/nvme/host/fc.c
2125
nvme_req(rq)->ctrl = &ctrl->ctrl;
drivers/nvme/host/fc.c
2126
nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
drivers/nvme/host/fc.c
229
static void nvme_fc_complete_rq(struct request *rq);
drivers/nvme/host/fc.c
2531
static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
drivers/nvme/host/fc.c
2533
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/fc.c
2561
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
drivers/nvme/host/fc.c
2569
if (!blk_rq_nr_phys_segments(rq))
drivers/nvme/host/fc.c
2574
blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
drivers/nvme/host/fc.c
2579
op->nents = blk_rq_map_sg(rq, freq->sg_table.sgl);
drivers/nvme/host/fc.c
2580
WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
drivers/nvme/host/fc.c
2582
op->nents, rq_dma_dir(rq));
drivers/nvme/host/fc.c
2596
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
drivers/nvme/host/fc.c
2605
rq_dma_dir(rq));
drivers/nvme/host/fc.c
2695
ret = nvme_fc_map_data(ctrl, op->rq, op);
drivers/nvme/host/fc.c
2697
nvme_cleanup_cmd(op->rq);
drivers/nvme/host/fc.c
2711
nvme_start_request(op->rq);
drivers/nvme/host/fc.c
2735
nvme_fc_unmap_data(ctrl, op->rq, op);
drivers/nvme/host/fc.c
2736
nvme_cleanup_cmd(op->rq);
drivers/nvme/host/fc.c
2758
struct request *rq = bd->rq;
drivers/nvme/host/fc.c
2759
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/fc.c
2766
!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
drivers/nvme/host/fc.c
2767
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
drivers/nvme/host/fc.c
2769
ret = nvme_setup_cmd(ns, rq);
drivers/nvme/host/fc.c
2781
if (blk_rq_nr_phys_segments(rq)) {
drivers/nvme/host/fc.c
2782
data_len = blk_rq_payload_bytes(rq);
drivers/nvme/host/fc.c
2783
io_dir = ((rq_data_dir(rq) == WRITE) ?
drivers/nvme/host/fc.c
2814
nvme_fc_complete_rq(struct request *rq)
drivers/nvme/host/fc.c
2816
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/fc.c
2822
nvme_fc_unmap_data(ctrl, rq, op);
drivers/nvme/host/fc.c
2823
nvme_complete_rq(rq);
drivers/nvme/host/fc.c
60
struct request *rq;
drivers/nvme/host/multipath.c
180
void nvme_mpath_start_request(struct request *rq)
drivers/nvme/host/multipath.c
182
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/host/multipath.c
186
!(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
drivers/nvme/host/multipath.c
188
nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
drivers/nvme/host/multipath.c
191
if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
drivers/nvme/host/multipath.c
192
(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
drivers/nvme/host/multipath.c
195
nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
drivers/nvme/host/multipath.c
196
nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
drivers/nvme/host/multipath.c
201
void nvme_mpath_end_request(struct request *rq)
drivers/nvme/host/multipath.c
203
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/host/multipath.c
205
if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
drivers/nvme/host/multipath.c
208
if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
drivers/nvme/host/multipath.c
210
bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
drivers/nvme/host/multipath.c
211
blk_rq_bytes(rq) >> SECTOR_SHIFT,
drivers/nvme/host/multipath.c
212
nvme_req(rq)->start_time);
drivers/nvme/host/nvme.h
1041
void nvme_mpath_start_request(struct request *rq);
drivers/nvme/host/nvme.h
1042
void nvme_mpath_end_request(struct request *rq);
drivers/nvme/host/nvme.h
1147
static inline void nvme_mpath_start_request(struct request *rq)
drivers/nvme/host/nvme.h
1150
static inline void nvme_mpath_end_request(struct request *rq)
drivers/nvme/host/nvme.h
1213
static inline void nvme_start_request(struct request *rq)
drivers/nvme/host/nvme.h
1215
if (rq->cmd_flags & REQ_NVME_MPATH)
drivers/nvme/host/nvme.h
1216
nvme_mpath_start_request(rq);
drivers/nvme/host/nvme.h
1217
blk_mq_start_request(rq);
drivers/nvme/host/nvme.h
1270
int nvme_execute_rq(struct request *rq, bool at_head);
drivers/nvme/host/nvme.h
655
static inline u16 nvme_cid(struct request *rq)
drivers/nvme/host/nvme.h
657
return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
drivers/nvme/host/nvme.h
665
struct request *rq;
drivers/nvme/host/nvme.h
667
rq = blk_mq_tag_to_rq(tags, tag);
drivers/nvme/host/nvme.h
668
if (unlikely(!rq)) {
drivers/nvme/host/nvme.h
673
if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
drivers/nvme/host/nvme.h
674
dev_err(nvme_req(rq)->ctrl->device,
drivers/nvme/host/nvme.h
676
tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
drivers/nvme/host/nvme.h
679
return rq;
drivers/nvme/host/nvme.h
792
struct nvme_request *rq = nvme_req(req);
drivers/nvme/host/nvme.h
793
struct nvme_ctrl *ctrl = rq->ctrl;
drivers/nvme/host/nvme.h
796
rq->genctr++;
drivers/nvme/host/nvme.h
798
rq->status = le16_to_cpu(status) >> 1;
drivers/nvme/host/nvme.h
799
rq->result = result;
drivers/nvme/host/nvme.h
849
void (*fn)(struct request *rq))
drivers/nvme/host/nvme.h
911
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
drivers/nvme/host/nvme.h
914
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
drivers/nvme/host/nvme.h
923
return __nvme_check_ready(ctrl, rq, queue_live, state);
drivers/nvme/host/pci.c
1410
struct request *req = bd->rq;
drivers/nvme/host/rdma.c
1159
struct request *rq = blk_mq_rq_from_pdu(req);
drivers/nvme/host/rdma.c
1163
if (!nvme_try_complete_req(rq, req->status, req->result))
drivers/nvme/host/rdma.c
1164
nvme_rdma_complete_rq(rq);
drivers/nvme/host/rdma.c
1215
static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
drivers/nvme/host/rdma.c
1217
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
1219
if (blk_integrity_rq(rq)) {
drivers/nvme/host/rdma.c
1221
req->metadata_sgl->nents, rq_dma_dir(rq));
drivers/nvme/host/rdma.c
1227
rq_dma_dir(rq));
drivers/nvme/host/rdma.c
1232
struct request *rq)
drivers/nvme/host/rdma.c
1234
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
1239
if (!blk_rq_nr_phys_segments(rq))
drivers/nvme/host/rdma.c
1250
nvme_rdma_dma_unmap_req(ibdev, rq);
drivers/nvme/host/rdma.c
1412
struct request *rq = blk_mq_rq_from_pdu(req);
drivers/nvme/host/rdma.c
1413
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/host/rdma.c
1414
struct bio *bio = rq->bio;
drivers/nvme/host/rdma.c
1466
static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
drivers/nvme/host/rdma.c
1469
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
1474
blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
drivers/nvme/host/rdma.c
1479
req->data_sgl.nents = blk_rq_map_sg(rq, req->data_sgl.sg_table.sgl);
drivers/nvme/host/rdma.c
1482
req->data_sgl.nents, rq_dma_dir(rq));
drivers/nvme/host/rdma.c
1488
if (blk_integrity_rq(rq)) {
drivers/nvme/host/rdma.c
1492
rq->nr_integrity_segments,
drivers/nvme/host/rdma.c
1500
req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
drivers/nvme/host/rdma.c
1505
rq_dma_dir(rq));
drivers/nvme/host/rdma.c
1519
rq_dma_dir(rq));
drivers/nvme/host/rdma.c
1526
struct request *rq, struct nvme_command *c)
drivers/nvme/host/rdma.c
1528
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
1539
if (!blk_rq_nr_phys_segments(rq))
drivers/nvme/host/rdma.c
154
static void nvme_rdma_complete_rq(struct request *rq);
drivers/nvme/host/rdma.c
1542
ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
drivers/nvme/host/rdma.c
1552
if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
drivers/nvme/host/rdma.c
1554
blk_rq_payload_bytes(rq) <=
drivers/nvme/host/rdma.c
1574
nvme_rdma_dma_unmap_req(ibdev, rq);
drivers/nvme/host/rdma.c
1693
struct request *rq;
drivers/nvme/host/rdma.c
1696
rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
drivers/nvme/host/rdma.c
1697
if (!rq) {
drivers/nvme/host/rdma.c
1704
req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
1944
static void nvme_rdma_complete_timed_out(struct request *rq)
drivers/nvme/host/rdma.c
1946
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
1950
nvmf_complete_timed_out_request(rq);
drivers/nvme/host/rdma.c
1953
static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
drivers/nvme/host/rdma.c
1955
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
1963
rq->tag, nvme_cid(rq), cmd->common.opcode,
drivers/nvme/host/rdma.c
1980
nvme_rdma_complete_timed_out(rq);
drivers/nvme/host/rdma.c
1997
struct request *rq = bd->rq;
drivers/nvme/host/rdma.c
1998
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
2000
struct nvme_command *c = nvme_req(rq)->cmd;
drivers/nvme/host/rdma.c
2006
WARN_ON_ONCE(rq->tag < 0);
drivers/nvme/host/rdma.c
2008
if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
drivers/nvme/host/rdma.c
2009
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
drivers/nvme/host/rdma.c
2023
ret = nvme_setup_cmd(ns, rq);
drivers/nvme/host/rdma.c
2027
nvme_start_request(rq);
drivers/nvme/host/rdma.c
2038
err = nvme_rdma_map_data(queue, rq, c);
drivers/nvme/host/rdma.c
2058
nvme_rdma_unmap_data(queue, rq);
drivers/nvme/host/rdma.c
2061
ret = nvme_host_path_error(rq);
drivers/nvme/host/rdma.c
2066
nvme_cleanup_cmd(rq);
drivers/nvme/host/rdma.c
2082
struct request *rq = blk_mq_rq_from_pdu(req);
drivers/nvme/host/rdma.c
2089
nvme_req(rq)->status = NVME_SC_INVALID_PI;
drivers/nvme/host/rdma.c
2096
nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
drivers/nvme/host/rdma.c
2099
nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
drivers/nvme/host/rdma.c
2102
nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
drivers/nvme/host/rdma.c
2111
static void nvme_rdma_complete_rq(struct request *rq)
drivers/nvme/host/rdma.c
2113
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
2120
nvme_rdma_unmap_data(queue, rq);
drivers/nvme/host/rdma.c
2123
nvme_complete_rq(rq);
drivers/nvme/host/rdma.c
286
struct request *rq, unsigned int hctx_idx)
drivers/nvme/host/rdma.c
288
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
294
struct request *rq, unsigned int hctx_idx,
drivers/nvme/host/rdma.c
298
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/rdma.c
302
nvme_req(rq)->ctrl = &ctrl->ctrl;
drivers/nvme/host/rdma.c
309
req->metadata_sgl = (void *)nvme_req(rq) +
drivers/nvme/host/rdma.c
314
nvme_req(rq)->cmd = req->sqe.data;
drivers/nvme/host/tcp.c
1002
struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
drivers/nvme/host/tcp.c
1004
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
1015
struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
drivers/nvme/host/tcp.c
1017
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
1019
nvme_tcp_end_request(rq, le16_to_cpu(req->status));
drivers/nvme/host/tcp.c
2631
static void nvme_tcp_complete_timed_out(struct request *rq)
drivers/nvme/host/tcp.c
2633
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
2637
nvmf_complete_timed_out_request(rq);
drivers/nvme/host/tcp.c
2640
static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
drivers/nvme/host/tcp.c
2642
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
2650
rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
drivers/nvme/host/tcp.c
2667
nvme_tcp_complete_timed_out(rq);
drivers/nvme/host/tcp.c
2680
struct request *rq)
drivers/nvme/host/tcp.c
2682
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
2688
if (!blk_rq_nr_phys_segments(rq))
drivers/nvme/host/tcp.c
2690
else if (rq_data_dir(rq) == WRITE &&
drivers/nvme/host/tcp.c
2700
struct request *rq)
drivers/nvme/host/tcp.c
2702
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
2708
ret = nvme_setup_cmd(ns, rq);
drivers/nvme/host/tcp.c
2719
req->data_len = blk_rq_nr_phys_segments(rq) ?
drivers/nvme/host/tcp.c
2720
blk_rq_payload_bytes(rq) : 0;
drivers/nvme/host/tcp.c
2721
req->curr_bio = rq->bio;
drivers/nvme/host/tcp.c
2723
nvme_tcp_init_iter(req, rq_data_dir(rq));
drivers/nvme/host/tcp.c
2725
if (rq_data_dir(rq) == WRITE &&
drivers/nvme/host/tcp.c
2742
ret = nvme_tcp_map_data(queue, rq);
drivers/nvme/host/tcp.c
2744
nvme_cleanup_cmd(rq);
drivers/nvme/host/tcp.c
2766
struct request *rq = bd->rq;
drivers/nvme/host/tcp.c
2767
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
2771
if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
drivers/nvme/host/tcp.c
2772
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
drivers/nvme/host/tcp.c
2774
ret = nvme_tcp_setup_cmd_pdu(ns, rq);
drivers/nvme/host/tcp.c
2778
nvme_start_request(rq);
drivers/nvme/host/tcp.c
300
struct request *rq;
drivers/nvme/host/tcp.c
305
rq = blk_mq_rq_from_pdu(req);
drivers/nvme/host/tcp.c
307
return rq_data_dir(rq) == WRITE && req->data_len &&
drivers/nvme/host/tcp.c
342
struct request *rq = blk_mq_rq_from_pdu(req);
drivers/nvme/host/tcp.c
348
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
drivers/nvme/host/tcp.c
349
vec = &rq->special_vec;
drivers/nvme/host/tcp.c
351
size = blk_rq_payload_bytes(rq);
drivers/nvme/host/tcp.c
542
struct request *rq, unsigned int hctx_idx)
drivers/nvme/host/tcp.c
544
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
550
struct request *rq, unsigned int hctx_idx,
drivers/nvme/host/tcp.c
554
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
568
nvme_req(rq)->ctrl = &ctrl->ctrl;
drivers/nvme/host/tcp.c
569
nvme_req(rq)->cmd = &pdu->cmd;
drivers/nvme/host/tcp.c
626
struct request *rq;
drivers/nvme/host/tcp.c
628
rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
drivers/nvme/host/tcp.c
629
if (!rq) {
drivers/nvme/host/tcp.c
637
req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
641
if (!nvme_try_complete_req(rq, req->status, cqe->result))
drivers/nvme/host/tcp.c
642
nvme_complete_rq(rq);
drivers/nvme/host/tcp.c
651
struct request *rq;
drivers/nvme/host/tcp.c
653
rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
drivers/nvme/host/tcp.c
654
if (!rq) {
drivers/nvme/host/tcp.c
661
if (!blk_rq_payload_bytes(rq)) {
drivers/nvme/host/tcp.c
664
nvme_tcp_queue_id(queue), rq->tag);
drivers/nvme/host/tcp.c
674
nvme_tcp_queue_id(queue), rq->tag);
drivers/nvme/host/tcp.c
708
struct request *rq = blk_mq_rq_from_pdu(req);
drivers/nvme/host/tcp.c
733
data->command_id = nvme_cid(rq);
drivers/nvme/host/tcp.c
742
struct request *rq;
drivers/nvme/host/tcp.c
746
rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
drivers/nvme/host/tcp.c
747
if (!rq) {
drivers/nvme/host/tcp.c
753
req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
758
rq->tag, r2t_length);
drivers/nvme/host/tcp.c
765
rq->tag, r2t_length, req->data_len, req->data_sent);
drivers/nvme/host/tcp.c
772
rq->tag, r2t_offset, req->data_sent);
drivers/nvme/host/tcp.c
780
rq->tag);
drivers/nvme/host/tcp.c
903
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
drivers/nvme/host/tcp.c
907
if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
drivers/nvme/host/tcp.c
908
nvme_complete_rq(rq);
drivers/nvme/host/tcp.c
915
struct request *rq =
drivers/nvme/host/tcp.c
917
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
drivers/nvme/host/tcp.c
936
nvme_tcp_queue_id(queue), rq->tag);
drivers/nvme/host/tcp.c
956
nvme_tcp_queue_id(queue), rq->tag);
drivers/nvme/host/tcp.c
971
nvme_tcp_end_request(rq,
drivers/nvme/target/loop.c
110
struct request *rq;
drivers/nvme/target/loop.c
112
rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
drivers/nvme/target/loop.c
113
if (!rq) {
drivers/nvme/target/loop.c
120
if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
drivers/nvme/target/loop.c
121
nvme_loop_complete_rq(rq);
drivers/nvme/target/loop.c
138
struct request *req = bd->rq;
drivers/nvme/target/nvmet.h
467
struct request *rq;
drivers/nvme/target/passthru.c
217
struct request *rq = req->p.rq;
drivers/nvme/target/passthru.c
218
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
drivers/nvme/target/passthru.c
219
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/target/passthru.c
224
status = nvme_execute_rq(rq, false);
drivers/nvme/target/passthru.c
241
req->cqe->result = nvme_req(rq)->result;
drivers/nvme/target/passthru.c
243
blk_mq_free_request(rq);
drivers/nvme/target/passthru.c
249
static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
drivers/nvme/target/passthru.c
253
struct nvmet_req *req = rq->end_io_data;
drivers/nvme/target/passthru.c
255
req->cqe->result = nvme_req(rq)->result;
drivers/nvme/target/passthru.c
256
nvmet_req_complete(req, nvme_req(rq)->status);
drivers/nvme/target/passthru.c
257
blk_mq_free_request(rq);
drivers/nvme/target/passthru.c
261
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
drivers/nvme/target/passthru.c
274
ARRAY_SIZE(req->inline_bvec), req_op(rq));
drivers/nvme/target/passthru.c
276
bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
drivers/nvme/target/passthru.c
287
ret = blk_rq_append_bio(rq, bio);
drivers/nvme/target/passthru.c
302
struct request *rq = NULL;
drivers/nvme/target/passthru.c
324
rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
drivers/nvme/target/passthru.c
325
if (IS_ERR(rq)) {
drivers/nvme/target/passthru.c
329
nvme_init_request(rq, req->cmd);
drivers/nvme/target/passthru.c
332
rq->timeout = timeout;
drivers/nvme/target/passthru.c
335
ret = nvmet_passthru_map_sg(req, rq);
drivers/nvme/target/passthru.c
351
req->p.rq = rq;
drivers/nvme/target/passthru.c
354
rq->end_io = nvmet_passthru_req_done;
drivers/nvme/target/passthru.c
355
rq->end_io_data = req;
drivers/nvme/target/passthru.c
356
blk_execute_rq_nowait(rq, false);
drivers/nvme/target/passthru.c
365
blk_mq_free_request(rq);
drivers/pci/pci.c
5802
int pcie_set_readrq(struct pci_dev *dev, int rq)
drivers/pci/pci.c
5809
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
drivers/pci/pci.c
5820
if (mps < rq)
drivers/pci/pci.c
5821
rq = mps;
drivers/pci/pci.c
5824
firstbit = ffs(rq);
drivers/pci/pci.c
5832
if (rq > max_mrrs) {
drivers/pci/pci.c
5833
pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
drivers/platform/chrome/wilco_ec/debugfs.c
178
struct ec_request rq;
drivers/platform/chrome/wilco_ec/debugfs.c
183
memset(&rq, 0, sizeof(rq));
drivers/platform/chrome/wilco_ec/debugfs.c
184
rq.cmd = CMD_KB_CHROME;
drivers/platform/chrome/wilco_ec/debugfs.c
185
rq.sub_cmd = sub_cmd;
drivers/platform/chrome/wilco_ec/debugfs.c
189
msg.request_data = &rq;
drivers/platform/chrome/wilco_ec/debugfs.c
190
msg.request_size = sizeof(rq);
drivers/platform/chrome/wilco_ec/mailbox.c
101
rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
drivers/platform/chrome/wilco_ec/mailbox.c
102
rq->checksum += wilco_ec_checksum(msg->request_data, msg->request_size);
drivers/platform/chrome/wilco_ec/mailbox.c
103
rq->checksum = -rq->checksum;
drivers/platform/chrome/wilco_ec/mailbox.c
117
struct wilco_ec_request *rq)
drivers/platform/chrome/wilco_ec/mailbox.c
124
ret = cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq);
drivers/platform/chrome/wilco_ec/mailbox.c
127
ret = cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size,
drivers/platform/chrome/wilco_ec/mailbox.c
204
struct wilco_ec_request *rq;
drivers/platform/chrome/wilco_ec/mailbox.c
212
rq = ec->data_buffer;
drivers/platform/chrome/wilco_ec/mailbox.c
213
wilco_ec_prepare(msg, rq);
drivers/platform/chrome/wilco_ec/mailbox.c
215
ret = wilco_ec_transfer(ec, msg, rq);
drivers/platform/chrome/wilco_ec/mailbox.c
92
struct wilco_ec_request *rq)
drivers/platform/chrome/wilco_ec/mailbox.c
94
memset(rq, 0, sizeof(*rq));
drivers/platform/chrome/wilco_ec/mailbox.c
95
rq->struct_version = EC_MAILBOX_PROTO_VERSION;
drivers/platform/chrome/wilco_ec/mailbox.c
96
rq->mailbox_id = msg->type;
drivers/platform/chrome/wilco_ec/mailbox.c
97
rq->mailbox_version = EC_MAILBOX_VERSION;
drivers/platform/chrome/wilco_ec/mailbox.c
98
rq->data_size = msg->request_size;
drivers/platform/chrome/wilco_ec/properties.c
35
struct ec_property_request *rq,
drivers/platform/chrome/wilco_ec/properties.c
43
ec_msg.request_data = rq;
drivers/platform/chrome/wilco_ec/properties.c
44
ec_msg.request_size = sizeof(*rq);
drivers/platform/chrome/wilco_ec/properties.c
51
if (rs->op != rq->op)
drivers/platform/chrome/wilco_ec/properties.c
53
if (memcmp(rq->property_id, rs->property_id, sizeof(rs->property_id)))
drivers/platform/chrome/wilco_ec/properties.c
62
struct ec_property_request rq;
drivers/platform/chrome/wilco_ec/properties.c
66
memset(&rq, 0, sizeof(rq));
drivers/platform/chrome/wilco_ec/properties.c
67
rq.op = EC_OP_GET;
drivers/platform/chrome/wilco_ec/properties.c
68
put_unaligned_le32(prop_msg->property_id, rq.property_id);
drivers/platform/chrome/wilco_ec/properties.c
70
ret = send_property_msg(ec, &rq, &rs);
drivers/platform/chrome/wilco_ec/properties.c
84
struct ec_property_request rq;
drivers/platform/chrome/wilco_ec/properties.c
88
memset(&rq, 0, sizeof(rq));
drivers/platform/chrome/wilco_ec/properties.c
89
rq.op = EC_OP_SET;
drivers/platform/chrome/wilco_ec/properties.c
90
put_unaligned_le32(prop_msg->property_id, rq.property_id);
drivers/platform/chrome/wilco_ec/properties.c
91
rq.length = prop_msg->length;
drivers/platform/chrome/wilco_ec/properties.c
92
memcpy(rq.data, prop_msg->data, prop_msg->length);
drivers/platform/chrome/wilco_ec/properties.c
94
ret = send_property_msg(ec, &rq, &rs);
drivers/platform/chrome/wilco_ec/sysfs.c
158
struct usb_charge_request *rq,
drivers/platform/chrome/wilco_ec/sysfs.c
166
msg.request_data = rq;
drivers/platform/chrome/wilco_ec/sysfs.c
167
msg.request_size = sizeof(*rq);
drivers/platform/chrome/wilco_ec/sysfs.c
183
struct usb_charge_request rq;
drivers/platform/chrome/wilco_ec/sysfs.c
187
memset(&rq, 0, sizeof(rq));
drivers/platform/chrome/wilco_ec/sysfs.c
188
rq.cmd = CMD_USB_CHARGE;
drivers/platform/chrome/wilco_ec/sysfs.c
189
rq.op = USB_CHARGE_GET;
drivers/platform/chrome/wilco_ec/sysfs.c
191
ret = send_usb_charge(ec, &rq, &rs);
drivers/platform/chrome/wilco_ec/sysfs.c
203
struct usb_charge_request rq;
drivers/platform/chrome/wilco_ec/sysfs.c
214
memset(&rq, 0, sizeof(rq));
drivers/platform/chrome/wilco_ec/sysfs.c
215
rq.cmd = CMD_USB_CHARGE;
drivers/platform/chrome/wilco_ec/sysfs.c
216
rq.op = USB_CHARGE_SET;
drivers/platform/chrome/wilco_ec/sysfs.c
217
rq.val = val;
drivers/platform/chrome/wilco_ec/sysfs.c
219
ret = send_usb_charge(ec, &rq, &rs);
drivers/platform/chrome/wilco_ec/sysfs.c
74
struct boot_on_ac_request rq;
drivers/platform/chrome/wilco_ec/sysfs.c
85
memset(&rq, 0, sizeof(rq));
drivers/platform/chrome/wilco_ec/sysfs.c
86
rq.cmd = CMD_KB_CMOS;
drivers/platform/chrome/wilco_ec/sysfs.c
87
rq.sub_cmd = SUB_CMD_KB_CMOS_AUTO_ON;
drivers/platform/chrome/wilco_ec/sysfs.c
88
rq.val = val;
drivers/platform/chrome/wilco_ec/sysfs.c
92
msg.request_data = &rq;
drivers/platform/chrome/wilco_ec/sysfs.c
93
msg.request_size = sizeof(rq);
drivers/platform/chrome/wilco_ec/telemetry.c
154
static int check_telem_request(struct wilco_ec_telem_request *rq,
drivers/platform/chrome/wilco_ec/telemetry.c
159
if (rq->reserved)
drivers/platform/chrome/wilco_ec/telemetry.c
162
switch (rq->command) {
drivers/platform/chrome/wilco_ec/telemetry.c
164
max_size += sizeof(rq->args.get_log);
drivers/platform/chrome/wilco_ec/telemetry.c
167
max_size += sizeof(rq->args.get_version);
drivers/platform/chrome/wilco_ec/telemetry.c
170
max_size += sizeof(rq->args.get_fan_info);
drivers/platform/chrome/wilco_ec/telemetry.c
173
max_size += sizeof(rq->args.get_diag_info);
drivers/platform/chrome/wilco_ec/telemetry.c
176
max_size += sizeof(rq->args.get_temp_info);
drivers/platform/chrome/wilco_ec/telemetry.c
179
max_size += sizeof(rq->args.get_temp_read);
drivers/platform/chrome/wilco_ec/telemetry.c
182
max_size += sizeof(rq->args.get_batt_ext_info);
drivers/platform/chrome/wilco_ec/telemetry.c
185
if (rq->args.get_batt_ppid_info.always1 != 1)
drivers/platform/chrome/wilco_ec/telemetry.c
188
max_size += sizeof(rq->args.get_batt_ppid_info);
drivers/ptp/ptp_chardev.c
23
struct ptp_clock_request rq;
drivers/ptp/ptp_chardev.c
26
memset(&rq, 0, sizeof(rq));
drivers/ptp/ptp_chardev.c
32
rq.type = PTP_CLK_REQ_EXTTS;
drivers/ptp/ptp_chardev.c
33
rq.extts.index = chan;
drivers/ptp/ptp_chardev.c
34
err = ops->enable(ops, &rq, 0);
drivers/ptp/ptp_chardev.c
37
rq.type = PTP_CLK_REQ_PEROUT;
drivers/ptp/ptp_chardev.c
38
rq.perout.index = chan;
drivers/ptp/ptp_chardev.c
39
err = ops->enable(ops, &rq, 0);
drivers/ptp/ptp_clockmatrix.c
1920
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_clockmatrix.c
1928
switch (rq->type) {
drivers/ptp/ptp_clockmatrix.c
1931
err = idtcm_perout_enable(channel, &rq->perout, false);
drivers/ptp/ptp_clockmatrix.c
1933
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
drivers/ptp/ptp_clockmatrix.c
1934
rq->perout.period.nsec)
drivers/ptp/ptp_clockmatrix.c
1937
err = idtcm_perout_enable(channel, &rq->perout, true);
drivers/ptp/ptp_clockmatrix.c
1940
err = idtcm_extts_enable(channel, rq, on);
drivers/ptp/ptp_clockmatrix.c
274
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_clockmatrix.c
276
u8 index = rq->extts.index;
drivers/ptp/ptp_dte.c
211
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_fc3.c
535
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_fc3.c
541
switch (rq->type) {
drivers/ptp/ptp_fc3.c
546
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
drivers/ptp/ptp_fc3.c
547
rq->perout.period.nsec)
drivers/ptp/ptp_fc3.c
555
if ((rq->extts.flags & PTP_EXT_OFFSET) != (PTP_EXT_OFFSET))
drivers/ptp/ptp_idt82p33.c
236
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_idt82p33.c
238
u8 index = rq->extts.index;
drivers/ptp/ptp_idt82p33.c
932
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_idt82p33.c
941
switch (rq->type) {
drivers/ptp/ptp_idt82p33.c
945
&rq->perout);
drivers/ptp/ptp_idt82p33.c
947
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
drivers/ptp/ptp_idt82p33.c
948
rq->perout.period.nsec)
drivers/ptp/ptp_idt82p33.c
952
&rq->perout);
drivers/ptp/ptp_idt82p33.c
955
err = idt82p33_extts_enable(channel, rq, on);
drivers/ptp/ptp_kvm_common.c
106
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_netc.c
362
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_netc.c
425
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_netc.c
428
u32 channel = rq->perout.index;
drivers/ptp/ptp_netc.c
448
period.tv_sec = rq->perout.period.sec;
drivers/ptp/ptp_netc.c
449
period.tv_nsec = rq->perout.period.nsec;
drivers/ptp/ptp_netc.c
478
stime.tv_sec = rq->perout.start.sec;
drivers/ptp/ptp_netc.c
479
stime.tv_nsec = rq->perout.start.nsec;
drivers/ptp/ptp_netc.c
521
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_netc.c
523
int index = rq->extts.index;
drivers/ptp/ptp_netc.c
528
if ((rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
drivers/ptp/ptp_netc.c
533
netc_timer_handle_etts_event(priv, rq->extts.index, false);
drivers/ptp/ptp_netc.c
536
if (rq->extts.flags & PTP_FALLING_EDGE)
drivers/ptp/ptp_netc.c
598
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_netc.c
602
switch (rq->type) {
drivers/ptp/ptp_netc.c
604
return netc_timer_enable_pps(priv, rq, on);
drivers/ptp/ptp_netc.c
606
return net_timer_enable_perout(priv, rq, on);
drivers/ptp/ptp_netc.c
608
return netc_timer_enable_extts(priv, rq, on);
drivers/ptp/ptp_ocp.c
1375
ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
drivers/ptp/ptp_ocp.c
1383
switch (rq->type) {
drivers/ptp/ptp_ocp.c
1386
switch (rq->extts.index) {
drivers/ptp/ptp_ocp.c
1412
switch (rq->perout.index) {
drivers/ptp/ptp_ocp.c
1417
if (on && (rq->perout.period.sec != 1 ||
drivers/ptp/ptp_ocp.c
1418
rq->perout.period.nsec != 0))
drivers/ptp/ptp_ocp.c
1425
req = rq->perout.index - 1;
drivers/ptp/ptp_ocp.c
1427
err = ptp_ocp_signal_from_perout(bp, req, &rq->perout);
drivers/ptp/ptp_pch.c
402
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_pch.c
406
switch (rq->type) {
drivers/ptp/ptp_pch.c
408
switch (rq->extts.index) {
drivers/ptp/ptp_qoriq.c
301
struct ptp_clock_request *rq, int on)
drivers/ptp/ptp_qoriq.c
308
switch (rq->type) {
drivers/ptp/ptp_qoriq.c
310
switch (rq->extts.index) {
drivers/ptp/ptp_qoriq.c
322
extts_clean_up(ptp_qoriq, rq->extts.index, false);
drivers/ptp/ptp_vmclock.c
318
struct ptp_clock_request *rq, int on)
drivers/s390/block/dasd.c
3023
struct request *req = qd->rq;
drivers/s390/block/dasd_eckd.c
3766
void *rq;
drivers/s390/block/dasd_eckd.c
3776
rq = req ? blk_mq_rq_to_pdu(req) : NULL;
drivers/s390/block/dasd_eckd.c
3787
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
drivers/s390/block/scm_blk.c
245
blk_mq_kick_requeue_list(bdev->rq);
drivers/s390/block/scm_blk.c
287
struct request *req = qd->rq;
drivers/s390/block/scm_blk.h
18
struct request_queue *rq;
drivers/s390/block/scm_blk.h
38
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
drivers/s390/char/con3270.c
302
static void tty3270_write_callback(struct raw3270_request *rq, void *data)
drivers/s390/char/con3270.c
304
struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
drivers/s390/char/con3270.c
306
if (rq->rc != 0) {
drivers/s390/char/con3270.c
311
raw3270_request_reset(rq);
drivers/s390/char/con3270.c
312
xchg(&tp->write, rq);
drivers/s390/char/con3270.c
473
static void tty3270_update_lines_visible(struct tty3270 *tp, struct raw3270_request *rq)
drivers/s390/char/con3270.c
483
if (raw3270_request_add_data(rq, tp->converted_line, len))
drivers/s390/char/con3270.c
494
static void tty3270_update_lines_all(struct tty3270 *tp, struct raw3270_request *rq)
drivers/s390/char/con3270.c
505
if (raw3270_request_add_data(rq, tp->converted_line, len))
drivers/s390/char/con3270.c
515
if (raw3270_request_add_data(rq, buf, sizeof(buf)))
drivers/s390/char/con3270.c
733
static void tty3270_read_callback(struct raw3270_request *rq, void *data)
drivers/s390/char/con3270.c
735
struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
drivers/s390/char/con3270.c
737
raw3270_get_view(rq->view);
drivers/s390/char/con3270.c
799
static void tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
drivers/s390/char/con3270.c
809
if (rq) {
drivers/s390/char/con3270.c
811
rq->rc = -EIO;
drivers/s390/char/con3270.c
816
rq->rescnt = irb->scsw.cmd.count;
drivers/s390/char/fs3270.c
103
fp = (struct fs3270 *)rq->view;
drivers/s390/char/fs3270.c
104
if (rq->rc != 0 || rq->rescnt != 0) {
drivers/s390/char/fs3270.c
109
raw3270_request_reset(rq);
drivers/s390/char/fs3270.c
159
static void fs3270_save_callback(struct raw3270_request *rq, void *data)
drivers/s390/char/fs3270.c
163
fp = (struct fs3270 *)rq->view;
drivers/s390/char/fs3270.c
175
if (rq->rc != 0 || rq->rescnt == 0) {
drivers/s390/char/fs3270.c
180
fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
drivers/s390/char/fs3270.c
182
raw3270_request_reset(rq);
drivers/s390/char/fs3270.c
216
static void fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq,
drivers/s390/char/fs3270.c
225
if (rq) {
drivers/s390/char/fs3270.c
227
rq->rc = -EIO;
drivers/s390/char/fs3270.c
230
rq->rescnt = irb->scsw.cmd.count;
drivers/s390/char/fs3270.c
241
struct raw3270_request *rq;
drivers/s390/char/fs3270.c
253
rq = raw3270_request_alloc(0);
drivers/s390/char/fs3270.c
254
if (!IS_ERR(rq)) {
drivers/s390/char/fs3270.c
257
raw3270_request_set_cmd(rq, fp->read_command ? : 2);
drivers/s390/char/fs3270.c
258
raw3270_request_set_idal(rq, ib);
drivers/s390/char/fs3270.c
262
rc = fs3270_do_io(&fp->view, rq);
drivers/s390/char/fs3270.c
264
count -= rq->rescnt;
drivers/s390/char/fs3270.c
271
raw3270_request_free(rq);
drivers/s390/char/fs3270.c
273
rc = PTR_ERR(rq);
drivers/s390/char/fs3270.c
286
struct raw3270_request *rq;
drivers/s390/char/fs3270.c
297
rq = raw3270_request_alloc(0);
drivers/s390/char/fs3270.c
298
if (!IS_ERR(rq)) {
drivers/s390/char/fs3270.c
303
raw3270_request_set_cmd(rq, write_command);
drivers/s390/char/fs3270.c
304
raw3270_request_set_idal(rq, ib);
drivers/s390/char/fs3270.c
305
rc = fs3270_do_io(&fp->view, rq);
drivers/s390/char/fs3270.c
307
rc = count - rq->rescnt;
drivers/s390/char/fs3270.c
311
raw3270_request_free(rq);
drivers/s390/char/fs3270.c
313
rc = PTR_ERR(rq);
drivers/s390/char/fs3270.c
47
static void fs3270_wake_up(struct raw3270_request *rq, void *data)
drivers/s390/char/fs3270.c
61
static int fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
drivers/s390/char/fs3270.c
67
rq->callback = fs3270_wake_up;
drivers/s390/char/fs3270.c
68
rq->callback_data = &fp->wait;
drivers/s390/char/fs3270.c
78
rc = raw3270_start(view, rq);
drivers/s390/char/fs3270.c
81
wait_event(fp->wait, raw3270_request_final(rq));
drivers/s390/char/fs3270.c
90
static void fs3270_reset_callback(struct raw3270_request *rq, void *data)
drivers/s390/char/fs3270.c
94
fp = (struct fs3270 *)rq->view;
drivers/s390/char/fs3270.c
95
raw3270_request_reset(rq);
drivers/s390/char/fs3270.c
99
static void fs3270_restore_callback(struct raw3270_request *rq, void *data)
drivers/s390/char/raw3270.c
146
struct raw3270_request *rq;
drivers/s390/char/raw3270.c
149
rq = kzalloc_obj(*rq, GFP_KERNEL | GFP_DMA);
drivers/s390/char/raw3270.c
150
if (!rq)
drivers/s390/char/raw3270.c
155
rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
drivers/s390/char/raw3270.c
156
if (!rq->buffer) {
drivers/s390/char/raw3270.c
157
kfree(rq);
drivers/s390/char/raw3270.c
161
rq->size = size;
drivers/s390/char/raw3270.c
162
INIT_LIST_HEAD(&rq->list);
drivers/s390/char/raw3270.c
167
if (rq->buffer)
drivers/s390/char/raw3270.c
168
rq->ccw.cda = virt_to_dma32(rq->buffer);
drivers/s390/char/raw3270.c
169
rq->ccw.flags = CCW_FLAG_SLI;
drivers/s390/char/raw3270.c
171
return rq;
drivers/s390/char/raw3270.c
178
void raw3270_request_free(struct raw3270_request *rq)
drivers/s390/char/raw3270.c
180
kfree(rq->buffer);
drivers/s390/char/raw3270.c
181
kfree(rq);
drivers/s390/char/raw3270.c
188
int raw3270_request_reset(struct raw3270_request *rq)
drivers/s390/char/raw3270.c
190
if (WARN_ON_ONCE(!list_empty(&rq->list)))
drivers/s390/char/raw3270.c
192
rq->ccw.cmd_code = 0;
drivers/s390/char/raw3270.c
193
rq->ccw.count = 0;
drivers/s390/char/raw3270.c
194
if (rq->buffer)
drivers/s390/char/raw3270.c
195
rq->ccw.cda = virt_to_dma32(rq->buffer);
drivers/s390/char/raw3270.c
196
rq->ccw.flags = CCW_FLAG_SLI;
drivers/s390/char/raw3270.c
197
rq->rescnt = 0;
drivers/s390/char/raw3270.c
198
rq->rc = 0;
drivers/s390/char/raw3270.c
206
void raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
drivers/s390/char/raw3270.c
208
rq->ccw.cmd_code = cmd;
drivers/s390/char/raw3270.c
215
int raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
drivers/s390/char/raw3270.c
217
if (size + rq->ccw.count > rq->size)
drivers/s390/char/raw3270.c
219
memcpy(rq->buffer + rq->ccw.count, data, size);
drivers/s390/char/raw3270.c
220
rq->ccw.count += size;
drivers/s390/char/raw3270.c
228
void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
drivers/s390/char/raw3270.c
230
rq->ccw.cda = virt_to_dma32(data);
drivers/s390/char/raw3270.c
231
rq->ccw.count = size;
drivers/s390/char/raw3270.c
238
void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
drivers/s390/char/raw3270.c
240
rq->ccw.cda = virt_to_dma32(ib->data);
drivers/s390/char/raw3270.c
241
rq->ccw.count = ib->size;
drivers/s390/char/raw3270.c
242
rq->ccw.flags |= CCW_FLAG_IDA;
drivers/s390/char/raw3270.c
251
struct raw3270_request *rq)
drivers/s390/char/raw3270.c
253
rq->view = view;
drivers/s390/char/raw3270.c
258
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
drivers/s390/char/raw3270.c
259
(unsigned long)rq, 0, 0);
drivers/s390/char/raw3270.c
260
if (rq->rc) {
drivers/s390/char/raw3270.c
262
return rq->rc;
drivers/s390/char/raw3270.c
265
list_add_tail(&rq->list, &rp->req_queue);
drivers/s390/char/raw3270.c
276
int raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
drivers/s390/char/raw3270.c
289
rc = __raw3270_start(rp, view, rq);
drivers/s390/char/raw3270.c
295
int raw3270_start_request(struct raw3270_view *view, struct raw3270_request *rq,
drivers/s390/char/raw3270.c
300
rc = raw3270_request_reset(rq);
drivers/s390/char/raw3270.c
303
raw3270_request_set_cmd(rq, cmd);
drivers/s390/char/raw3270.c
304
rc = raw3270_request_add_data(rq, data, len);
drivers/s390/char/raw3270.c
307
return raw3270_start(view, rq);
drivers/s390/char/raw3270.c
311
int raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
drivers/s390/char/raw3270.c
322
rc = __raw3270_start(rp, view, rq);
drivers/s390/char/raw3270.c
327
int raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
drivers/s390/char/raw3270.c
332
rq->view = view;
drivers/s390/char/raw3270.c
334
list_add_tail(&rq->list, &rp->req_queue);
drivers/s390/char/raw3270.c
346
struct raw3270_request *rq;
drivers/s390/char/raw3270.c
351
rq = (struct raw3270_request *)intparm;
drivers/s390/char/raw3270.c
352
view = rq ? rq->view : rp->view;
drivers/s390/char/raw3270.c
371
view->fn->intv(view, rq, irb);
drivers/s390/char/raw3270.c
378
if (rq && !list_empty(&rq->list)) {
drivers/s390/char/raw3270.c
380
list_del_init(&rq->list);
drivers/s390/char/raw3270.c
381
if (rq->callback)
drivers/s390/char/raw3270.c
382
rq->callback(rq, rq->callback_data);
drivers/s390/char/raw3270.c
392
rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
drivers/s390/char/raw3270.c
393
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
drivers/s390/char/raw3270.c
394
(unsigned long)rq, 0, 0);
drivers/s390/char/raw3270.c
395
if (rq->rc == 0)
drivers/s390/char/raw3270.c
398
list_del_init(&rq->list);
drivers/s390/char/raw3270.c
399
if (rq->callback)
drivers/s390/char/raw3270.c
400
rq->callback(rq, rq->callback_data);
drivers/s390/char/raw3270.c
567
void raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
drivers/s390/char/raw3270.c
569
struct raw3270 *rp = rq->view->dev;
drivers/s390/char/raw3270.c
614
static void raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
drivers/s390/char/raw3270.c
616
struct raw3270 *rp = rq->view->dev;
drivers/s390/char/raw3270.c
620
if (rq->rc) {
drivers/s390/char/raw3270.c
681
struct raw3270_request *rq;
drivers/s390/char/raw3270.c
688
rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
drivers/s390/char/raw3270.c
689
view = rq->view;
drivers/s390/char/raw3270.c
690
rq->rc = -EACCES;
drivers/s390/char/raw3270.c
691
list_del_init(&rq->list);
drivers/s390/char/raw3270.c
692
if (rq->callback)
drivers/s390/char/raw3270.c
693
rq->callback(rq, rq->callback_data);
drivers/s390/char/raw3270.c
700
static void raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
drivers/s390/char/raw3270.c
705
if (rq) {
drivers/s390/char/raw3270.c
708
rq->rc = -EOPNOTSUPP;
drivers/s390/char/raw3270.c
710
rq->rc = -EIO;
drivers/s390/char/raw3270.h
100
void raw3270_read_modified_cb(struct raw3270_request *rq, void *data);
drivers/s390/char/raw3270.h
30
void (*callback)(struct raw3270_request *rq, void *data);
drivers/s390/char/raw3270.h
35
void raw3270_request_free(struct raw3270_request *rq);
drivers/s390/char/raw3270.h
36
int raw3270_request_reset(struct raw3270_request *rq);
drivers/s390/char/raw3270.h
37
void raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd);
drivers/s390/char/raw3270.h
38
int raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size);
drivers/s390/char/raw3270.h
39
void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size);
drivers/s390/char/raw3270.h
40
void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib);
drivers/s390/char/raw3270.h
43
raw3270_request_final(struct raw3270_request *rq)
drivers/s390/char/raw3270.h
45
return list_empty(&rq->list);
drivers/s390/char/raw3270.h
54
int (*activate)(struct raw3270_view *rq);
drivers/s390/char/raw3270.h
55
void (*deactivate)(struct raw3270_view *rq);
drivers/s390/char/raw3270.h
57
struct raw3270_request *rq, struct irb *ib);
drivers/s390/char/raw3270.h
92
int raw3270_start(struct raw3270_view *view, struct raw3270_request *rq);
drivers/s390/char/raw3270.h
93
int raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq);
drivers/s390/char/raw3270.h
94
int raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq);
drivers/s390/char/raw3270.h
98
int raw3270_start_request(struct raw3270_view *view, struct raw3270_request *rq,
drivers/s390/cio/chsc.h
215
u8 rq;
drivers/s390/net/qeth_core.h
1078
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/s390/net/qeth_core.h
1079
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
drivers/s390/net/qeth_core_main.c
6523
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
drivers/s390/net/qeth_core_main.c
6549
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/s390/net/qeth_core_main.c
6557
mii_data = if_mii(rq);
drivers/s390/net/qeth_core_main.c
6561
mii_data = if_mii(rq);
drivers/s390/net/qeth_l3_main.c
1518
static int qeth_l3_ndo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
drivers/s390/net/qeth_l3_main.c
1532
rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
drivers/s390/net/qeth_l3_main.c
1560
rc = qeth_siocdevprivate(dev, rq, data, cmd);
drivers/scsi/aha1542.c
264
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/aha1542.c
269
rq_for_each_segment(bv, rq, iter) {
drivers/scsi/aha1542.c
448
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/aha1542.c
453
rq_for_each_segment(bv, rq, iter) {
drivers/scsi/bnx2fc/bnx2fc.h
331
void *rq;
drivers/scsi/bnx2fc/bnx2fc_hwi.c
1463
char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
drivers/scsi/bnx2fc/bnx2fc_tgt.c
698
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
drivers/scsi/bnx2fc/bnx2fc_tgt.c
700
if (!tgt->rq) {
drivers/scsi/bnx2fc/bnx2fc_tgt.c
872
if (tgt->rq) {
drivers/scsi/bnx2fc/bnx2fc_tgt.c
874
tgt->rq, tgt->rq_dma);
drivers/scsi/bnx2fc/bnx2fc_tgt.c
875
tgt->rq = NULL;
drivers/scsi/elx/efct/efct_hw.c
1081
efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
drivers/scsi/elx/efct/efct_hw.c
1099
struct hw_rq *rq = hw->hw_rq[i];
drivers/scsi/elx/efct/efct_hw.c
1101
hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
drivers/scsi/elx/efct/efct_hw.c
1240
struct hw_rq *rq = hw->hw_rq[i];
drivers/scsi/elx/efct/efct_hw.c
1243
rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
drivers/scsi/elx/efct/efct_hw.c
1244
rq->entry_count,
drivers/scsi/elx/efct/efct_hw.c
1246
if (!rq->hdr_buf) {
drivers/scsi/elx/efct/efct_hw.c
1254
i, rq->hdr->id, rq->entry_count, hdr_size);
drivers/scsi/elx/efct/efct_hw.c
1259
rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
drivers/scsi/elx/efct/efct_hw.c
1260
rq->entry_count,
drivers/scsi/elx/efct/efct_hw.c
1262
if (!rq->payload_buf) {
drivers/scsi/elx/efct/efct_hw.c
1269
i, rq->data->id, rq->entry_count, payload_size);
drivers/scsi/elx/efct/efct_hw.c
1300
struct hw_rq *rq = hw->hw_rq[rq_idx];
drivers/scsi/elx/efct/efct_hw.c
1302
for (i = 0; i < rq->entry_count - 1; i++) {
drivers/scsi/elx/efct/efct_hw.c
1307
seq->header = &rq->hdr_buf[i];
drivers/scsi/elx/efct/efct_hw.c
1308
seq->payload = &rq->payload_buf[i];
drivers/scsi/elx/efct/efct_hw.c
1330
struct hw_rq *rq = hw->hw_rq[i];
drivers/scsi/elx/efct/efct_hw.c
1332
if (rq) {
drivers/scsi/elx/efct/efct_hw.c
1333
efct_hw_rx_buffer_free(hw, rq->hdr_buf,
drivers/scsi/elx/efct/efct_hw.c
1334
rq->entry_count);
drivers/scsi/elx/efct/efct_hw.c
1335
rq->hdr_buf = NULL;
drivers/scsi/elx/efct/efct_hw.c
1336
efct_hw_rx_buffer_free(hw, rq->payload_buf,
drivers/scsi/elx/efct/efct_hw.c
1337
rq->entry_count);
drivers/scsi/elx/efct/efct_hw.c
1338
rq->payload_buf = NULL;
drivers/scsi/elx/efct/efct_hw.c
3462
sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
drivers/scsi/elx/efct/efct_hw.c
748
struct hw_rq *rq = hw->hw_rq[i];
drivers/scsi/elx/efct/efct_hw.c
752
u32 mask = (rq->filter_mask != 0) ?
drivers/scsi/elx/efct/efct_hw.c
753
rq->filter_mask : 1;
drivers/scsi/elx/efct/efct_hw.c
758
rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
drivers/scsi/elx/efct/efct_hw.c
759
efct_logfcfi(hw, j, i, rq->hdr->id);
drivers/scsi/elx/efct/efct_hw.c
781
struct hw_rq *rq;
drivers/scsi/elx/efct/efct_hw.c
798
rq = hw->hw_rq[0];
drivers/scsi/elx/efct/efct_hw.c
799
rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
drivers/scsi/elx/efct/efct_hw.c
800
rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
drivers/scsi/elx/efct/efct_hw.h
385
struct sli4_queue rq[EFCT_HW_MAX_NUM_RQ];
drivers/scsi/elx/efct/efct_hw.h
752
void efct_hw_del_rq(struct hw_rq *rq);
drivers/scsi/elx/efct/efct_hw_queues.c
314
struct hw_rq *rq = NULL;
drivers/scsi/elx/efct/efct_hw_queues.c
327
rq = kzalloc_obj(*rq);
drivers/scsi/elx/efct/efct_hw_queues.c
328
if (!rq)
drivers/scsi/elx/efct/efct_hw_queues.c
331
rqs[i] = rq;
drivers/scsi/elx/efct/efct_hw_queues.c
332
rq->instance = hw->hw_rq_count++;
drivers/scsi/elx/efct/efct_hw_queues.c
333
rq->cq = cqs[i];
drivers/scsi/elx/efct/efct_hw_queues.c
334
rq->type = SLI4_QTYPE_RQ;
drivers/scsi/elx/efct/efct_hw_queues.c
335
rq->entry_count = entry_count;
drivers/scsi/elx/efct/efct_hw_queues.c
338
rq->hdr = &hw->rq[hw->rq_count];
drivers/scsi/elx/efct/efct_hw_queues.c
339
rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
drivers/scsi/elx/efct/efct_hw_queues.c
340
hw->hw_rq_lookup[hw->rq_count] = rq->instance;
drivers/scsi/elx/efct/efct_hw_queues.c
342
qs[q_count] = rq->hdr;
drivers/scsi/elx/efct/efct_hw_queues.c
345
rq->data = &hw->rq[hw->rq_count];
drivers/scsi/elx/efct/efct_hw_queues.c
346
rq->data_entry_size = hw->config.rq_default_buffer_size;
drivers/scsi/elx/efct/efct_hw_queues.c
347
hw->hw_rq_lookup[hw->rq_count] = rq->instance;
drivers/scsi/elx/efct/efct_hw_queues.c
349
qs[q_count + 1] = rq->data;
drivers/scsi/elx/efct/efct_hw_queues.c
351
rq->rq_tracker = NULL;
drivers/scsi/elx/efct/efct_hw_queues.c
455
efct_hw_del_rq(struct hw_rq *rq)
drivers/scsi/elx/efct/efct_hw_queues.c
459
if (!rq)
drivers/scsi/elx/efct/efct_hw_queues.c
462
kfree(rq->rq_tracker);
drivers/scsi/elx/efct/efct_hw_queues.c
463
rq->rq_tracker = NULL;
drivers/scsi/elx/efct/efct_hw_queues.c
464
list_del(&rq->list_entry);
drivers/scsi/elx/efct/efct_hw_queues.c
465
hw = rq->cq->eq->hw;
drivers/scsi/elx/efct/efct_hw_queues.c
466
hw->hw_rq[rq->instance] = NULL;
drivers/scsi/elx/efct/efct_hw_queues.c
467
kfree(rq);
drivers/scsi/elx/efct/efct_hw_queues.c
492
struct sli4_queue *rq_hdr = &hw->rq[rqindex];
drivers/scsi/elx/efct/efct_hw_queues.c
494
struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
drivers/scsi/elx/efct/efct_hw_queues.c
507
seq = rq->rq_tracker[bufindex];
drivers/scsi/elx/efct/efct_hw_queues.c
508
rq->rq_tracker[bufindex] = NULL;
drivers/scsi/elx/efct/efct_hw_queues.c
531
struct hw_rq *rq;
drivers/scsi/elx/efct/efct_hw_queues.c
581
rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
drivers/scsi/elx/efct/efct_hw_queues.c
582
rq->use_count++;
drivers/scsi/elx/efct/efct_hw_queues.c
604
struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
drivers/scsi/elx/efct/efct_hw_queues.c
605
struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
drivers/scsi/elx/efct/efct_hw_queues.c
607
struct hw_rq *rq = hw->hw_rq[hw_rq_index];
drivers/scsi/elx/efct/efct_hw_queues.c
642
if (!rq->rq_tracker[qindex_hdr]) {
drivers/scsi/elx/efct/efct_hw_queues.c
643
rq->rq_tracker[qindex_hdr] = seq;
drivers/scsi/elx/libefc_sli/sli4.c
371
struct sli4_rqst_rq_create_v1 *rq;
drivers/scsi/elx/libefc_sli/sli4.c
376
rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1),
drivers/scsi/elx/libefc_sli/sli4.c
378
if (!rq)
drivers/scsi/elx/libefc_sli/sli4.c
381
sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
drivers/scsi/elx/libefc_sli/sli4.c
384
rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB;
drivers/scsi/elx/libefc_sli/sli4.c
388
rq->num_pages = cpu_to_le16(num_pages);
drivers/scsi/elx/libefc_sli/sli4.c
399
rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE);
drivers/scsi/elx/libefc_sli/sli4.c
401
rq->rqe_size_byte |= SLI4_RQE_SIZE_8;
drivers/scsi/elx/libefc_sli/sli4.c
403
rq->page_size = SLI4_RQ_PAGE_SIZE_4096;
drivers/scsi/elx/libefc_sli/sli4.c
412
rq->buffer_size = cpu_to_le32(buffer_size);
drivers/scsi/elx/libefc_sli/sli4.c
414
rq->cq_id = cpu_to_le16(cq_id);
drivers/scsi/elx/libefc_sli/sli4.c
419
rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
drivers/scsi/elx/libefc_sli/sli4.c
420
rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
drivers/scsi/esas2r/esas2r.h
1004
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1009
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1015
void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1022
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1036
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1042
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1048
void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1050
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1054
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1060
void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1064
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1071
struct esas2r_request *rq, struct esas2r_sg_context *sgc);
drivers/scsi/esas2r/esas2r.h
1074
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1077
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1079
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1080
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1082
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1097
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1099
bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1131
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
1135
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1165
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1169
sgc->first_req = rq;
drivers/scsi/esas2r/esas2r.h
1175
sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
drivers/scsi/esas2r/esas2r.h
1184
rq->vrq->scsi.sg_list_offset = (u8)
drivers/scsi/esas2r/esas2r.h
1186
(u8 *)rq->vrq);
drivers/scsi/esas2r/esas2r.h
1189
sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
drivers/scsi/esas2r/esas2r.h
1190
rq->vrq->scsi.sg_list_offset =
drivers/scsi/esas2r/esas2r.h
1196
static inline void esas2r_rq_init_request(struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1199
union atto_vda_req *vrq = rq->vrq;
drivers/scsi/esas2r/esas2r.h
1201
INIT_LIST_HEAD(&rq->sg_table_head);
drivers/scsi/esas2r/esas2r.h
1202
rq->data_buf = (void *)(vrq + 1);
drivers/scsi/esas2r/esas2r.h
1203
rq->interrupt_cb = NULL;
drivers/scsi/esas2r/esas2r.h
1204
rq->comp_cb = esas2r_complete_request_cb;
drivers/scsi/esas2r/esas2r.h
1205
rq->flags = 0;
drivers/scsi/esas2r/esas2r.h
1206
rq->timeout = 0;
drivers/scsi/esas2r/esas2r.h
1207
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r.h
1208
rq->req_type = RT_INI_REQ;
drivers/scsi/esas2r/esas2r.h
1211
rq->func_rsp.dwords[0] = 0;
drivers/scsi/esas2r/esas2r.h
1212
rq->func_rsp.dwords[1] = 0;
drivers/scsi/esas2r/esas2r.h
1221
rq->vda_req_sz = RQ_SIZE_DEFAULT;
drivers/scsi/esas2r/esas2r.h
1232
a->req_table[LOWORD(vrq->scsi.handle)] = rq;
drivers/scsi/esas2r/esas2r.h
1256
= cpu_to_le64(rq->vrq_md->phys_addr +
drivers/scsi/esas2r/esas2r.h
1260
static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1265
if (list_empty(&rq->sg_table_head))
drivers/scsi/esas2r/esas2r.h
1269
list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
drivers/scsi/esas2r/esas2r.h
1273
static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1277
esas2r_rq_free_sg_lists(rq, a);
drivers/scsi/esas2r/esas2r.h
1278
a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
drivers/scsi/esas2r/esas2r.h
1279
rq->data_buf = NULL;
drivers/scsi/esas2r/esas2r.h
1300
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r.h
1303
if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
drivers/scsi/esas2r/esas2r.h
1390
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r.h
1394
esas2r_build_ae_req(a, rq);
drivers/scsi/esas2r/esas2r.h
1397
esas2r_start_vda_request(a, rq);
drivers/scsi/esas2r/esas2r.h
1404
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r.h
1408
rq = list_entry(element, struct esas2r_request, comp_list);
drivers/scsi/esas2r/esas2r.h
1410
esas2r_complete_request(a, rq);
drivers/scsi/esas2r/esas2r.h
406
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r.h
965
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_disc.c
1002
rq->req_stat, hi->status);
drivers/scsi/esas2r/esas2r_disc.c
1018
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
1023
esas2r_disc_continue(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
1045
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
1048
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
1084
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
1087
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
1163
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_disc.c
1171
rq = list_entry(element, struct esas2r_request, req_list);
drivers/scsi/esas2r/esas2r_disc.c
1172
if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
drivers/scsi/esas2r/esas2r_disc.c
1173
t = a->targetdb + rq->target_id;
drivers/scsi/esas2r/esas2r_disc.c
1176
rq->vrq->scsi.target_id = le16_to_cpu(
drivers/scsi/esas2r/esas2r_disc.c
1179
rq->req_stat = RS_SEL;
drivers/scsi/esas2r/esas2r_disc.c
160
struct esas2r_request *rq = &a->general_req;
drivers/scsi/esas2r/esas2r_disc.c
174
if (rq->interrupt_cx == NULL)
drivers/scsi/esas2r/esas2r_disc.c
177
if (rq->req_stat == RS_STARTED
drivers/scsi/esas2r/esas2r_disc.c
178
&& rq->timeout <= RQ_MAX_TIMEOUT) {
drivers/scsi/esas2r/esas2r_disc.c
180
esas2r_wait_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
182
if (rq->req_stat == RS_TIMEOUT) {
drivers/scsi/esas2r/esas2r_disc.c
183
esas2r_disc_abort(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
189
if (rq->req_stat == RS_PENDING
drivers/scsi/esas2r/esas2r_disc.c
190
|| rq->req_stat == RS_STARTED)
drivers/scsi/esas2r/esas2r_disc.c
193
esas2r_disc_continue(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
313
struct esas2r_request *rq = &a->general_req;
drivers/scsi/esas2r/esas2r_disc.c
359
rq->interrupt_cx = dc;
drivers/scsi/esas2r/esas2r_disc.c
360
rq->req_stat = RS_SUCCESS;
drivers/scsi/esas2r/esas2r_disc.c
377
ret = esas2r_disc_continue(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
387
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
390
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
400
rslt = esas2r_disc_dev_remove(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
405
rslt = esas2r_disc_dev_add(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
410
rslt = esas2r_disc_block_dev_scan(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
415
rslt = esas2r_disc_raid_grp_info(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
420
rslt = esas2r_disc_part_info(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
425
rslt = esas2r_disc_passthru_dev_info(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
429
rslt = esas2r_disc_passthru_dev_addr(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
448
rq->interrupt_cx = NULL;
drivers/scsi/esas2r/esas2r_disc.c
460
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
465
if (rq->timeout < ESAS2R_DEFAULT_TMO)
drivers/scsi/esas2r/esas2r_disc.c
466
rq->timeout = ESAS2R_DEFAULT_TMO;
drivers/scsi/esas2r/esas2r_disc.c
473
rq->req_type = RT_DISC_REQ;
drivers/scsi/esas2r/esas2r_disc.c
479
esas2r_disc_local_start_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
481
list_add_tail(&rq->req_list, &a->defer_list);
drivers/scsi/esas2r/esas2r_disc.c
489
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
49
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
493
list_add_tail(&rq->req_list, &a->active_list);
drivers/scsi/esas2r/esas2r_disc.c
495
esas2r_start_vda_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
503
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
506
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
51
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
518
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
521
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
526
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
529
rq,
drivers/scsi/esas2r/esas2r_disc.c
536
rq->comp_cb = esas2r_disc_block_dev_scan_cb;
drivers/scsi/esas2r/esas2r_disc.c
538
rq->timeout = 30000;
drivers/scsi/esas2r/esas2r_disc.c
539
rq->interrupt_cx = dc;
drivers/scsi/esas2r/esas2r_disc.c
541
rslt = esas2r_disc_start_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
549
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
55
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
552
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
559
if (rq->req_stat == RS_SUCCESS)
drivers/scsi/esas2r/esas2r_disc.c
560
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
drivers/scsi/esas2r/esas2r_disc.c
565
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
570
esas2r_disc_continue(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
578
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
581
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
59
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
597
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
599
grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
drivers/scsi/esas2r/esas2r_disc.c
604
rq,
drivers/scsi/esas2r/esas2r_disc.c
61
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
613
rq->comp_cb = esas2r_disc_raid_grp_info_cb;
drivers/scsi/esas2r/esas2r_disc.c
615
rq->interrupt_cx = dc;
drivers/scsi/esas2r/esas2r_disc.c
617
rslt = esas2r_disc_start_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
625
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
628
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
63
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
636
if (rq->req_stat == RS_SCAN_GEN) {
drivers/scsi/esas2r/esas2r_disc.c
637
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
drivers/scsi/esas2r/esas2r_disc.c
642
if (rq->req_stat == RS_SUCCESS) {
drivers/scsi/esas2r/esas2r_disc.c
643
grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
drivers/scsi/esas2r/esas2r_disc.c
65
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
662
if (!(rq->req_stat == RS_GRP_INVALID)) {
drivers/scsi/esas2r/esas2r_disc.c
666
rq->req_stat);
drivers/scsi/esas2r/esas2r_disc.c
67
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
675
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
680
esas2r_disc_continue(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
688
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
69
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
691
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
708
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
71
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
710
partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
drivers/scsi/esas2r/esas2r_disc.c
715
rq,
drivers/scsi/esas2r/esas2r_disc.c
728
rq->comp_cb = esas2r_disc_part_info_cb;
drivers/scsi/esas2r/esas2r_disc.c
73
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
730
rq->interrupt_cx = dc;
drivers/scsi/esas2r/esas2r_disc.c
732
rslt = esas2r_disc_start_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
740
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
743
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
75
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
751
if (rq->req_stat == RS_SCAN_GEN) {
drivers/scsi/esas2r/esas2r_disc.c
752
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
drivers/scsi/esas2r/esas2r_disc.c
755
} else if (rq->req_stat == RS_SUCCESS) {
drivers/scsi/esas2r/esas2r_disc.c
756
partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
drivers/scsi/esas2r/esas2r_disc.c
766
if (!(rq->req_stat == RS_PART_LAST)) {
drivers/scsi/esas2r/esas2r_disc.c
769
"failed - status:%d", rq->req_stat);
drivers/scsi/esas2r/esas2r_disc.c
77
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
776
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
781
esas2r_disc_continue(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
789
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
79
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
792
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
800
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
802
devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
drivers/scsi/esas2r/esas2r_disc.c
807
rq,
drivers/scsi/esas2r/esas2r_disc.c
81
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_disc.c
814
rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
drivers/scsi/esas2r/esas2r_disc.c
816
rq->interrupt_cx = dc;
drivers/scsi/esas2r/esas2r_disc.c
818
rslt = esas2r_disc_start_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
826
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
829
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
837
if (rq->req_stat == RS_SCAN_GEN) {
drivers/scsi/esas2r/esas2r_disc.c
838
dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
drivers/scsi/esas2r/esas2r_disc.c
841
} else if (rq->req_stat == RS_SUCCESS) {
drivers/scsi/esas2r/esas2r_disc.c
842
devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
drivers/scsi/esas2r/esas2r_disc.c
844
dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
drivers/scsi/esas2r/esas2r_disc.c
860
if (!(rq->req_stat == RS_DEV_INVALID)) {
drivers/scsi/esas2r/esas2r_disc.c
863
"status:%d", rq->req_stat);
drivers/scsi/esas2r/esas2r_disc.c
869
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
874
esas2r_disc_continue(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
882
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
885
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
892
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
901
esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
drivers/scsi/esas2r/esas2r_disc.c
903
esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
drivers/scsi/esas2r/esas2r_disc.c
905
if (!esas2r_build_sg_list(a, rq, &sgc)) {
drivers/scsi/esas2r/esas2r_disc.c
906
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_disc.c
913
rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
drivers/scsi/esas2r/esas2r_disc.c
915
rq->interrupt_cx = dc;
drivers/scsi/esas2r/esas2r_disc.c
932
rslt = esas2r_disc_start_request(a, rq);
drivers/scsi/esas2r/esas2r_disc.c
940
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_disc.c
943
(struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_disc.c
955
if (rq->req_stat == RS_SUCCESS
drivers/scsi/esas2r/esas2r_flash.c
1213
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_flash.c
1215
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
drivers/scsi/esas2r/esas2r_flash.c
1217
if (rq->req_stat == RS_SUCCESS) {
drivers/scsi/esas2r/esas2r_flash.c
1223
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r_flash.c
1228
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r_flash.c
1241
if (rq->req_stat != RS_PENDING) {
drivers/scsi/esas2r/esas2r_flash.c
1243
if (rq->req_stat == RS_SUCCESS)
drivers/scsi/esas2r/esas2r_flash.c
1258
bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_flash.c
1264
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
drivers/scsi/esas2r/esas2r_flash.c
1304
rq,
drivers/scsi/esas2r/esas2r_flash.c
1324
rq->interrupt_cb = esas2r_nvram_callback;
drivers/scsi/esas2r/esas2r_flash.c
1325
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_flash.c
134
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_flash.c
136
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
drivers/scsi/esas2r/esas2r_flash.c
138
(struct esas2r_flash_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_flash.c
1391
struct esas2r_request *rq, struct esas2r_sg_context *sgc)
drivers/scsi/esas2r/esas2r_flash.c
140
if (rq->req_stat == RS_SUCCESS) {
drivers/scsi/esas2r/esas2r_flash.c
1407
rq->req_stat = RS_SUCCESS;
drivers/scsi/esas2r/esas2r_flash.c
1408
rq->interrupt_cx = fc;
drivers/scsi/esas2r/esas2r_flash.c
1418
return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
drivers/scsi/esas2r/esas2r_flash.c
1422
return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
drivers/scsi/esas2r/esas2r_flash.c
1428
return complete_fmapi_req(a, rq, fi->status);
drivers/scsi/esas2r/esas2r_flash.c
148
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r_flash.c
1500
return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
drivers/scsi/esas2r/esas2r_flash.c
1507
return complete_fmapi_req(a, rq, FI_STAT_INVALID);
drivers/scsi/esas2r/esas2r_flash.c
1516
if (!load_image(a, rq))
drivers/scsi/esas2r/esas2r_flash.c
1517
return complete_fmapi_req(a, rq, FI_STAT_FAILED);
drivers/scsi/esas2r/esas2r_flash.c
1519
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_flash.c
154
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r_flash.c
155
rq->interrupt_cb = fc->interrupt_cb;
drivers/scsi/esas2r/esas2r_flash.c
163
if (rq->req_stat != RS_PENDING)
drivers/scsi/esas2r/esas2r_flash.c
169
(*fc->interrupt_cb)(a, rq);
drivers/scsi/esas2r/esas2r_flash.c
177
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_flash.c
180
(struct esas2r_flash_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_flash.c
190
rq->interrupt_cb = esas2r_fmapi_callback;
drivers/scsi/esas2r/esas2r_flash.c
192
rq->interrupt_cb = fc->interrupt_cb;
drivers/scsi/esas2r/esas2r_flash.c
195
rq,
drivers/scsi/esas2r/esas2r_flash.c
201
esas2r_rq_free_sg_lists(rq, a);
drivers/scsi/esas2r/esas2r_flash.c
212
esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
drivers/scsi/esas2r/esas2r_flash.c
214
if (!esas2r_build_sg_list(a, rq, sgc)) {
drivers/scsi/esas2r/esas2r_flash.c
215
rq->req_stat = RS_BUSY;
drivers/scsi/esas2r/esas2r_flash.c
227
static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_flash.c
233
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r_flash.c
237
build_flash_msg(a, rq);
drivers/scsi/esas2r/esas2r_flash.c
239
return rq->req_stat == RS_PENDING;
drivers/scsi/esas2r/esas2r_flash.c
303
struct esas2r_request *rq, u8 fi_stat)
drivers/scsi/esas2r/esas2r_flash.c
306
(struct esas2r_flash_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_flash.c
310
fi->driver_error = rq->req_stat;
drivers/scsi/esas2r/esas2r_flash.c
311
rq->interrupt_cb = NULL;
drivers/scsi/esas2r/esas2r_flash.c
312
rq->req_stat = RS_SUCCESS;
drivers/scsi/esas2r/esas2r_flash.c
324
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_flash.c
327
(struct esas2r_flash_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_flash.c
334
if (rq->req_stat != RS_SUCCESS)
drivers/scsi/esas2r/esas2r_flash.c
538
complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
drivers/scsi/esas2r/esas2r_flash.c
549
if (!load_image(a, rq)) {
drivers/scsi/esas2r/esas2r_flash.c
556
complete_fmapi_req(a, rq, FI_STAT_FAILED);
drivers/scsi/esas2r/esas2r_flash.c
828
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_flash.c
831
(struct esas2r_ioctl_fs *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_flash.c
833
if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
drivers/scsi/esas2r/esas2r_flash.c
836
fs->driver_error = rq->req_stat;
drivers/scsi/esas2r/esas2r_flash.c
847
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_flash.c
898
rq->interrupt_cb = esas2r_complete_fs_ioctl;
drivers/scsi/esas2r/esas2r_flash.c
899
rq->interrupt_cx = fs;
drivers/scsi/esas2r/esas2r_flash.c
902
rq,
drivers/scsi/esas2r/esas2r_flash.c
915
esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
drivers/scsi/esas2r/esas2r_flash.c
918
if (!esas2r_build_sg_list(a, rq, sgc)) {
drivers/scsi/esas2r/esas2r_flash.c
927
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_init.c
104
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_init.c
1141
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_init.c
1154
rq,
drivers/scsi/esas2r/esas2r_init.c
1158
ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
drivers/scsi/esas2r/esas2r_init.c
1162
rq->flags |= RF_FAILURE_OK;
drivers/scsi/esas2r/esas2r_init.c
1168
if (rq->req_stat == RS_SUCCESS) {
drivers/scsi/esas2r/esas2r_init.c
1174
rq->func_rsp.cfg_rsp.vda_version);
drivers/scsi/esas2r/esas2r_init.c
1175
a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
drivers/scsi/esas2r/esas2r_init.c
1177
rq->func_rsp.cfg_rsp.fw_release);
drivers/scsi/esas2r/esas2r_init.c
1194
rq,
drivers/scsi/esas2r/esas2r_init.c
1199
rq->vrq->cfg.sg_list_offset = offsetof(
drivers/scsi/esas2r/esas2r_init.c
1202
rq->vrq->cfg.data.prde.ctl_len =
drivers/scsi/esas2r/esas2r_init.c
1204
rq->vrq->cfg.data.prde.address = cpu_to_le64(
drivers/scsi/esas2r/esas2r_init.c
1205
rq->vrq_md->phys_addr +
drivers/scsi/esas2r/esas2r_init.c
1207
rq->flags |= RF_FAILURE_OK;
drivers/scsi/esas2r/esas2r_init.c
1215
ci = (struct atto_vda_cfg_init *)rq->data_buf;
drivers/scsi/esas2r/esas2r_init.c
1216
if (rq->req_stat == RS_SUCCESS) {
drivers/scsi/esas2r/esas2r_init.c
1228
rq->req_stat = RS_SUCCESS;
drivers/scsi/esas2r/esas2r_init.c
1241
struct esas2r_request *rq = &a->general_req;
drivers/scsi/esas2r/esas2r_init.c
1243
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_init.c
1244
rq->comp_cb = esas2r_dummy_complete;
drivers/scsi/esas2r/esas2r_init.c
125
rq->vrq_md = memdesc;
drivers/scsi/esas2r/esas2r_init.c
1250
if (esas2r_format_init_msg(a, rq)) {
drivers/scsi/esas2r/esas2r_init.c
1254
esas2r_start_vda_request(a, rq);
drivers/scsi/esas2r/esas2r_init.c
1256
esas2r_wait_request(a, rq);
drivers/scsi/esas2r/esas2r_init.c
1257
if (rq->req_stat != RS_PENDING)
drivers/scsi/esas2r/esas2r_init.c
126
rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
drivers/scsi/esas2r/esas2r_init.c
1262
if (rq->req_stat == RS_SUCCESS
drivers/scsi/esas2r/esas2r_init.c
1263
|| ((rq->flags & RF_FAILURE_OK)
drivers/scsi/esas2r/esas2r_init.c
1264
&& rq->req_stat != RS_TIMEOUT))
drivers/scsi/esas2r/esas2r_init.c
1268
a->init_msg, rq->req_stat, rq->flags);
drivers/scsi/esas2r/esas2r_init.c
127
rq->vrq->scsi.handle = a->num_vrqs;
drivers/scsi/esas2r/esas2r_init.c
1274
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_init.c
1282
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_init.c
1304
for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
drivers/scsi/esas2r/esas2r_init.c
1305
esas2r_start_ae_request(a, rq);
drivers/scsi/esas2r/esas2r_init.c
768
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_init.c
912
for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
drivers/scsi/esas2r/esas2r_init.c
914
INIT_LIST_HEAD(&rq->req_list);
drivers/scsi/esas2r/esas2r_init.c
915
if (!alloc_vda_req(a, rq)) {
drivers/scsi/esas2r/esas2r_init.c
921
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_init.c
924
rq->comp_cb = esas2r_ae_complete;
drivers/scsi/esas2r/esas2r_int.c
173
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_int.c
181
if (unlikely(rq->req_stat != RS_SUCCESS)) {
drivers/scsi/esas2r/esas2r_int.c
182
memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
drivers/scsi/esas2r/esas2r_int.c
184
if (rq->req_stat == RS_ABORTED) {
drivers/scsi/esas2r/esas2r_int.c
185
if (rq->timeout > RQ_MAX_TIMEOUT)
drivers/scsi/esas2r/esas2r_int.c
186
rq->req_stat = RS_TIMEOUT;
drivers/scsi/esas2r/esas2r_int.c
187
} else if (rq->req_stat == RS_SCSI_ERROR) {
drivers/scsi/esas2r/esas2r_int.c
188
u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
drivers/scsi/esas2r/esas2r_int.c
197
rq->req_stat = RS_SUCCESS;
drivers/scsi/esas2r/esas2r_int.c
198
rq->func_rsp.scsi_rsp.scsi_stat =
drivers/scsi/esas2r/esas2r_int.c
210
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_int.c
262
rq = a->req_table[LOWORD(handle)];
drivers/scsi/esas2r/esas2r_int.c
264
if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
drivers/scsi/esas2r/esas2r_int.c
269
list_del(&rq->req_list);
drivers/scsi/esas2r/esas2r_int.c
272
rq->req_stat = rsp->req_stat;
drivers/scsi/esas2r/esas2r_int.c
275
esas2r_trace("rq: %p", rq);
drivers/scsi/esas2r/esas2r_int.c
276
esas2r_trace("req_status: %x", rq->req_stat);
drivers/scsi/esas2r/esas2r_int.c
278
if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
drivers/scsi/esas2r/esas2r_int.c
279
esas2r_handle_outbound_rsp_err(a, rq, rsp);
drivers/scsi/esas2r/esas2r_int.c
285
memcpy(&rq->func_rsp, &rsp->func_rsp,
drivers/scsi/esas2r/esas2r_int.c
290
list_add_tail(&rq->comp_list, &comp_list);
drivers/scsi/esas2r/esas2r_int.c
309
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_int.c
347
rq = list_entry(element, struct esas2r_request,
drivers/scsi/esas2r/esas2r_int.c
350
if (rq->req_stat != RS_PENDING) {
drivers/scsi/esas2r/esas2r_int.c
352
list_add_tail(&rq->comp_list, &comp_list);
drivers/scsi/esas2r/esas2r_int.c
360
else if (rq->req_type == RT_DISC_REQ) {
drivers/scsi/esas2r/esas2r_int.c
362
esas2r_disc_local_start_request(a, rq);
drivers/scsi/esas2r/esas2r_int.c
365
esas2r_local_start_request(a, rq);
drivers/scsi/esas2r/esas2r_int.c
390
struct esas2r_request *rq = &a->general_req;
drivers/scsi/esas2r/esas2r_int.c
403
if (rq->interrupt_cx) {
drivers/scsi/esas2r/esas2r_int.c
404
dc = (struct esas2r_disc_context *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_int.c
418
rq->interrupt_cx = NULL;
drivers/scsi/esas2r/esas2r_int.c
419
rq->interrupt_cb = NULL;
drivers/scsi/esas2r/esas2r_int.c
421
rq->comp_cb = esas2r_dummy_complete;
drivers/scsi/esas2r/esas2r_int.c
433
rq = list_entry(element, struct esas2r_request, req_list);
drivers/scsi/esas2r/esas2r_int.c
435
if (rq->req_stat == RS_STARTED)
drivers/scsi/esas2r/esas2r_int.c
436
if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
drivers/scsi/esas2r/esas2r_int.c
437
list_add_tail(&rq->comp_list, &comp_list);
drivers/scsi/esas2r/esas2r_int.c
448
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_int.c
462
rq = list_entry(element, struct esas2r_request, req_list);
drivers/scsi/esas2r/esas2r_int.c
463
if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
drivers/scsi/esas2r/esas2r_int.c
464
list_add_tail(&rq->comp_list, &comp_list);
drivers/scsi/esas2r/esas2r_int.c
749
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_int.c
752
(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
drivers/scsi/esas2r/esas2r_int.c
753
u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
drivers/scsi/esas2r/esas2r_int.c
755
(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
drivers/scsi/esas2r/esas2r_int.c
766
rq, length);
drivers/scsi/esas2r/esas2r_int.c
848
esas2r_start_ae_request(a, rq);
drivers/scsi/esas2r/esas2r_int.c
876
void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_int.c
880
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_int.c
884
snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
drivers/scsi/esas2r/esas2r_int.c
886
if (snslen > rq->sense_len)
drivers/scsi/esas2r/esas2r_int.c
887
snslen = rq->sense_len;
drivers/scsi/esas2r/esas2r_int.c
890
if (rq->sense_buf)
drivers/scsi/esas2r/esas2r_int.c
891
memcpy(rq->sense_buf, rq->data_buf, snslen);
drivers/scsi/esas2r/esas2r_int.c
893
rq->sense_buf = (u8 *)rq->data_buf;
drivers/scsi/esas2r/esas2r_int.c
897
u8 *s = (u8 *)rq->data_buf;
drivers/scsi/esas2r/esas2r_int.c
904
rq->target_id);
drivers/scsi/esas2r/esas2r_int.c
905
esas2r_target_state_changed(a, rq->target_id,
drivers/scsi/esas2r/esas2r_int.c
915
rq->sense_len = snslen;
drivers/scsi/esas2r/esas2r_int.c
920
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_int.c
922
if (rq->vrq->scsi.function == VDA_FUNC_FLASH
drivers/scsi/esas2r/esas2r_int.c
923
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
drivers/scsi/esas2r/esas2r_int.c
928
if (rq->interrupt_cb) {
drivers/scsi/esas2r/esas2r_int.c
929
(*rq->interrupt_cb)(a, rq);
drivers/scsi/esas2r/esas2r_int.c
931
if (rq->req_stat == RS_PENDING) {
drivers/scsi/esas2r/esas2r_int.c
932
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_int.c
937
if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
drivers/scsi/esas2r/esas2r_int.c
938
&& unlikely(rq->req_stat != RS_SUCCESS)) {
drivers/scsi/esas2r/esas2r_int.c
939
esas2r_check_req_rsp_sense(a, rq);
drivers/scsi/esas2r/esas2r_int.c
940
esas2r_log_request_failure(a, rq);
drivers/scsi/esas2r/esas2r_int.c
943
(*rq->comp_cb)(a, rq);
drivers/scsi/esas2r/esas2r_io.c
120
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_io.c
123
esas2r_trace("rq=%p", rq);
drivers/scsi/esas2r/esas2r_io.c
124
esas2r_trace("rq->vrq:%p", rq->vrq);
drivers/scsi/esas2r/esas2r_io.c
125
esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
drivers/scsi/esas2r/esas2r_io.c
127
if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
drivers/scsi/esas2r/esas2r_io.c
128
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
drivers/scsi/esas2r/esas2r_io.c
131
list_add_tail(&rq->req_list, &a->active_list);
drivers/scsi/esas2r/esas2r_io.c
132
esas2r_start_vda_request(a, rq);
drivers/scsi/esas2r/esas2r_io.c
138
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_io.c
143
rq->req_stat = RS_STARTED;
drivers/scsi/esas2r/esas2r_io.c
164
if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
drivers/scsi/esas2r/esas2r_io.c
165
rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
drivers/scsi/esas2r/esas2r_io.c
167
element->address = cpu_to_le64(rq->vrq_md->phys_addr);
drivers/scsi/esas2r/esas2r_io.c
168
element->length = cpu_to_le32(rq->vda_req_sz);
drivers/scsi/esas2r/esas2r_io.c
176
esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
drivers/scsi/esas2r/esas2r_io.c
178
esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
drivers/scsi/esas2r/esas2r_io.c
190
struct esas2r_request *rq = sgc->first_req;
drivers/scsi/esas2r/esas2r_io.c
191
union atto_vda_req *vrq = rq->vrq;
drivers/scsi/esas2r/esas2r_io.c
273
- (u8 *)rq->sg_table->
drivers/scsi/esas2r/esas2r_io.c
288
rq->vda_req_sz =
drivers/scsi/esas2r/esas2r_io.c
302
list_add(&sgl->next_desc, &rq->sg_table_head);
drivers/scsi/esas2r/esas2r_io.c
337
(u8 *)rq->sg_table->virt_addr));
drivers/scsi/esas2r/esas2r_io.c
354
if (reqsize > rq->vda_req_sz)
drivers/scsi/esas2r/esas2r_io.c
355
rq->vda_req_sz = reqsize;
drivers/scsi/esas2r/esas2r_io.c
373
struct esas2r_request *rq = sgc->first_req;
drivers/scsi/esas2r/esas2r_io.c
458
list_add(&sgl->next_desc, &rq->sg_table_head);
drivers/scsi/esas2r/esas2r_io.c
46
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_io.c
49
struct esas2r_request *startrq = rq;
drivers/scsi/esas2r/esas2r_io.c
512
if (!list_empty(&rq->sg_table_head)) {
drivers/scsi/esas2r/esas2r_io.c
527
struct esas2r_request *rq = sgc->first_req;
drivers/scsi/esas2r/esas2r_io.c
529
struct esas2r_target *t = a->targetdb + rq->target_id;
drivers/scsi/esas2r/esas2r_io.c
533
u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
drivers/scsi/esas2r/esas2r_io.c
54
if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
drivers/scsi/esas2r/esas2r_io.c
540
if (rq->vrq->scsi.function == VDA_FUNC_SCSI
drivers/scsi/esas2r/esas2r_io.c
545
switch (rq->vrq->scsi.cdb[0]) {
drivers/scsi/esas2r/esas2r_io.c
55
rq->req_stat = RS_SEL2;
drivers/scsi/esas2r/esas2r_io.c
57
rq->req_stat = RS_DEGRADED;
drivers/scsi/esas2r/esas2r_io.c
58
} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
drivers/scsi/esas2r/esas2r_io.c
59
t = a->targetdb + rq->target_id;
drivers/scsi/esas2r/esas2r_io.c
591
rq->vrq->scsi.iblk_cnt_prd = 0;
drivers/scsi/esas2r/esas2r_io.c
600
rq->flags |= RF_1ST_IBLK_BASE;
drivers/scsi/esas2r/esas2r_io.c
63
rq->req_stat = RS_SEL;
drivers/scsi/esas2r/esas2r_io.c
639
rq->vrq->scsi.iblk_cnt_prd++;
drivers/scsi/esas2r/esas2r_io.c
650
reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
drivers/scsi/esas2r/esas2r_io.c
659
if (reqsize > rq->vda_req_sz)
drivers/scsi/esas2r/esas2r_io.c
66
rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
drivers/scsi/esas2r/esas2r_io.c
660
rq->vda_req_sz = reqsize;
drivers/scsi/esas2r/esas2r_io.c
75
rq->req_stat = RS_SEL;
drivers/scsi/esas2r/esas2r_io.c
770
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_io.c
783
rq = list_entry(element, struct esas2r_request, req_list);
drivers/scsi/esas2r/esas2r_io.c
785
if (rq->vrq->scsi.function == VDA_FUNC_SCSI
drivers/scsi/esas2r/esas2r_io.c
786
&& rq->target_id == targetid
drivers/scsi/esas2r/esas2r_io.c
787
&& (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
drivers/scsi/esas2r/esas2r_io.c
79
if (unlikely(rq->req_stat != RS_PENDING)) {
drivers/scsi/esas2r/esas2r_io.c
790
if (rq->req_stat == RS_PENDING) {
drivers/scsi/esas2r/esas2r_io.c
795
if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
drivers/scsi/esas2r/esas2r_io.c
796
list_add_tail(&rq->comp_list,
drivers/scsi/esas2r/esas2r_io.c
80
esas2r_complete_request(a, rq);
drivers/scsi/esas2r/esas2r_io.c
816
rq = list_entry(element, struct esas2r_request,
drivers/scsi/esas2r/esas2r_io.c
818
if (rq->vrq->scsi.function == VDA_FUNC_SCSI
drivers/scsi/esas2r/esas2r_io.c
819
&& rq->target_id == targetid
drivers/scsi/esas2r/esas2r_io.c
820
&& (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
drivers/scsi/esas2r/esas2r_io.c
84
esas2r_trace("rq=%p", rq);
drivers/scsi/esas2r/esas2r_io.c
85
esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
drivers/scsi/esas2r/esas2r_io.c
858
bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_io.c
862
esas2r_trace("rq:%p", rq);
drivers/scsi/esas2r/esas2r_io.c
863
list_del_init(&rq->req_list);
drivers/scsi/esas2r/esas2r_io.c
864
if (rq->timeout > RQ_MAX_TIMEOUT) {
drivers/scsi/esas2r/esas2r_io.c
869
rq->req_stat = RS_BUSY;
drivers/scsi/esas2r/esas2r_io.c
87
if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
drivers/scsi/esas2r/esas2r_io.c
874
rq->req_stat = status;
drivers/scsi/esas2r/esas2r_io.c
88
esas2r_trace("rq->target_id=%d", rq->target_id);
drivers/scsi/esas2r/esas2r_io.c
89
esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
drivers/scsi/esas2r/esas2r_ioctl.c
1000
esas2r_sgc_init(sgc, a, rq, NULL);
drivers/scsi/esas2r/esas2r_ioctl.c
1007
rq->target_id = (u16)spt->target_id;
drivers/scsi/esas2r/esas2r_ioctl.c
1008
rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
drivers/scsi/esas2r/esas2r_ioctl.c
1009
memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
drivers/scsi/esas2r/esas2r_ioctl.c
1010
rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
drivers/scsi/esas2r/esas2r_ioctl.c
1011
rq->sense_len = spt->sense_length;
drivers/scsi/esas2r/esas2r_ioctl.c
1012
rq->sense_buf = (u8 *)spt->sense_data;
drivers/scsi/esas2r/esas2r_ioctl.c
1020
rq->aux_req_cx = hi;
drivers/scsi/esas2r/esas2r_ioctl.c
1021
rq->aux_req_cb = rq->comp_cb;
drivers/scsi/esas2r/esas2r_ioctl.c
1022
rq->comp_cb = scsi_passthru_comp_cb;
drivers/scsi/esas2r/esas2r_ioctl.c
1025
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
drivers/scsi/esas2r/esas2r_ioctl.c
1027
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
drivers/scsi/esas2r/esas2r_ioctl.c
1036
rq->vrq->scsi.flags |=
drivers/scsi/esas2r/esas2r_ioctl.c
1039
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
drivers/scsi/esas2r/esas2r_ioctl.c
1042
if (!esas2r_build_sg_list(a, rq, sgc)) {
drivers/scsi/esas2r/esas2r_ioctl.c
1047
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
1059
if (hba_ioctl_tunnel(a, hi, rq, sgc))
drivers/scsi/esas2r/esas2r_ioctl.c
1105
if (hba_ioctl_tunnel(a, hi, rq, sgc))
drivers/scsi/esas2r/esas2r_ioctl.c
113
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_ioctl.c
1155
if (hba_ioctl_tunnel(a, hi, rq, sgc))
drivers/scsi/esas2r/esas2r_ioctl.c
1199
struct esas2r_request *rq, void *context)
drivers/scsi/esas2r/esas2r_ioctl.c
120
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_ioctl.c
121
if (rq == NULL) {
drivers/scsi/esas2r/esas2r_ioctl.c
1244
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_ioctl.c
1250
rq->comp_cb = complete_nvr_req;
drivers/scsi/esas2r/esas2r_ioctl.c
1252
if (esas2r_nvram_write(a, rq, data)) {
drivers/scsi/esas2r/esas2r_ioctl.c
1260
if (rq->req_stat == RS_SUCCESS)
drivers/scsi/esas2r/esas2r_ioctl.c
1272
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_ioctl.c
1393
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_ioctl.c
1394
if (rq == NULL) {
drivers/scsi/esas2r/esas2r_ioctl.c
1401
code = esas2r_write_params(a, rq,
drivers/scsi/esas2r/esas2r_ioctl.c
1405
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
153
rq->comp_cb = complete_fm_api_req;
drivers/scsi/esas2r/esas2r_ioctl.c
157
if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
drivers/scsi/esas2r/esas2r_ioctl.c
176
esas2r_free_request(a, (struct esas2r_request *)rq);
drivers/scsi/esas2r/esas2r_ioctl.c
1802
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_ioctl.c
1825
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_ioctl.c
1838
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_ioctl.c
1839
if (rq == NULL) {
drivers/scsi/esas2r/esas2r_ioctl.c
184
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_ioctl.c
1844
rq->comp_cb = vda_complete_req;
drivers/scsi/esas2r/esas2r_ioctl.c
1846
sgc.first_req = rq;
drivers/scsi/esas2r/esas2r_ioctl.c
1854
esas2r_process_vda_ioctl(a, vi, rq, &sgc);
drivers/scsi/esas2r/esas2r_ioctl.c
1864
esas2r_free_request(a, (struct esas2r_request *)rq);
drivers/scsi/esas2r/esas2r_ioctl.c
1920
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_ioctl.c
1947
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_ioctl.c
1966
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_ioctl.c
1967
if (rq == NULL) {
drivers/scsi/esas2r/esas2r_ioctl.c
1973
rq->comp_cb = fs_api_complete_req;
drivers/scsi/esas2r/esas2r_ioctl.c
1982
if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
drivers/scsi/esas2r/esas2r_ioctl.c
1998
esas2r_free_request(a, (struct esas2r_request *)rq);
drivers/scsi/esas2r/esas2r_ioctl.c
201
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_ioctl.c
210
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_ioctl.c
253
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_ioctl.c
254
if (rq == NULL) {
drivers/scsi/esas2r/esas2r_ioctl.c
264
rq->comp_cb = complete_buffered_ioctl_req;
drivers/scsi/esas2r/esas2r_ioctl.c
269
if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
drivers/scsi/esas2r/esas2r_ioctl.c
282
(*bi->done_callback)(a, rq, bi->done_context);
drivers/scsi/esas2r/esas2r_ioctl.c
284
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
296
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_ioctl.c
302
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
drivers/scsi/esas2r/esas2r_ioctl.c
303
esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
drivers/scsi/esas2r/esas2r_ioctl.c
305
if (!esas2r_build_sg_list(a, rq, sgc)) {
drivers/scsi/esas2r/esas2r_ioctl.c
310
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
333
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_ioctl.c
335
rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
drivers/scsi/esas2r/esas2r_ioctl.c
336
rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
drivers/scsi/esas2r/esas2r_ioctl.c
339
(*rq->aux_req_cb)(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
345
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_ioctl.c
350
struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
drivers/scsi/esas2r/esas2r_ioctl.c
355
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
drivers/scsi/esas2r/esas2r_ioctl.c
356
esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
drivers/scsi/esas2r/esas2r_ioctl.c
359
ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
drivers/scsi/esas2r/esas2r_ioctl.c
365
rq->aux_req_cx = ci;
drivers/scsi/esas2r/esas2r_ioctl.c
366
rq->aux_req_cb = rq->comp_cb;
drivers/scsi/esas2r/esas2r_ioctl.c
367
rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
drivers/scsi/esas2r/esas2r_ioctl.c
369
if (!esas2r_build_sg_list(a, rq, sgc))
drivers/scsi/esas2r/esas2r_ioctl.c
372
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
393
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_ioctl.c
421
rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
drivers/scsi/esas2r/esas2r_ioctl.c
426
rq->target_id = tid;
drivers/scsi/esas2r/esas2r_ioctl.c
427
rq->vrq->scsi.flags |= cpu_to_le32(lun);
drivers/scsi/esas2r/esas2r_ioctl.c
516
if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
drivers/scsi/esas2r/esas2r_ioctl.c
550
rq->target_id = esas2r_targ_get_id(t, a);
drivers/scsi/esas2r/esas2r_ioctl.c
559
t = a->targetdb + rq->target_id;
drivers/scsi/esas2r/esas2r_ioctl.c
571
gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
drivers/scsi/esas2r/esas2r_ioctl.c
578
t = a->targetdb + rq->target_id;
drivers/scsi/esas2r/esas2r_ioctl.c
587
if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
drivers/scsi/esas2r/esas2r_ioctl.c
602
rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
drivers/scsi/esas2r/esas2r_ioctl.c
609
struct esas2r_request *rq, void *context)
drivers/scsi/esas2r/esas2r_ioctl.c
634
if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
drivers/scsi/esas2r/esas2r_ioctl.c
636
gsa->target_id = rq->target_id;
drivers/scsi/esas2r/esas2r_ioctl.c
644
ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
drivers/scsi/esas2r/esas2r_ioctl.c
671
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_ioctl.c
674
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
drivers/scsi/esas2r/esas2r_ioctl.c
676
esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
drivers/scsi/esas2r/esas2r_ioctl.c
678
if (!esas2r_build_sg_list(a, rq, sgc)) {
drivers/scsi/esas2r/esas2r_ioctl.c
684
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
690
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_ioctl.c
692
struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
drivers/scsi/esas2r/esas2r_ioctl.c
696
spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
drivers/scsi/esas2r/esas2r_ioctl.c
697
spt->sense_length = rq->sense_len;
drivers/scsi/esas2r/esas2r_ioctl.c
699
le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
drivers/scsi/esas2r/esas2r_ioctl.c
701
switch (rq->req_stat) {
drivers/scsi/esas2r/esas2r_ioctl.c
743
(*rq->aux_req_cb)(a, rq);
drivers/scsi/esas2r/esas2r_ioctl.c
747
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_ioctl.c
85
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_ioctl.c
897
if (hba_ioctl_tunnel(a, hi, rq, sgc))
drivers/scsi/esas2r/esas2r_ioctl.c
913
if (hba_ioctl_tunnel(a, hi, rq, sgc))
drivers/scsi/esas2r/esas2r_ioctl.c
983
if (hba_ioctl_tunnel(a, hi, rq, sgc))
drivers/scsi/esas2r/esas2r_main.c
1111
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_main.c
1119
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_main.c
1120
if (rq == NULL) {
drivers/scsi/esas2r/esas2r_main.c
1138
rq->target_id = cmd->device->id;
drivers/scsi/esas2r/esas2r_main.c
1139
rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
drivers/scsi/esas2r/esas2r_main.c
1140
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r_main.c
1142
rq->comp_cb = complete_task_management_request;
drivers/scsi/esas2r/esas2r_main.c
1143
rq->task_management_status_ptr = &task_management_status;
drivers/scsi/esas2r/esas2r_main.c
1146
esas2r_debug("issuing target reset (%p) to id %d", rq,
drivers/scsi/esas2r/esas2r_main.c
1148
completed = esas2r_send_task_mgmt(a, rq, 0x20);
drivers/scsi/esas2r/esas2r_main.c
1150
esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
drivers/scsi/esas2r/esas2r_main.c
1152
completed = esas2r_send_task_mgmt(a, rq, 0x10);
drivers/scsi/esas2r/esas2r_main.c
1158
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_main.c
1199
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_main.c
1201
u8 reqstatus = rq->req_stat;
drivers/scsi/esas2r/esas2r_main.c
1206
if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
drivers/scsi/esas2r/esas2r_main.c
1208
if (rq->func_rsp.scsi_rsp.sense_len >= 13) {
drivers/scsi/esas2r/esas2r_main.c
1211
rq->sense_buf[2], rq->sense_buf[12],
drivers/scsi/esas2r/esas2r_main.c
1212
rq->sense_buf[13],
drivers/scsi/esas2r/esas2r_main.c
1213
rq->vrq->scsi.cdb[0]);
drivers/scsi/esas2r/esas2r_main.c
1217
rq->vrq->scsi.cdb[0]);
drivers/scsi/esas2r/esas2r_main.c
1219
} else if ((rq->vrq->scsi.cdb[0] != INQUIRY
drivers/scsi/esas2r/esas2r_main.c
1220
&& rq->vrq->scsi.cdb[0] != REPORT_LUNS)
drivers/scsi/esas2r/esas2r_main.c
1224
(rq->vrq->scsi.cdb[0] == INQUIRY)) {
drivers/scsi/esas2r/esas2r_main.c
1229
rq->vrq->scsi.cdb[0], reqstatus,
drivers/scsi/esas2r/esas2r_main.c
1230
rq->target_id);
drivers/scsi/esas2r/esas2r_main.c
1236
void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_main.c
1242
timeout = rq->timeout ? rq->timeout : 5000;
drivers/scsi/esas2r/esas2r_main.c
1247
if (rq->req_stat != RS_STARTED)
drivers/scsi/esas2r/esas2r_main.c
1256
rq->req_stat = RS_TIMEOUT;
drivers/scsi/esas2r/esas2r_main.c
145
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_main.c
148
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_main.c
1484
void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_main.c
1488
esas2r_rq_destroy_request(rq, a);
drivers/scsi/esas2r/esas2r_main.c
149
if (rq == NULL)
drivers/scsi/esas2r/esas2r_main.c
1490
list_add(&rq->comp_list, &a->avail_request);
drivers/scsi/esas2r/esas2r_main.c
1496
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_main.c
1506
rq = list_first_entry(&a->avail_request, struct esas2r_request,
drivers/scsi/esas2r/esas2r_main.c
1508
list_del(&rq->comp_list);
drivers/scsi/esas2r/esas2r_main.c
1510
esas2r_rq_init_request(rq, a);
drivers/scsi/esas2r/esas2r_main.c
1512
return rq;
drivers/scsi/esas2r/esas2r_main.c
1517
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_main.c
1519
esas2r_debug("completing request %p\n", rq);
drivers/scsi/esas2r/esas2r_main.c
152
if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
drivers/scsi/esas2r/esas2r_main.c
1521
scsi_dma_unmap(rq->cmd);
drivers/scsi/esas2r/esas2r_main.c
1523
if (unlikely(rq->req_stat != RS_SUCCESS)) {
drivers/scsi/esas2r/esas2r_main.c
1524
esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
drivers/scsi/esas2r/esas2r_main.c
1525
rq->req_stat,
drivers/scsi/esas2r/esas2r_main.c
1526
rq->func_rsp.scsi_rsp.scsi_stat,
drivers/scsi/esas2r/esas2r_main.c
1527
rq->cmd);
drivers/scsi/esas2r/esas2r_main.c
1529
rq->cmd->result =
drivers/scsi/esas2r/esas2r_main.c
1530
((esas2r_req_status_to_error(rq->req_stat) << 16)
drivers/scsi/esas2r/esas2r_main.c
1531
| rq->func_rsp.scsi_rsp.scsi_stat);
drivers/scsi/esas2r/esas2r_main.c
1533
if (rq->req_stat == RS_UNDERRUN)
drivers/scsi/esas2r/esas2r_main.c
1534
scsi_set_resid(rq->cmd,
drivers/scsi/esas2r/esas2r_main.c
1535
le32_to_cpu(rq->func_rsp.scsi_rsp.
drivers/scsi/esas2r/esas2r_main.c
1538
scsi_set_resid(rq->cmd, 0);
drivers/scsi/esas2r/esas2r_main.c
1541
scsi_done(rq->cmd);
drivers/scsi/esas2r/esas2r_main.c
1543
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_main.c
155
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_main.c
825
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_main.c
838
rq = esas2r_alloc_request(a);
drivers/scsi/esas2r/esas2r_main.c
839
if (unlikely(rq == NULL)) {
drivers/scsi/esas2r/esas2r_main.c
844
rq->cmd = cmd;
drivers/scsi/esas2r/esas2r_main.c
849
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
drivers/scsi/esas2r/esas2r_main.c
851
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
drivers/scsi/esas2r/esas2r_main.c
854
memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
drivers/scsi/esas2r/esas2r_main.c
855
rq->vrq->scsi.length = cpu_to_le32(bufflen);
drivers/scsi/esas2r/esas2r_main.c
856
rq->target_id = cmd->device->id;
drivers/scsi/esas2r/esas2r_main.c
857
rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
drivers/scsi/esas2r/esas2r_main.c
858
rq->sense_buf = cmd->sense_buffer;
drivers/scsi/esas2r/esas2r_main.c
859
rq->sense_len = SCSI_SENSE_BUFFERSIZE;
drivers/scsi/esas2r/esas2r_main.c
861
esas2r_sgc_init(&sgc, a, rq, NULL);
drivers/scsi/esas2r/esas2r_main.c
872
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_main.c
878
if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
drivers/scsi/esas2r/esas2r_main.c
880
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_main.c
884
esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
drivers/scsi/esas2r/esas2r_main.c
887
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_main.c
893
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_main.c
895
(*rq->task_management_status_ptr) = rq->req_stat;
drivers/scsi/esas2r/esas2r_main.c
896
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_main.c
912
struct esas2r_request *rq;
drivers/scsi/esas2r/esas2r_main.c
917
rq = list_entry(element, struct esas2r_request, req_list);
drivers/scsi/esas2r/esas2r_main.c
919
if (rq->cmd == cmd) {
drivers/scsi/esas2r/esas2r_main.c
943
ar->target_id = rq->target_id;
drivers/scsi/esas2r/esas2r_main.c
945
(u8)le32_to_cpu(rq->vrq->scsi.flags));
drivers/scsi/esas2r/esas2r_main.c
953
rq->vrq->scsi.handle;
drivers/scsi/esas2r/esas2r_main.c
960
list_del_init(&rq->req_list);
drivers/scsi/esas2r/esas2r_main.c
961
esas2r_free_request(a, rq);
drivers/scsi/esas2r/esas2r_vda.c
112
rq->vrq->flash.length = cpu_to_le32(datalen);
drivers/scsi/esas2r/esas2r_vda.c
113
rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
drivers/scsi/esas2r/esas2r_vda.c
115
memcpy(rq->vrq->flash.data.file.file_name,
drivers/scsi/esas2r/esas2r_vda.c
119
firstsg = rq->vrq->flash.data.file.sge;
drivers/scsi/esas2r/esas2r_vda.c
126
rq->vrq->cli.cmd_rsp_len =
drivers/scsi/esas2r/esas2r_vda.c
128
rq->vrq->cli.length = cpu_to_le32(datalen);
drivers/scsi/esas2r/esas2r_vda.c
130
firstsg = rq->vrq->cli.sge;
drivers/scsi/esas2r/esas2r_vda.c
152
rq->vrq->mgt.payld_sglst_offset =
drivers/scsi/esas2r/esas2r_vda.c
169
rq->vrq->mgt.length = cpu_to_le32(datalen);
drivers/scsi/esas2r/esas2r_vda.c
172
rq->vrq->mgt.payld_length =
drivers/scsi/esas2r/esas2r_vda.c
175
esas2r_sgc_init(sgc, a, rq,
drivers/scsi/esas2r/esas2r_vda.c
176
rq->vrq->mgt.payld_sge);
drivers/scsi/esas2r/esas2r_vda.c
179
if (!esas2r_build_sg_list(a, rq, sgc)) {
drivers/scsi/esas2r/esas2r_vda.c
187
rq->vrq->mgt.length = cpu_to_le32(datalen);
drivers/scsi/esas2r/esas2r_vda.c
194
firstsg = rq->vrq->mgt.sge;
drivers/scsi/esas2r/esas2r_vda.c
198
rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
drivers/scsi/esas2r/esas2r_vda.c
199
rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
drivers/scsi/esas2r/esas2r_vda.c
200
rq->vrq->mgt.dev_index =
drivers/scsi/esas2r/esas2r_vda.c
203
esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
drivers/scsi/esas2r/esas2r_vda.c
220
rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
drivers/scsi/esas2r/esas2r_vda.c
221
rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
drivers/scsi/esas2r/esas2r_vda.c
224
memcpy(&rq->vrq->cfg.data,
drivers/scsi/esas2r/esas2r_vda.c
228
esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
drivers/scsi/esas2r/esas2r_vda.c
229
&rq->vrq->cfg.data);
drivers/scsi/esas2r/esas2r_vda.c
255
esas2r_sgc_init(sgc, a, rq, firstsg);
drivers/scsi/esas2r/esas2r_vda.c
258
if (!esas2r_build_sg_list(a, rq, sgc)) {
drivers/scsi/esas2r/esas2r_vda.c
264
esas2r_start_request(a, rq);
drivers/scsi/esas2r/esas2r_vda.c
270
struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_vda.c
272
struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
drivers/scsi/esas2r/esas2r_vda.c
274
vi->vda_status = rq->req_stat;
drivers/scsi/esas2r/esas2r_vda.c
282
le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
drivers/scsi/esas2r/esas2r_vda.c
289
rq->func_rsp.mgt_rsp.scan_generation;
drivers/scsi/esas2r/esas2r_vda.c
291
rq->func_rsp.mgt_rsp.dev_index);
drivers/scsi/esas2r/esas2r_vda.c
295
le32_to_cpu(rq->func_rsp.mgt_rsp.length);
drivers/scsi/esas2r/esas2r_vda.c
297
esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
drivers/scsi/esas2r/esas2r_vda.c
304
struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
drivers/scsi/esas2r/esas2r_vda.c
327
esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
drivers/scsi/esas2r/esas2r_vda.c
336
le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
drivers/scsi/esas2r/esas2r_vda.c
347
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_vda.c
353
struct atto_vda_flash_req *vrq = &rq->vrq->flash;
drivers/scsi/esas2r/esas2r_vda.c
355
clear_vda_request(rq);
drivers/scsi/esas2r/esas2r_vda.c
357
rq->vrq->scsi.function = VDA_FUNC_FLASH;
drivers/scsi/esas2r/esas2r_vda.c
373
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_vda.c
380
struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
drivers/scsi/esas2r/esas2r_vda.c
382
clear_vda_request(rq);
drivers/scsi/esas2r/esas2r_vda.c
384
rq->vrq->scsi.function = VDA_FUNC_MGT;
drivers/scsi/esas2r/esas2r_vda.c
398
rq->vrq_md->phys_addr +
drivers/scsi/esas2r/esas2r_vda.c
406
rq->vrq_md->phys_addr +
drivers/scsi/esas2r/esas2r_vda.c
414
memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
drivers/scsi/esas2r/esas2r_vda.c
420
void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_vda.c
422
struct atto_vda_ae_req *vrq = &rq->vrq->ae;
drivers/scsi/esas2r/esas2r_vda.c
424
clear_vda_request(rq);
drivers/scsi/esas2r/esas2r_vda.c
426
rq->vrq->scsi.function = VDA_FUNC_AE;
drivers/scsi/esas2r/esas2r_vda.c
435
rq->vrq_md->phys_addr +
drivers/scsi/esas2r/esas2r_vda.c
442
rq->vrq_md->phys_addr +
drivers/scsi/esas2r/esas2r_vda.c
449
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_vda.c
453
struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
drivers/scsi/esas2r/esas2r_vda.c
455
clear_vda_request(rq);
drivers/scsi/esas2r/esas2r_vda.c
457
rq->vrq->scsi.function = VDA_FUNC_IOCTL;
drivers/scsi/esas2r/esas2r_vda.c
466
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_vda.c
471
struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
drivers/scsi/esas2r/esas2r_vda.c
473
clear_vda_request(rq);
drivers/scsi/esas2r/esas2r_vda.c
475
rq->vrq->scsi.function = VDA_FUNC_CFG;
drivers/scsi/esas2r/esas2r_vda.c
487
static void clear_vda_request(struct esas2r_request *rq)
drivers/scsi/esas2r/esas2r_vda.c
489
u32 handle = rq->vrq->scsi.handle;
drivers/scsi/esas2r/esas2r_vda.c
491
memset(rq->vrq, 0, sizeof(*rq->vrq));
drivers/scsi/esas2r/esas2r_vda.c
493
rq->vrq->scsi.handle = handle;
drivers/scsi/esas2r/esas2r_vda.c
495
rq->req_stat = RS_PENDING;
drivers/scsi/esas2r/esas2r_vda.c
499
memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
drivers/scsi/esas2r/esas2r_vda.c
506
INIT_LIST_HEAD(&rq->req_list);
drivers/scsi/esas2r/esas2r_vda.c
59
static void clear_vda_request(struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_vda.c
62
struct esas2r_request *rq);
drivers/scsi/esas2r/esas2r_vda.c
67
struct esas2r_request *rq,
drivers/scsi/esas2r/esas2r_vda.c
93
clear_vda_request(rq);
drivers/scsi/esas2r/esas2r_vda.c
95
rq->vrq->scsi.function = vi->function;
drivers/scsi/esas2r/esas2r_vda.c
96
rq->interrupt_cb = esas2r_complete_vda_ioctl;
drivers/scsi/esas2r/esas2r_vda.c
97
rq->interrupt_cx = vi;
drivers/scsi/fnic/fnic.h
476
____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
drivers/scsi/fnic/fnic.h
501
int fnic_alloc_rq_frame(struct vnic_rq *rq);
drivers/scsi/fnic/fnic.h
502
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
drivers/scsi/fnic/fnic_fcs.c
421
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
drivers/scsi/fnic/fnic_fcs.c
426
struct fnic *fnic = vnic_dev_priv(rq->vdev);
drivers/scsi/fnic/fnic_fcs.c
551
vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
drivers/scsi/fnic/fnic_fcs.c
568
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
drivers/scsi/fnic/fnic_fcs.c
585
int fnic_alloc_rq_frame(struct vnic_rq *rq)
drivers/scsi/fnic/fnic_fcs.c
587
struct fnic *fnic = vnic_dev_priv(rq->vdev);
drivers/scsi/fnic/fnic_fcs.c
609
fnic_queue_rq_desc(rq, buf, pa, len);
drivers/scsi/fnic/fnic_fcs.c
616
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
drivers/scsi/fnic/fnic_fcs.c
619
struct fnic *fnic = vnic_dev_priv(rq->vdev);
drivers/scsi/fnic/fnic_isr.c
244
unsigned int n = ARRAY_SIZE(fnic->rq);
drivers/scsi/fnic/fnic_main.c
1036
if (!ioread32(&fnic->rq[i].ctrl->enable))
drivers/scsi/fnic/fnic_main.c
1037
vnic_rq_enable(&fnic->rq[i]);
drivers/scsi/fnic/fnic_main.c
1082
if (ioread32(&fnic->rq[i].ctrl->enable))
drivers/scsi/fnic/fnic_main.c
1083
vnic_rq_disable(&fnic->rq[i]);
drivers/scsi/fnic/fnic_main.c
1084
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
drivers/scsi/fnic/fnic_main.c
395
error_status = ioread32(&fnic->rq[i].ctrl->error_status);
drivers/scsi/fnic/fnic_main.c
519
err = vnic_rq_disable(&fnic->rq[i]);
drivers/scsi/fnic/fnic_main.c
544
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
drivers/scsi/fnic/fnic_main.c
974
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
drivers/scsi/fnic/fnic_res.c
218
vnic_rq_free(&fnic->rq[i]);
drivers/scsi/fnic/fnic_res.c
275
err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
drivers/scsi/fnic/fnic_res.c
356
vnic_rq_init(&fnic->rq[i],
drivers/scsi/fnic/fnic_res.h
211
static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
drivers/scsi/fnic/fnic_res.h
215
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
drivers/scsi/fnic/fnic_res.h
222
vnic_rq_post(rq, os_buf, 0, dma_addr, len);
drivers/scsi/fnic/fnic_scsi.c
1520
struct request *const rq = scsi_cmd_to_rq(sc);
drivers/scsi/fnic/fnic_scsi.c
1530
mqtag = blk_mq_unique_tag(rq);
drivers/scsi/fnic/fnic_scsi.c
1765
struct request *const rq = scsi_cmd_to_rq(sc);
drivers/scsi/fnic/fnic_scsi.c
1777
abt_tag = blk_mq_unique_tag(rq);
drivers/scsi/fnic/fnic_scsi.c
2005
struct request *const rq = scsi_cmd_to_rq(sc);
drivers/scsi/fnic/fnic_scsi.c
2041
mqtag = blk_mq_unique_tag(rq);
drivers/scsi/fnic/fnic_scsi.c
2352
struct request *const rq = scsi_cmd_to_rq(sc);
drivers/scsi/fnic/fnic_scsi.c
2367
abt_tag = blk_mq_unique_tag(rq);
drivers/scsi/fnic/fnic_scsi.c
2540
struct request *rq = scsi_cmd_to_rq(sc);
drivers/scsi/fnic/fnic_scsi.c
2552
int mqtag = rq->tag;
drivers/scsi/fnic/fnic_scsi.c
2630
mqtag = blk_mq_unique_tag(rq);
drivers/scsi/fnic/fnic_scsi.c
2804
FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
drivers/scsi/fnic/fnic_scsi.c
2950
struct request *const rq = scsi_cmd_to_rq(sc);
drivers/scsi/fnic/fnic_scsi.c
2959
tag = blk_mq_unique_tag(rq);
drivers/scsi/fnic/fnic_scsi.c
460
struct request *const rq = scsi_cmd_to_rq(sc);
drivers/scsi/fnic/fnic_scsi.c
499
mqtag = blk_mq_unique_tag(rq);
drivers/scsi/fnic/vnic_rq.c
106
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
drivers/scsi/fnic/vnic_rq.c
107
writeq(paddr, &rq->ctrl->ring_base);
drivers/scsi/fnic/vnic_rq.c
108
iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
drivers/scsi/fnic/vnic_rq.c
109
iowrite32(cq_index, &rq->ctrl->cq_index);
drivers/scsi/fnic/vnic_rq.c
110
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
drivers/scsi/fnic/vnic_rq.c
111
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
drivers/scsi/fnic/vnic_rq.c
112
iowrite32(0, &rq->ctrl->dropped_packet_count);
drivers/scsi/fnic/vnic_rq.c
113
iowrite32(0, &rq->ctrl->error_status);
drivers/scsi/fnic/vnic_rq.c
116
fetch_index = ioread32(&rq->ctrl->fetch_index);
drivers/scsi/fnic/vnic_rq.c
117
rq->to_use = rq->to_clean =
drivers/scsi/fnic/vnic_rq.c
118
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
drivers/scsi/fnic/vnic_rq.c
120
iowrite32(fetch_index, &rq->ctrl->posted_index);
drivers/scsi/fnic/vnic_rq.c
122
rq->buf_index = 0;
drivers/scsi/fnic/vnic_rq.c
125
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.c
127
return ioread32(&rq->ctrl->error_status);
drivers/scsi/fnic/vnic_rq.c
130
void vnic_rq_enable(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.c
132
iowrite32(1, &rq->ctrl->enable);
drivers/scsi/fnic/vnic_rq.c
135
int vnic_rq_disable(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.c
139
iowrite32(0, &rq->ctrl->enable);
drivers/scsi/fnic/vnic_rq.c
143
if (!(ioread32(&rq->ctrl->running)))
drivers/scsi/fnic/vnic_rq.c
148
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
drivers/scsi/fnic/vnic_rq.c
15
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.c
153
void vnic_rq_clean(struct vnic_rq *rq,
drivers/scsi/fnic/vnic_rq.c
154
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
drivers/scsi/fnic/vnic_rq.c
159
WARN_ON(ioread32(&rq->ctrl->enable));
drivers/scsi/fnic/vnic_rq.c
161
buf = rq->to_clean;
drivers/scsi/fnic/vnic_rq.c
163
while (vnic_rq_desc_used(rq) > 0) {
drivers/scsi/fnic/vnic_rq.c
165
(*buf_clean)(rq, buf);
drivers/scsi/fnic/vnic_rq.c
167
buf = rq->to_clean = buf->next;
drivers/scsi/fnic/vnic_rq.c
168
rq->ring.desc_avail++;
drivers/scsi/fnic/vnic_rq.c
172
fetch_index = ioread32(&rq->ctrl->fetch_index);
drivers/scsi/fnic/vnic_rq.c
173
rq->to_use = rq->to_clean =
drivers/scsi/fnic/vnic_rq.c
174
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
drivers/scsi/fnic/vnic_rq.c
176
iowrite32(fetch_index, &rq->ctrl->posted_index);
drivers/scsi/fnic/vnic_rq.c
178
rq->buf_index = 0;
drivers/scsi/fnic/vnic_rq.c
18
unsigned int i, j, count = rq->ring.desc_count;
drivers/scsi/fnic/vnic_rq.c
180
vnic_dev_clear_desc_ring(&rq->ring);
drivers/scsi/fnic/vnic_rq.c
22
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
drivers/scsi/fnic/vnic_rq.c
23
if (!rq->bufs[i]) {
drivers/scsi/fnic/vnic_rq.c
30
buf = rq->bufs[i];
drivers/scsi/fnic/vnic_rq.c
33
buf->desc = (u8 *)rq->ring.descs +
drivers/scsi/fnic/vnic_rq.c
34
rq->ring.desc_size * buf->index;
drivers/scsi/fnic/vnic_rq.c
36
buf->next = rq->bufs[0];
drivers/scsi/fnic/vnic_rq.c
39
buf->next = rq->bufs[i + 1];
drivers/scsi/fnic/vnic_rq.c
47
rq->to_use = rq->to_clean = rq->bufs[0];
drivers/scsi/fnic/vnic_rq.c
48
rq->buf_index = 0;
drivers/scsi/fnic/vnic_rq.c
53
void vnic_rq_free(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.c
58
vdev = rq->vdev;
drivers/scsi/fnic/vnic_rq.c
60
vnic_dev_free_desc_ring(vdev, &rq->ring);
drivers/scsi/fnic/vnic_rq.c
63
kfree(rq->bufs[i]);
drivers/scsi/fnic/vnic_rq.c
64
rq->bufs[i] = NULL;
drivers/scsi/fnic/vnic_rq.c
67
rq->ctrl = NULL;
drivers/scsi/fnic/vnic_rq.c
70
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
drivers/scsi/fnic/vnic_rq.c
75
rq->index = index;
drivers/scsi/fnic/vnic_rq.c
76
rq->vdev = vdev;
drivers/scsi/fnic/vnic_rq.c
78
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
drivers/scsi/fnic/vnic_rq.c
79
if (!rq->ctrl) {
drivers/scsi/fnic/vnic_rq.c
84
vnic_rq_disable(rq);
drivers/scsi/fnic/vnic_rq.c
86
err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
drivers/scsi/fnic/vnic_rq.c
90
err = vnic_rq_alloc_bufs(rq);
drivers/scsi/fnic/vnic_rq.c
92
vnic_rq_free(rq);
drivers/scsi/fnic/vnic_rq.c
99
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
drivers/scsi/fnic/vnic_rq.h
102
return rq->ring.desc_count - rq->ring.desc_avail - 1;
drivers/scsi/fnic/vnic_rq.h
105
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.h
107
return rq->to_use->desc;
drivers/scsi/fnic/vnic_rq.h
110
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.h
112
return rq->to_use->index;
drivers/scsi/fnic/vnic_rq.h
115
static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.h
117
return rq->buf_index++;
drivers/scsi/fnic/vnic_rq.h
120
static inline void vnic_rq_post(struct vnic_rq *rq,
drivers/scsi/fnic/vnic_rq.h
124
struct vnic_rq_buf *buf = rq->to_use;
drivers/scsi/fnic/vnic_rq.h
132
rq->to_use = buf;
drivers/scsi/fnic/vnic_rq.h
133
rq->ring.desc_avail--;
drivers/scsi/fnic/vnic_rq.h
149
iowrite32(buf->index, &rq->ctrl->posted_index);
drivers/scsi/fnic/vnic_rq.h
153
static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.h
155
return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
drivers/scsi/fnic/vnic_rq.h
158
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
drivers/scsi/fnic/vnic_rq.h
160
rq->ring.desc_avail += count;
drivers/scsi/fnic/vnic_rq.h
168
static inline void vnic_rq_service(struct vnic_rq *rq,
drivers/scsi/fnic/vnic_rq.h
170
int desc_return, void (*buf_service)(struct vnic_rq *rq,
drivers/scsi/fnic/vnic_rq.h
177
buf = rq->to_clean;
drivers/scsi/fnic/vnic_rq.h
182
(*buf_service)(rq, cq_desc, buf, skipped, opaque);
drivers/scsi/fnic/vnic_rq.h
185
rq->ring.desc_avail++;
drivers/scsi/fnic/vnic_rq.h
187
rq->to_clean = buf->next;
drivers/scsi/fnic/vnic_rq.h
192
buf = rq->to_clean;
drivers/scsi/fnic/vnic_rq.h
196
static inline int vnic_rq_fill(struct vnic_rq *rq,
drivers/scsi/fnic/vnic_rq.h
197
int (*buf_fill)(struct vnic_rq *rq))
drivers/scsi/fnic/vnic_rq.h
201
while (vnic_rq_desc_avail(rq) > 1) {
drivers/scsi/fnic/vnic_rq.h
203
err = (*buf_fill)(rq);
drivers/scsi/fnic/vnic_rq.h
211
void vnic_rq_free(struct vnic_rq *rq);
drivers/scsi/fnic/vnic_rq.h
212
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
drivers/scsi/fnic/vnic_rq.h
214
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
drivers/scsi/fnic/vnic_rq.h
217
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
drivers/scsi/fnic/vnic_rq.h
218
void vnic_rq_enable(struct vnic_rq *rq);
drivers/scsi/fnic/vnic_rq.h
219
int vnic_rq_disable(struct vnic_rq *rq);
drivers/scsi/fnic/vnic_rq.h
220
void vnic_rq_clean(struct vnic_rq *rq,
drivers/scsi/fnic/vnic_rq.h
221
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
drivers/scsi/fnic/vnic_rq.h
93
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
drivers/scsi/fnic/vnic_rq.h
96
return rq->ring.desc_avail;
drivers/scsi/fnic/vnic_rq.h
99
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
drivers/scsi/hisi_sas/hisi_sas_main.c
212
struct request *rq)
drivers/scsi/hisi_sas/hisi_sas_main.c
217
if (rq)
drivers/scsi/hisi_sas/hisi_sas_main.c
218
return rq->tag + HISI_SAS_RESERVED_IPTT;
drivers/scsi/hisi_sas/hisi_sas_main.c
504
struct request *rq = NULL;
drivers/scsi/hisi_sas/hisi_sas_main.c
559
rq = sas_task_find_rq(task);
drivers/scsi/hisi_sas/hisi_sas_main.c
560
if (rq) {
drivers/scsi/hisi_sas/hisi_sas_main.c
564
blk_tag = blk_mq_unique_tag(rq);
drivers/scsi/hisi_sas/hisi_sas_main.c
620
rc = hisi_sas_slot_index_alloc(hisi_hba, rq);
drivers/scsi/hosts.c
610
static bool scsi_host_check_in_flight(struct request *rq, void *data)
drivers/scsi/hosts.c
613
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/hosts.c
705
static bool complete_all_cmds_iter(struct request *rq, void *data)
drivers/scsi/hosts.c
707
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/hpsa.c
1000
rq->current_entry++;
drivers/scsi/hpsa.c
1006
if (rq->current_entry == h->max_commands) {
drivers/scsi/hpsa.c
1007
rq->current_entry = 0;
drivers/scsi/hpsa.c
1008
rq->wraparound ^= 1;
drivers/scsi/hpsa.c
5974
int rq;
drivers/scsi/hpsa.c
5993
for (rq = first_queue; rq <= last_queue; rq++) {
drivers/scsi/hpsa.c
5994
rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
drivers/scsi/hpsa.c
990
struct reply_queue_buffer *rq = &h->reply_queue[q];
drivers/scsi/hpsa.c
998
if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
drivers/scsi/hpsa.c
999
a = rq->head[rq->current_entry];
drivers/scsi/hpsa.h
491
struct reply_queue_buffer *rq = &h->reply_queue[q];
drivers/scsi/hpsa.h
507
if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
drivers/scsi/hpsa.h
508
register_value = rq->head[rq->current_entry];
drivers/scsi/hpsa.h
509
rq->current_entry++;
drivers/scsi/hpsa.h
515
if (rq->current_entry == h->max_commands) {
drivers/scsi/hpsa.h
516
rq->current_entry = 0;
drivers/scsi/hpsa.h
517
rq->wraparound ^= 1;
drivers/scsi/hpsa.h
593
struct reply_queue_buffer *rq = &h->reply_queue[q];
drivers/scsi/hpsa.h
597
register_value = rq->head[rq->current_entry];
drivers/scsi/hpsa.h
599
rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
drivers/scsi/hpsa.h
600
if (++rq->current_entry == rq->size)
drivers/scsi/hpsa.h
601
rq->current_entry = 0;
drivers/scsi/hpsa.h
609
writel((q << 24) | rq->current_entry, h->vaddr +
drivers/scsi/lpfc/lpfc_ct.c
3201
struct lpfc_dmabuf *rq, *rsp;
drivers/scsi/lpfc/lpfc_ct.c
3223
rq = kmalloc_obj(*rq);
drivers/scsi/lpfc/lpfc_ct.c
3224
if (!rq)
drivers/scsi/lpfc/lpfc_ct.c
3227
rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
drivers/scsi/lpfc/lpfc_ct.c
3228
if (!rq->virt)
drivers/scsi/lpfc/lpfc_ct.c
3240
INIT_LIST_HEAD(&rq->list);
drivers/scsi/lpfc/lpfc_ct.c
3244
memset(rq->virt, 0, LPFC_BPL_SIZE);
drivers/scsi/lpfc/lpfc_ct.c
3253
CtReq = (struct lpfc_sli_ct_request *)rq->virt;
drivers/scsi/lpfc/lpfc_ct.c
3421
bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
drivers/scsi/lpfc/lpfc_ct.c
3422
bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
drivers/scsi/lpfc/lpfc_ct.c
3431
if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
drivers/scsi/lpfc/lpfc_ct.c
3439
lpfc_mbuf_free(phba, rq->virt, rq->phys);
drivers/scsi/lpfc/lpfc_ct.c
3441
kfree(rq);
drivers/scsi/lpfc/lpfc_init.c
10911
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
drivers/scsi/lpfc/lpfc_init.c
10917
rqbp = rq->rqbp;
drivers/scsi/mpi3mr/mpi3mr_os.c
4575
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/mpi3mr/mpi3mr_os.c
4584
mrioc->name, scmd, scmd_age_sec, scmd_age_ms % HZ, rq->timeout / HZ,
drivers/scsi/mpi3mr/mpi3mr_os.c
491
static bool mpi3mr_print_scmd(struct request *rq, void *data)
drivers/scsi/mpi3mr/mpi3mr_os.c
494
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/mpi3mr/mpi3mr_os.c
5136
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/mpi3mr/mpi3mr_os.c
522
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
drivers/scsi/mpi3mr/mpi3mr_os.c
5227
iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
drivers/scsi/mpi3mr/mpi3mr_os.c
525
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/mpi3mr/mpi3mr_os.c
561
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
drivers/scsi/mpi3mr/mpi3mr_os.c
565
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/mpi3mr/mpi3mr_os.c
593
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
drivers/scsi/mpi3mr/mpi3mr_os.c
597
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
5430
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/mpt3sas/mpt3sas_scsih.c
5513
class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
drivers/scsi/mvsas/mv_sas.c
687
struct request *rq;
drivers/scsi/mvsas/mv_sas.c
752
rq = sas_task_find_rq(task);
drivers/scsi/mvsas/mv_sas.c
753
if (rq) {
drivers/scsi/mvsas/mv_sas.c
754
tag = rq->tag + MVS_RSVD_SLOTS;
drivers/scsi/myrb.c
1266
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/myrb.c
1290
mbox->type3.id = rq->tag + 3;
drivers/scsi/myrb.c
1309
if (rq->timeout <= 10)
drivers/scsi/myrb.c
1311
else if (rq->timeout <= 60)
drivers/scsi/myrb.c
1313
else if (rq->timeout <= 600)
drivers/scsi/myrs.c
1587
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/myrs.c
1634
timeout = rq->timeout;
drivers/scsi/myrs.c
1650
mbox->SCSI_10.id = rq->tag + 3;
drivers/scsi/myrs.c
1653
if (rq->cmd_flags & REQ_FUA)
drivers/scsi/myrs.c
1696
mbox->SCSI_255.id = rq->tag + 3;
drivers/scsi/myrs.c
1699
if (rq->cmd_flags & REQ_FUA)
drivers/scsi/pm8001/pm8001_sas.h
747
struct request *rq = NULL;
drivers/scsi/pm8001/pm8001_sas.h
751
rq = sas_task_find_rq(task);
drivers/scsi/pm8001/pm8001_sas.h
753
if (rq) {
drivers/scsi/pm8001/pm8001_sas.h
754
tag = rq->tag + PM8001_RESERVE_SLOT;
drivers/scsi/pm8001/pm80xx_hwi.c
4324
struct request *rq = sas_task_find_rq(task);
drivers/scsi/pm8001/pm80xx_hwi.c
4326
if (!rq)
drivers/scsi/pm8001/pm80xx_hwi.c
4329
return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(rq));
drivers/scsi/scsi_bsg.c
16
struct request *rq;
drivers/scsi/scsi_bsg.c
28
rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
drivers/scsi/scsi_bsg.c
30
if (IS_ERR(rq))
drivers/scsi/scsi_bsg.c
31
return PTR_ERR(rq);
drivers/scsi/scsi_bsg.c
32
rq->timeout = timeout;
drivers/scsi/scsi_bsg.c
34
scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_bsg.c
50
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
drivers/scsi/scsi_bsg.c
53
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
drivers/scsi/scsi_bsg.c
60
bio = rq->bio;
drivers/scsi/scsi_bsg.c
61
blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
drivers/scsi/scsi_bsg.c
87
if (rq_data_dir(rq) == READ)
drivers/scsi/scsi_bsg.c
95
blk_mq_free_request(rq);
drivers/scsi/scsi_debug.c
6780
struct request *rq = scsi_cmd_to_rq(cmnd);
drivers/scsi/scsi_debug.c
6781
u32 unique_tag = blk_mq_unique_tag(rq);
drivers/scsi/scsi_debug.c
6809
static bool sdebug_stop_cmnd(struct request *rq, void *data)
drivers/scsi/scsi_debug.c
6811
scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
drivers/scsi/scsi_debug.c
6884
static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
drivers/scsi/scsi_debug.c
6887
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_debug.c
7188
struct request *rq = scsi_cmd_to_rq(cmnd);
drivers/scsi/scsi_debug.c
7189
bool polled = rq->cmd_flags & REQ_POLLED;
drivers/scsi/scsi_debug.c
7543
static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
drivers/scsi/scsi_debug.c
7546
u32 unique_tag = blk_mq_unique_tag(rq);
drivers/scsi/scsi_debug.c
9097
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
drivers/scsi/scsi_debug.c
9100
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_debug.c
9103
u32 unique_tag = blk_mq_unique_tag(rq);
drivers/scsi/scsi_debugfs.c
54
void scsi_show_rq(struct seq_file *m, struct request *rq)
drivers/scsi/scsi_debugfs.c
56
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_debugfs.c
58
int timeout_ms = jiffies_to_msecs(rq->timeout);
drivers/scsi/scsi_debugfs.h
5
void scsi_show_rq(struct seq_file *m, struct request *rq);
drivers/scsi/scsi_error.c
1076
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/scsi_error.c
1134
ses->rq_crypt_keyslot = rq->crypt_keyslot;
drivers/scsi/scsi_error.c
1135
ses->rq_crypt_ctx = rq->crypt_ctx;
drivers/scsi/scsi_error.c
1137
rq->crypt_keyslot = NULL;
drivers/scsi/scsi_error.c
1138
rq->crypt_ctx = NULL;
drivers/scsi/scsi_error.c
1159
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/scsi_error.c
1176
rq->crypt_keyslot = ses->rq_crypt_keyslot;
drivers/scsi/scsi_error.c
1177
rq->crypt_ctx = ses->rq_crypt_ctx;
drivers/scsi/scsi_error.c
2499
struct request *rq;
drivers/scsi/scsi_error.c
2515
rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
drivers/scsi/scsi_error.c
2517
if (!rq)
drivers/scsi/scsi_error.c
2519
blk_rq_init(NULL, rq);
drivers/scsi/scsi_error.c
2521
scmd = (struct scsi_cmnd *)(rq + 1);
drivers/scsi/scsi_error.c
2582
kfree(rq);
drivers/scsi/scsi_ioctl.c
364
static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
drivers/scsi/scsi_ioctl.c
367
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_ioctl.c
377
rq->timeout = msecs_to_jiffies(hdr->timeout);
drivers/scsi/scsi_ioctl.c
378
if (!rq->timeout)
drivers/scsi/scsi_ioctl.c
379
rq->timeout = sdev->sg_timeout;
drivers/scsi/scsi_ioctl.c
380
if (!rq->timeout)
drivers/scsi/scsi_ioctl.c
381
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
drivers/scsi/scsi_ioctl.c
382
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
drivers/scsi/scsi_ioctl.c
383
rq->timeout = BLK_MIN_SG_TIMEOUT;
drivers/scsi/scsi_ioctl.c
388
static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
drivers/scsi/scsi_ioctl.c
391
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_ioctl.c
433
struct request *rq;
drivers/scsi/scsi_ioctl.c
457
rq = scsi_alloc_request(sdev->request_queue, writing ?
drivers/scsi/scsi_ioctl.c
459
if (IS_ERR(rq))
drivers/scsi/scsi_ioctl.c
460
return PTR_ERR(rq);
drivers/scsi/scsi_ioctl.c
461
scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_ioctl.c
468
ret = scsi_fill_sghdr_rq(sdev, rq, hdr, open_for_write);
drivers/scsi/scsi_ioctl.c
472
ret = blk_rq_map_user_io(rq, NULL, hdr->dxferp, hdr->dxfer_len,
drivers/scsi/scsi_ioctl.c
474
hdr->iovec_count, 0, rq_data_dir(rq));
drivers/scsi/scsi_ioctl.c
478
bio = rq->bio;
drivers/scsi/scsi_ioctl.c
483
blk_execute_rq(rq, at_head);
drivers/scsi/scsi_ioctl.c
487
ret = scsi_complete_sghdr_rq(rq, hdr, bio);
drivers/scsi/scsi_ioctl.c
490
blk_mq_free_request(rq);
drivers/scsi/scsi_ioctl.c
523
struct request *rq;
drivers/scsi/scsi_ioctl.c
552
rq = scsi_alloc_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
drivers/scsi/scsi_ioctl.c
553
if (IS_ERR(rq)) {
drivers/scsi/scsi_ioctl.c
554
err = PTR_ERR(rq);
drivers/scsi/scsi_ioctl.c
557
scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_ioctl.c
582
rq->timeout = FORMAT_UNIT_TIMEOUT;
drivers/scsi/scsi_ioctl.c
586
rq->timeout = START_STOP_TIMEOUT;
drivers/scsi/scsi_ioctl.c
589
rq->timeout = MOVE_MEDIUM_TIMEOUT;
drivers/scsi/scsi_ioctl.c
592
rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
drivers/scsi/scsi_ioctl.c
595
rq->timeout = READ_DEFECT_DATA_TIMEOUT;
drivers/scsi/scsi_ioctl.c
599
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
drivers/scsi/scsi_ioctl.c
604
err = blk_rq_map_kern(rq, buffer, bytes, GFP_NOIO);
drivers/scsi/scsi_ioctl.c
609
blk_execute_rq(rq, false);
drivers/scsi/scsi_ioctl.c
625
blk_mq_free_request(rq);
drivers/scsi/scsi_lib.c
1111
struct request *rq)
drivers/scsi/scsi_lib.c
1113
return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
drivers/scsi/scsi_lib.c
1114
!op_is_write(req_op(rq)) &&
drivers/scsi/scsi_lib.c
1115
sdev->host->hostt->dma_need_drain(rq);
drivers/scsi/scsi_lib.c
1133
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/scsi_lib.c
1134
unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
drivers/scsi/scsi_lib.c
1137
bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
drivers/scsi/scsi_lib.c
116
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/scsi_lib.c
1161
count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
drivers/scsi/scsi_lib.c
1163
if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
drivers/scsi/scsi_lib.c
1165
(rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
drivers/scsi/scsi_lib.c
118
if (rq->rq_flags & RQF_DONTPREP) {
drivers/scsi/scsi_lib.c
1183
cmd->sdb.length = blk_rq_payload_bytes(rq);
drivers/scsi/scsi_lib.c
1185
if (blk_integrity_rq(rq)) {
drivers/scsi/scsi_lib.c
119
rq->rq_flags &= ~RQF_DONTPREP;
drivers/scsi/scsi_lib.c
1199
rq->nr_integrity_segments,
drivers/scsi/scsi_lib.c
1206
count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
drivers/scsi/scsi_lib.c
1226
static void scsi_initialize_rq(struct request *rq)
drivers/scsi/scsi_lib.c
1228
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_lib.c
125
blk_mq_requeue_request(rq, false);
drivers/scsi/scsi_lib.c
1250
struct request *rq;
drivers/scsi/scsi_lib.c
1252
rq = blk_mq_alloc_request(q, opf, flags);
drivers/scsi/scsi_lib.c
1253
if (!IS_ERR(rq))
drivers/scsi/scsi_lib.c
1254
scsi_initialize_rq(rq);
drivers/scsi/scsi_lib.c
1255
return rq;
drivers/scsi/scsi_lib.c
1263
static void scsi_cleanup_rq(struct request *rq)
drivers/scsi/scsi_lib.c
1265
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_lib.c
1269
if (rq->rq_flags & RQF_DONTPREP) {
drivers/scsi/scsi_lib.c
127
blk_mq_delay_kick_requeue_list(rq->q, msecs);
drivers/scsi/scsi_lib.c
1271
rq->rq_flags &= ~RQF_DONTPREP;
drivers/scsi/scsi_lib.c
1278
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/scsi_lib.c
1280
if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
drivers/scsi/scsi_lib.c
1282
scsi_initialize_rq(rq);
drivers/scsi/scsi_lib.c
1541
static void scsi_complete(struct request *rq)
drivers/scsi/scsi_lib.c
1543
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_lib.c
1546
if (blk_mq_is_reserved_rq(rq)) {
drivers/scsi/scsi_lib.c
1550
__blk_mq_end_request(rq, scsi_result_to_blk_status(cmd->result));
drivers/scsi/scsi_lib.c
1832
struct request *req = bd->rq;
drivers/scsi/scsi_lib.c
1951
static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
drivers/scsi/scsi_lib.c
1955
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_lib.c
1979
static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
drivers/scsi/scsi_lib.c
1983
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_lib.c
2162
struct request *rq;
drivers/scsi/scsi_lib.c
2164
rq = scsi_alloc_request(sdev->request_queue, op, flags);
drivers/scsi/scsi_lib.c
2165
if (IS_ERR(rq))
drivers/scsi/scsi_lib.c
2167
scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/scsi_lib.c
741
static unsigned int scsi_rq_err_bytes(const struct request *rq)
drivers/scsi/scsi_lib.c
743
blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
drivers/scsi/scsi_lib.c
747
if (!(rq->rq_flags & RQF_MIXED_MERGE))
drivers/scsi/scsi_lib.c
748
return blk_rq_bytes(rq);
drivers/scsi/scsi_lib.c
757
for (bio = rq->bio; bio; bio = bio->bi_next) {
drivers/scsi/scsi_lib.c
764
BUG_ON(blk_rq_bytes(rq) && !bytes);
drivers/scsi/scsi_logging.c
31
const struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/scsi_logging.c
33
if (!rq->q || !rq->q->disk)
drivers/scsi/scsi_logging.c
35
return rq->q->disk->disk_name;
drivers/scsi/sd.c
1037
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
1038
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1039
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
drivers/scsi/sd.c
1040
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
drivers/scsi/sd.c
1043
if (!sd_set_special_bvec(rq, data_len))
drivers/scsi/sd.c
1055
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
drivers/scsi/sd.c
1064
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
1065
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1066
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
drivers/scsi/sd.c
1067
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
drivers/scsi/sd.c
1070
if (!sd_set_special_bvec(rq, data_len))
drivers/scsi/sd.c
1082
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
drivers/scsi/sd.c
1089
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
1091
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1092
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
drivers/scsi/sd.c
1093
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
drivers/scsi/sd.c
1095
if (!(rq->cmd_flags & REQ_NOUNMAP)) {
drivers/scsi/sd.c
1105
rq->rq_flags |= RQF_QUIET;
drivers/scsi/sd.c
1189
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
1190
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1205
rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
drivers/scsi/sd.c
1224
const struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
1225
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1230
return min3((u32)rq->bio->bi_write_hint,
drivers/scsi/sd.c
1358
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
1360
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1361
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
drivers/scsi/sd.c
1363
unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
drivers/scsi/sd.c
1365
bool write = rq_data_dir(rq) == WRITE;
drivers/scsi/sd.c
1382
if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
drivers/scsi/sd.c
1387
if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
drivers/scsi/sd.c
1408
fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
drivers/scsi/sd.c
1421
} else if (rq->cmd_flags & REQ_ATOMIC) {
drivers/scsi/sd.c
1429
sdp->use_10_for_rw || protect || rq->bio->bi_write_hint) {
drivers/scsi/sd.c
1453
(unsigned long long)blk_rq_pos(rq),
drivers/scsi/sd.c
1454
blk_rq_sectors(rq)));
drivers/scsi/sd.c
1459
blk_rq_sectors(rq)));
drivers/scsi/sd.c
1472
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
1474
switch (req_op(rq)) {
drivers/scsi/sd.c
1476
switch (scsi_disk(rq->q->disk)->provisioning_mode) {
drivers/scsi/sd.c
1515
struct request *rq = scsi_cmd_to_rq(SCpnt);
drivers/scsi/sd.c
1517
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
drivers/scsi/sd.c
1518
mempool_free(rq->special_vec.bv_page, sd_page_pool);
drivers/scsi/sd.c
895
struct request *rq = scsi_cmd_to_rq(scmd);
drivers/scsi/sd.c
896
struct bio *bio = rq->bio;
drivers/scsi/sd.c
897
unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
drivers/scsi/sd.c
931
static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
drivers/scsi/sd.c
939
bvec_set_page(&rq->special_vec, page, data_len, 0);
drivers/scsi/sd.c
940
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
drivers/scsi/sd.c
941
return bvec_virt(&rq->special_vec);
drivers/scsi/sd.c
947
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd.c
948
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
949
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
drivers/scsi/sd.c
950
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
drivers/scsi/sd.c
954
buf = sd_set_special_bvec(rq, data_len);
drivers/scsi/sd.c
969
rq->timeout = SD_TIMEOUT;
drivers/scsi/sd_zbc.c
296
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd_zbc.c
297
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd_zbc.c
298
sector_t sector = blk_rq_pos(rq);
drivers/scsi/sd_zbc.c
327
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd_zbc.c
328
sector_t sector = blk_rq_pos(rq);
drivers/scsi/sd_zbc.c
329
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd_zbc.c
346
rq->timeout = SD_TIMEOUT;
drivers/scsi/sd_zbc.c
367
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/scsi/sd_zbc.c
369
if (op_is_zone_mgmt(req_op(rq)) &&
drivers/scsi/sd_zbc.c
378
rq->rq_flags |= RQF_QUIET;
drivers/scsi/sg.c
1313
sg_rq_end_io(struct request *rq, blk_status_t status,
drivers/scsi/sg.c
1316
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/sg.c
1317
struct sg_request *srp = rq->end_io_data;
drivers/scsi/sg.c
135
struct request *rq;
drivers/scsi/sg.c
1381
srp->rq = NULL;
drivers/scsi/sg.c
1382
blk_mq_free_request(rq);
drivers/scsi/sg.c
1725
struct request *rq;
drivers/scsi/sg.c
1753
rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
drivers/scsi/sg.c
1755
if (IS_ERR(rq))
drivers/scsi/sg.c
1756
return PTR_ERR(rq);
drivers/scsi/sg.c
1757
scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/sg.c
1760
blk_mq_free_request(rq);
drivers/scsi/sg.c
1767
srp->rq = rq;
drivers/scsi/sg.c
1768
rq->end_io_data = srp;
drivers/scsi/sg.c
180
static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status,
drivers/scsi/sg.c
1813
res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len,
drivers/scsi/sg.c
1816
srp->bio = rq->bio;
drivers/scsi/sg.c
1840
if (srp->rq)
drivers/scsi/sg.c
1841
blk_mq_free_request(srp->rq);
drivers/scsi/sg.c
812
blk_mq_free_request(srp->rq);
drivers/scsi/sg.c
813
srp->rq = NULL;
drivers/scsi/sg.c
827
srp->rq->timeout = timeout;
drivers/scsi/sg.c
829
srp->rq->end_io = sg_rq_end_io;
drivers/scsi/sg.c
830
blk_execute_rq_nowait(srp->rq, at_head);
drivers/scsi/smartpqi/smartpqi_init.c
5581
struct request *rq;
drivers/scsi/smartpqi/smartpqi_init.c
5654
rq = scsi_cmd_to_rq(scmd);
drivers/scsi/smartpqi/smartpqi_init.c
5655
timeout = rq->timeout / HZ;
drivers/scsi/sr.c
301
struct request *rq = scsi_cmd_to_rq(SCpnt);
drivers/scsi/sr.c
302
struct scsi_cd *cd = scsi_cd(rq->q->disk);
drivers/scsi/sr.c
324
if (rq->bio != NULL)
drivers/scsi/sr.c
325
block_sectors = bio_sectors(rq->bio);
drivers/scsi/sr.c
331
good_bytes = (error_sector - blk_rq_pos(rq)) << 9;
drivers/scsi/sr.c
363
struct request *rq = scsi_cmd_to_rq(SCpnt);
drivers/scsi/sr.c
369
cd = scsi_cd(rq->q->disk);
drivers/scsi/sr.c
376
"Finishing %u sectors\n", blk_rq_sectors(rq)));
drivers/scsi/sr.c
396
switch (req_op(rq)) {
drivers/scsi/sr.c
407
blk_dump_rq_flags(rq, "Unknown sr command");
drivers/scsi/sr.c
430
if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
drivers/scsi/sr.c
441
(rq_data_dir(rq) == WRITE) ?
drivers/scsi/sr.c
443
this_count, blk_rq_sectors(rq)));
drivers/scsi/sr.c
446
block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
drivers/scsi/sr.c
941
struct request *rq;
drivers/scsi/sr.c
945
rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
drivers/scsi/sr.c
946
if (IS_ERR(rq))
drivers/scsi/sr.c
947
return PTR_ERR(rq);
drivers/scsi/sr.c
948
scmd = blk_mq_rq_to_pdu(rq);
drivers/scsi/sr.c
950
ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL);
drivers/scsi/sr.c
965
rq->timeout = 60 * HZ;
drivers/scsi/sr.c
966
bio = rq->bio;
drivers/scsi/sr.c
968
blk_execute_rq(rq, false);
drivers/scsi/sr.c
981
blk_mq_free_request(rq);
drivers/scsi/virtio_scsi.c
541
struct request *rq = scsi_cmd_to_rq(sc);
drivers/scsi/virtio_scsi.c
546
if (!rq || !scsi_prot_sg_count(sc))
drivers/scsi/virtio_scsi.c
549
bi = blk_get_integrity(rq->q->disk);
drivers/scsi/virtio_scsi.c
554
blk_rq_sectors(rq)));
drivers/scsi/virtio_scsi.c
558
blk_rq_sectors(rq)));
drivers/staging/octeon/ethernet-mdio.c
54
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
drivers/staging/octeon/ethernet-mdio.c
62
return phy_mii_ioctl(dev->phydev, rq, cmd);
drivers/staging/octeon/ethernet-mdio.h
25
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
drivers/target/loopback/tcm_loop.c
273
static bool tcm_loop_flush_work_iter(struct request *rq, void *data)
drivers/target/loopback/tcm_loop.c
275
struct scsi_cmnd *sc = blk_mq_rq_to_pdu(rq);
drivers/tty/ipwireless/hardware.c
1734
struct ipw_rx_packet *rp, *rq;
drivers/tty/ipwireless/hardware.c
1750
list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
drivers/tty/ipwireless/hardware.c
1755
list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
drivers/ufs/core/ufs-mcq.c
533
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/ufs/core/ufs-mcq.c
545
hwq = ufshcd_mcq_req_to_hwq(hba, rq);
drivers/ufs/core/ufshcd-crypto.h
16
static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
drivers/ufs/core/ufshcd-crypto.h
19
if (!rq || !rq->crypt_keyslot) {
drivers/ufs/core/ufshcd-crypto.h
24
lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot);
drivers/ufs/core/ufshcd-crypto.h
25
lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0];
drivers/ufs/core/ufshcd-crypto.h
79
static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
drivers/ufs/core/ufshcd-priv.h
378
struct request *rq = blk_mq_tag_to_rq(tags, tag);
drivers/ufs/core/ufshcd-priv.h
380
if (WARN_ON_ONCE(!rq))
drivers/ufs/core/ufshcd-priv.h
383
return blk_mq_rq_to_pdu(rq);
drivers/ufs/core/ufshcd.c
2914
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/ufs/core/ufshcd.c
2915
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
drivers/ufs/core/ufshcd.c
3123
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/ufs/core/ufshcd.c
3126
hba->mcq_enabled ? ufshcd_mcq_req_to_hwq(hba, rq) : NULL;
drivers/ufs/core/ufshcd.c
3302
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/ufs/core/ufshcd.c
3305
rq->timeout = timeout;
drivers/ufs/core/ufshcd.c
3306
sts = blk_execute_rq(rq, true);
drivers/ufs/core/ufshcd.c
414
struct utp_upiu_req *rq = lrb->ucd_req_ptr;
drivers/ufs/core/ufshcd.c
421
header = &rq->header;
drivers/ufs/core/ufshcd.c
425
trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
drivers/ufs/core/ufshcd.c
488
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/ufs/core/ufshcd.c
489
unsigned int tag = rq->tag;
drivers/ufs/core/ufshcd.c
513
transfer_len = blk_rq_bytes(rq);
drivers/ufs/core/ufshcd.c
520
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
drivers/ufs/core/ufshcd.c
5733
static bool ufshcd_mcq_force_compl_one(struct request *rq, void *priv)
drivers/ufs/core/ufshcd.c
5735
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/ufs/core/ufshcd.c
5736
struct scsi_device *sdev = rq->q->queuedata;
drivers/ufs/core/ufshcd.c
5739
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
drivers/ufs/core/ufshcd.c
5741
if (blk_mq_is_reserved_rq(rq) || !hwq)
drivers/ufs/core/ufshcd.c
5761
static bool ufshcd_mcq_compl_one(struct request *rq, void *priv)
drivers/ufs/core/ufshcd.c
5763
struct scsi_device *sdev = rq->q->queuedata;
drivers/ufs/core/ufshcd.c
5766
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
drivers/ufs/core/ufshcd.c
5768
if (!blk_mq_is_reserved_rq(rq) && hwq)
drivers/ufs/core/ufshcd.c
6648
static bool ufshcd_abort_one(struct request *rq, void *priv)
drivers/ufs/core/ufshcd.c
6651
u32 tag = rq->tag;
drivers/ufs/core/ufshcd.c
6652
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
drivers/ufs/core/ufshcd.c
6657
if (blk_mq_is_reserved_rq(rq))
drivers/ufs/core/ufshcd.c
7795
struct request *rq = scsi_cmd_to_rq(cmd);
drivers/ufs/core/ufshcd.c
7796
int tag = rq->tag;
drivers/ufs/core/ufshcd.c
7879
if (blk_mq_is_reserved_rq(rq))
drivers/usb/misc/uss720.c
103
rq = urb->context;
drivers/usb/misc/uss720.c
104
priv = rq->priv;
drivers/usb/misc/uss720.c
109
} else if (rq->dr->bRequest == 3) {
drivers/usb/misc/uss720.c
110
memcpy(priv->reg, rq->reg, sizeof(priv->reg));
drivers/usb/misc/uss720.c
116
if (rq->reg[2] & rq->reg[1] & 0x10 && pp)
drivers/usb/misc/uss720.c
119
complete(&rq->compl);
drivers/usb/misc/uss720.c
120
kref_put(&rq->ref_count, destroy_async);
drivers/usb/misc/uss720.c
128
struct uss720_async_request *rq;
drivers/usb/misc/uss720.c
137
rq = kzalloc_obj(struct uss720_async_request, mem_flags);
drivers/usb/misc/uss720.c
138
if (!rq)
drivers/usb/misc/uss720.c
140
kref_init(&rq->ref_count);
drivers/usb/misc/uss720.c
141
INIT_LIST_HEAD(&rq->asynclist);
drivers/usb/misc/uss720.c
142
init_completion(&rq->compl);
drivers/usb/misc/uss720.c
144
rq->priv = priv;
drivers/usb/misc/uss720.c
145
rq->urb = usb_alloc_urb(0, mem_flags);
drivers/usb/misc/uss720.c
146
if (!rq->urb) {
drivers/usb/misc/uss720.c
147
kref_put(&rq->ref_count, destroy_async);
drivers/usb/misc/uss720.c
150
rq->dr = kmalloc_obj(*rq->dr, mem_flags);
drivers/usb/misc/uss720.c
151
if (!rq->dr) {
drivers/usb/misc/uss720.c
152
kref_put(&rq->ref_count, destroy_async);
drivers/usb/misc/uss720.c
155
rq->dr->bRequestType = requesttype;
drivers/usb/misc/uss720.c
156
rq->dr->bRequest = request;
drivers/usb/misc/uss720.c
157
rq->dr->wValue = cpu_to_le16(value);
drivers/usb/misc/uss720.c
158
rq->dr->wIndex = cpu_to_le16(index);
drivers/usb/misc/uss720.c
159
rq->dr->wLength = cpu_to_le16((request == 3) ? sizeof(rq->reg) : 0);
drivers/usb/misc/uss720.c
160
usb_fill_control_urb(rq->urb, usbdev, (requesttype & 0x80) ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0),
drivers/usb/misc/uss720.c
161
(unsigned char *)rq->dr,
drivers/usb/misc/uss720.c
162
(request == 3) ? rq->reg : NULL, (request == 3) ? sizeof(rq->reg) : 0, async_complete, rq);
drivers/usb/misc/uss720.c
165
list_add_tail(&rq->asynclist, &priv->asynclist);
drivers/usb/misc/uss720.c
167
kref_get(&rq->ref_count);
drivers/usb/misc/uss720.c
168
ret = usb_submit_urb(rq->urb, mem_flags);
drivers/usb/misc/uss720.c
170
return rq;
drivers/usb/misc/uss720.c
171
destroy_async(&rq->ref_count);
drivers/usb/misc/uss720.c
178
struct uss720_async_request *rq;
drivers/usb/misc/uss720.c
183
list_for_each_entry(rq, &priv->asynclist, asynclist) {
drivers/usb/misc/uss720.c
184
usb_unlink_urb(rq->urb);
drivers/usb/misc/uss720.c
196
struct uss720_async_request *rq;
drivers/usb/misc/uss720.c
205
rq = submit_async_request(priv, 3, 0xc0, ((unsigned int)reg) << 8, 0, mem_flags);
drivers/usb/misc/uss720.c
206
if (!rq) {
drivers/usb/misc/uss720.c
212
kref_put(&rq->ref_count, destroy_async);
drivers/usb/misc/uss720.c
215
if (wait_for_completion_timeout(&rq->compl, HZ)) {
drivers/usb/misc/uss720.c
216
ret = rq->urb->status;
drivers/usb/misc/uss720.c
221
kref_put(&rq->ref_count, destroy_async);
drivers/usb/misc/uss720.c
232
struct uss720_async_request *rq;
drivers/usb/misc/uss720.c
237
rq = submit_async_request(priv, 4, 0x40, (((unsigned int)reg) << 8) | val, 0, mem_flags);
drivers/usb/misc/uss720.c
238
if (!rq) {
drivers/usb/misc/uss720.c
243
kref_put(&rq->ref_count, destroy_async);
drivers/usb/misc/uss720.c
80
struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count);
drivers/usb/misc/uss720.c
81
struct parport_uss720_private *priv = rq->priv;
drivers/usb/misc/uss720.c
84
if (likely(rq->urb))
drivers/usb/misc/uss720.c
85
usb_free_urb(rq->urb);
drivers/usb/misc/uss720.c
86
kfree(rq->dr);
drivers/usb/misc/uss720.c
88
list_del_init(&rq->asynclist);
drivers/usb/misc/uss720.c
90
kfree(rq);
drivers/usb/misc/uss720.c
98
struct uss720_async_request *rq;
drivers/vfio/pci/mlx5/cmd.c
1245
qp->rq.wqe_cnt = roundup_pow_of_two(max_recv_wr);
drivers/vfio/pci/mlx5/cmd.c
1247
log_rq_sz = ilog2(qp->rq.wqe_cnt);
drivers/vfio/pci/mlx5/cmd.c
1253
mlx5_init_fbc(qp->buf.frags, log_rq_stride, log_rq_sz, &qp->rq.fbc);
drivers/vfio/pci/mlx5/cmd.c
1256
qp->rq.db = &qp->db.db[MLX5_RCV_DBR];
drivers/vfio/pci/mlx5/cmd.c
1314
WARN_ON(qp->rq.pc - qp->rq.cc >= qp->rq.wqe_cnt);
drivers/vfio/pci/mlx5/cmd.c
1315
ix = qp->rq.pc & (qp->rq.wqe_cnt - 1);
drivers/vfio/pci/mlx5/cmd.c
1316
data = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ix);
drivers/vfio/pci/mlx5/cmd.c
1320
qp->rq.pc++;
drivers/vfio/pci/mlx5/cmd.c
1323
*qp->rq.db = cpu_to_be32(qp->rq.pc & 0xffff);
drivers/vfio/pci/mlx5/cmd.c
1352
for (i = 0; i < qp->rq.wqe_cnt; i++) {
drivers/vfio/pci/mlx5/cmd.c
1662
qp->rq.cc++;
drivers/vfio/pci/mlx5/cmd.c
1665
ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1);
drivers/vfio/pci/mlx5/cmd.h
152
} rq;
drivers/video/fbdev/xen-fbfront.c
321
static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
drivers/virtio/virtio_rtc_ptp.c
217
struct ptp_clock_request *rq, int on)
fs/dlm/lock.c
135
#define modes_compat(gr, rq) \
fs/dlm/lock.c
136
__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
fs/dlm/lock.c
2596
static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
fs/dlm/lock.c
2598
if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
fs/dlm/lock.c
2599
(gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
fs/dlm/lock.c
2605
if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
fs/erofs/compress.h
26
const char *(*decompress)(struct z_erofs_decompress_req *rq,
fs/erofs/compress.h
62
struct z_erofs_decompress_req *rq;
fs/erofs/compress.h
75
const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
fs/erofs/compress.h
79
int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor.c
101
victim = __erofs_allocpage(pagepool, rq->gfp, true);
fs/erofs/decompressor.c
106
rq->out[i] = victim;
fs/erofs/decompressor.c
111
static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq,
fs/erofs/decompressor.c
124
if (!rq->inplace_io) {
fs/erofs/decompressor.c
125
if (rq->inpages <= 1) {
fs/erofs/decompressor.c
130
src = erofs_vm_map_ram(rq->in, rq->inpages);
fs/erofs/decompressor.c
145
oend = rq->pageofs_out + rq->outputsize;
fs/erofs/decompressor.c
147
if (!rq->partial_decoding && may_inplace &&
fs/erofs/decompressor.c
148
omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
fs/erofs/decompressor.c
149
for (i = 0; i < rq->inpages; ++i)
fs/erofs/decompressor.c
150
if (rq->out[rq->outpages - rq->inpages + i] !=
fs/erofs/decompressor.c
151
rq->in[i])
fs/erofs/decompressor.c
153
if (i >= rq->inpages) {
fs/erofs/decompressor.c
156
return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
fs/erofs/decompressor.c
163
src = z_erofs_get_gbuf(rq->inpages);
fs/erofs/decompressor.c
170
for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) {
fs/erofs/decompressor.c
171
cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
fs/erofs/decompressor.c
188
const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor.c
196
rq->inputsize -= padend - padbuf;
fs/erofs/decompressor.c
197
rq->pageofs_in += padend - padbuf;
fs/erofs/decompressor.c
201
static const char *__z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor.c
210
headpage = kmap_local_page(*rq->in);
fs/erofs/decompressor.c
211
reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
fs/erofs/decompressor.c
212
min_t(unsigned int, rq->inputsize,
fs/erofs/decompressor.c
213
rq->sb->s_blocksize - rq->pageofs_in));
fs/erofs/decompressor.c
218
may_inplace = !((rq->pageofs_in + rq->inputsize) &
fs/erofs/decompressor.c
219
(rq->sb->s_blocksize - 1));
fs/erofs/decompressor.c
221
inputmargin = rq->pageofs_in;
fs/erofs/decompressor.c
222
src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin,
fs/erofs/decompressor.c
227
out = dst + rq->pageofs_out;
fs/erofs/decompressor.c
228
if (rq->partial_decoding)
fs/erofs/decompressor.c
230
rq->inputsize, rq->outputsize, rq->outputsize);
fs/erofs/decompressor.c
233
rq->inputsize, rq->outputsize);
fs/erofs/decompressor.c
234
if (ret == rq->outputsize)
fs/erofs/decompressor.c
244
vm_unmap_ram(src, rq->inpages);
fs/erofs/decompressor.c
254
static const char *z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor.c
263
if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) {
fs/erofs/decompressor.c
264
DBG_BUGON(!*rq->out);
fs/erofs/decompressor.c
265
dst = kmap_local_page(*rq->out);
fs/erofs/decompressor.c
269
ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
fs/erofs/decompressor.c
273
dst = page_address(*rq->out);
fs/erofs/decompressor.c
276
dst = erofs_vm_map_ram(rq->out, rq->outpages);
fs/erofs/decompressor.c
282
reason = __z_erofs_lz4_decompress(rq, dst);
fs/erofs/decompressor.c
286
vm_unmap_ram(dst, rq->outpages);
fs/erofs/decompressor.c
290
static const char *z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor.c
293
const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
fs/erofs/decompressor.c
294
const unsigned int bs = rq->sb->s_blocksize;
fs/erofs/decompressor.c
298
if (rq->outputsize > rq->inputsize)
fs/erofs/decompressor.c
300
if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
fs/erofs/decompressor.c
301
cur = bs - (rq->pageofs_out & (bs - 1));
fs/erofs/decompressor.c
302
pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
fs/erofs/decompressor.c
303
cur = min(cur, rq->outputsize);
fs/erofs/decompressor.c
304
if (cur && rq->out[0]) {
fs/erofs/decompressor.c
305
kin = kmap_local_page(rq->in[nrpages_in - 1]);
fs/erofs/decompressor.c
306
if (rq->out[0] == rq->in[nrpages_in - 1])
fs/erofs/decompressor.c
307
memmove(kin + rq->pageofs_out, kin + pi, cur);
fs/erofs/decompressor.c
309
memcpy_to_page(rq->out[0], rq->pageofs_out,
fs/erofs/decompressor.c
313
rq->outputsize -= cur;
fs/erofs/decompressor.c
316
for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
fs/erofs/decompressor.c
317
insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
fs/erofs/decompressor.c
318
rq->outputsize -= insz;
fs/erofs/decompressor.c
319
if (!rq->in[ni])
fs/erofs/decompressor.c
321
kin = kmap_local_page(rq->in[ni]);
fs/erofs/decompressor.c
324
no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
fs/erofs/decompressor.c
325
po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
fs/erofs/decompressor.c
328
if (rq->out[no] == rq->in[ni])
fs/erofs/decompressor.c
330
kin + rq->pageofs_in + pi, cnt);
fs/erofs/decompressor.c
331
else if (rq->out[no])
fs/erofs/decompressor.c
332
memcpy_to_page(rq->out[no], po,
fs/erofs/decompressor.c
333
kin + rq->pageofs_in + pi, cnt);
fs/erofs/decompressor.c
345
struct z_erofs_decompress_req *rq = dctx->rq;
fs/erofs/decompressor.c
350
if (++dctx->no >= rq->outpages || !rq->outputsize)
fs/erofs/decompressor.c
355
dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
fs/erofs/decompressor.c
356
rq->outputsize -= dctx->avail_out;
fs/erofs/decompressor.c
357
pgo = &rq->out[dctx->no];
fs/erofs/decompressor.c
358
if (!*pgo && rq->fillgaps) { /* deduped */
fs/erofs/decompressor.c
359
*pgo = erofs_allocpage(pgpl, rq->gfp);
fs/erofs/decompressor.c
368
*dst = dctx->kout + rq->pageofs_out;
fs/erofs/decompressor.c
372
rq->pageofs_out = 0;
fs/erofs/decompressor.c
375
if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
fs/erofs/decompressor.c
376
if (++dctx->ni >= rq->inpages)
fs/erofs/decompressor.c
382
dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
fs/erofs/decompressor.c
383
rq->inputsize -= dctx->inbuf_sz;
fs/erofs/decompressor.c
384
dctx->kin = kmap_local_page(rq->in[dctx->ni]);
fs/erofs/decompressor.c
389
dctx->kout = kmap_local_page(rq->out[dctx->no]);
fs/erofs/decompressor.c
401
if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
fs/erofs/decompressor.c
407
for (j = dctx->ni + 1; j < rq->inpages; ++j) {
fs/erofs/decompressor.c
408
if (rq->out[dctx->no] != rq->in[j])
fs/erofs/decompressor.c
410
tmppage = erofs_allocpage(pgpl, rq->gfp);
fs/erofs/decompressor.c
414
copy_highpage(tmppage, rq->in[j]);
fs/erofs/decompressor.c
415
rq->in[j] = tmppage;
fs/erofs/decompressor.c
53
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor.c
60
EROFS_SB(rq->sb)->lz4.max_distance_pages;
fs/erofs/decompressor.c
65
for (i = j = 0; i < rq->outpages; ++i, ++j) {
fs/erofs/decompressor.c
66
struct page *const page = rq->out[i];
fs/erofs/decompressor.c
73
if (!rq->fillgaps && test_bit(j, bounced)) {
fs/erofs/decompressor.c
76
availables[top++] = rq->out[i - lz4_max_distance_pages];
fs/erofs/decompressor_crypto.c
105
for (i = 0; i < rq->outpages; i++) {
fs/erofs/decompressor_crypto.c
106
struct page *const page = rq->out[i];
fs/erofs/decompressor_crypto.c
110
victim = __erofs_allocpage(pgpl, rq->gfp, true);
fs/erofs/decompressor_crypto.c
116
rq->out[i] = victim;
fs/erofs/decompressor_crypto.c
119
err = __z_erofs_crypto_decompress(rq, tfm);
fs/erofs/decompressor_crypto.c
16
headpage = kmap_local_page(*rq->in);
fs/erofs/decompressor_crypto.c
17
reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
fs/erofs/decompressor_crypto.c
18
min_t(unsigned int, rq->inputsize,
fs/erofs/decompressor_crypto.c
19
rq->sb->s_blocksize - rq->pageofs_in));
fs/erofs/decompressor_crypto.c
28
ret = sg_alloc_table_from_pages_segment(&st_src, rq->in, rq->inpages,
fs/erofs/decompressor_crypto.c
29
rq->pageofs_in, rq->inputsize, UINT_MAX, GFP_KERNEL);
fs/erofs/decompressor_crypto.c
33
ret = sg_alloc_table_from_pages_segment(&st_dst, rq->out, rq->outpages,
fs/erofs/decompressor_crypto.c
34
rq->pageofs_out, rq->outputsize, UINT_MAX, GFP_KERNEL);
fs/erofs/decompressor_crypto.c
39
st_dst.sgl, rq->inputsize, rq->outputsize);
fs/erofs/decompressor_crypto.c
47
erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
fs/erofs/decompressor_crypto.c
48
ret, rq->inputsize, rq->pageofs_in, rq->outputsize);
fs/erofs/decompressor_crypto.c
6
static int __z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor_crypto.c
92
int z_erofs_crypto_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor_crypto.c
99
tfm = z_erofs_crypto_get_engine(rq->alg);
fs/erofs/decompressor_deflate.c
102
struct super_block *sb = rq->sb;
fs/erofs/decompressor_deflate.c
103
struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
fs/erofs/decompressor_deflate.c
109
dctx.kin = kmap_local_page(*rq->in);
fs/erofs/decompressor_deflate.c
110
reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
fs/erofs/decompressor_deflate.c
111
min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
fs/erofs/decompressor_deflate.c
136
rq->fillgaps = true; /* DEFLATE doesn't support NULL output buffer */
fs/erofs/decompressor_deflate.c
137
strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
fs/erofs/decompressor_deflate.c
138
rq->inputsize -= strm->z.avail_in;
fs/erofs/decompressor_deflate.c
139
strm->z.next_in = dctx.kin + rq->pageofs_in;
fs/erofs/decompressor_deflate.c
155
if (zerr != Z_OK || !(rq->outputsize + strm->z.avail_out)) {
fs/erofs/decompressor_deflate.c
156
if (zerr == Z_OK && rq->partial_decoding)
fs/erofs/decompressor_deflate.c
158
if (zerr == Z_STREAM_END && !rq->outputsize)
fs/erofs/decompressor_deflate.c
181
static const char *z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor_deflate.c
187
if (!rq->partial_decoding) {
fs/erofs/decompressor_deflate.c
188
err = z_erofs_crypto_decompress(rq, pgpl);
fs/erofs/decompressor_deflate.c
194
return __z_erofs_deflate_decompress(rq, pgpl);
fs/erofs/decompressor_deflate.c
99
static const char *__z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor_lzma.c
149
static const char *z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor_lzma.c
152
struct super_block *sb = rq->sb;
fs/erofs/decompressor_lzma.c
153
struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
fs/erofs/decompressor_lzma.c
160
dctx.kin = kmap_local_page(*rq->in);
fs/erofs/decompressor_lzma.c
161
reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
fs/erofs/decompressor_lzma.c
162
min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
fs/erofs/decompressor_lzma.c
181
xz_dec_microlzma_reset(strm->state, rq->inputsize, rq->outputsize,
fs/erofs/decompressor_lzma.c
182
!rq->partial_decoding);
fs/erofs/decompressor_lzma.c
183
buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in);
fs/erofs/decompressor_lzma.c
184
rq->inputsize -= buf.in_size;
fs/erofs/decompressor_lzma.c
185
buf.in = dctx.kin + rq->pageofs_in;
fs/erofs/decompressor_lzma.c
208
if (xz_err == XZ_STREAM_END && !rq->outputsize)
fs/erofs/decompressor_zstd.c
138
static const char *z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
fs/erofs/decompressor_zstd.c
141
struct super_block *sb = rq->sb;
fs/erofs/decompressor_zstd.c
142
struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
fs/erofs/decompressor_zstd.c
151
dctx.kin = kmap_local_page(*rq->in);
fs/erofs/decompressor_zstd.c
152
reason = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in,
fs/erofs/decompressor_zstd.c
153
min(rq->inputsize, sb->s_blocksize - rq->pageofs_in));
fs/erofs/decompressor_zstd.c
169
rq->fillgaps = true; /* ZSTD doesn't support NULL output buffer */
fs/erofs/decompressor_zstd.c
170
in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in);
fs/erofs/decompressor_zstd.c
171
rq->inputsize -= in_buf.size;
fs/erofs/decompressor_zstd.c
172
in_buf.src = dctx.kin + rq->pageofs_in;
fs/erofs/decompressor_zstd.c
193
((rq->outputsize + dctx.avail_out) && (!zerr || (zerr > 0 &&
fs/erofs/decompressor_zstd.c
194
!(rq->inputsize + in_buf.size - in_buf.pos))))) {
fs/erofs/decompressor_zstd.c
199
} while (rq->outputsize + dctx.avail_out);
fs/erofs/fileio.c
125
if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
fs/erofs/fileio.c
128
erofs_fileio_rq_submit(io->rq);
fs/erofs/fileio.c
129
io->rq = NULL;
fs/erofs/fileio.c
132
if (!io->rq) {
fs/erofs/fileio.c
140
io->rq = erofs_fileio_rq_alloc(&io->dev);
fs/erofs/fileio.c
141
io->rq->bio.bi_iter.bi_sector =
fs/erofs/fileio.c
145
if (!bio_add_folio(&io->rq->bio, folio, len, cur))
fs/erofs/fileio.c
166
erofs_fileio_rq_submit(io.rq);
fs/erofs/fileio.c
188
erofs_fileio_rq_submit(io.rq);
fs/erofs/fileio.c
19
struct erofs_fileio_rq *rq;
fs/erofs/fileio.c
24
struct erofs_fileio_rq *rq =
fs/erofs/fileio.c
28
if (ret >= 0 && ret != rq->bio.bi_iter.bi_size)
fs/erofs/fileio.c
30
if (!rq->bio.bi_end_io) {
fs/erofs/fileio.c
31
bio_for_each_folio_all(fi, &rq->bio) {
fs/erofs/fileio.c
35
} else if (ret < 0 && !rq->bio.bi_status) {
fs/erofs/fileio.c
36
rq->bio.bi_status = errno_to_blk_status(ret);
fs/erofs/fileio.c
38
bio_endio(&rq->bio);
fs/erofs/fileio.c
39
bio_uninit(&rq->bio);
fs/erofs/fileio.c
40
if (refcount_dec_and_test(&rq->ref))
fs/erofs/fileio.c
41
kfree(rq);
fs/erofs/fileio.c
44
static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
fs/erofs/fileio.c
49
if (!rq)
fs/erofs/fileio.c
51
rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
fs/erofs/fileio.c
52
rq->iocb.ki_ioprio = get_current_ioprio();
fs/erofs/fileio.c
53
rq->iocb.ki_complete = erofs_fileio_ki_complete;
fs/erofs/fileio.c
54
if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
fs/erofs/fileio.c
55
rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
fs/erofs/fileio.c
56
rq->iocb.ki_flags = IOCB_DIRECT;
fs/erofs/fileio.c
57
iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
fs/erofs/fileio.c
58
rq->bio.bi_iter.bi_size);
fs/erofs/fileio.c
59
scoped_with_creds(rq->iocb.ki_filp->f_cred)
fs/erofs/fileio.c
60
ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
fs/erofs/fileio.c
62
erofs_fileio_ki_complete(&rq->iocb, ret);
fs/erofs/fileio.c
63
if (refcount_dec_and_test(&rq->ref))
fs/erofs/fileio.c
64
kfree(rq);
fs/erofs/fileio.c
69
struct erofs_fileio_rq *rq = kzalloc_obj(*rq, GFP_KERNEL | __GFP_NOFAIL);
fs/erofs/fileio.c
71
bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
fs/erofs/fileio.c
72
rq->iocb.ki_filp = mdev->m_dif->file;
fs/erofs/fileio.c
73
rq->sb = mdev->m_sb;
fs/erofs/fileio.c
74
refcount_set(&rq->ref, 2);
fs/erofs/fileio.c
75
return rq;
fs/nfsd/nfsd.h
171
static inline int nfsd_v4client(struct svc_rqst *rq)
fs/nfsd/nfsd.h
173
return rq && rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
fs/smb/client/compress.c
279
bool should_compress(const struct cifs_tcon *tcon, const struct smb_rqst *rq)
fs/smb/client/compress.c
281
const struct smb2_hdr *shdr = rq->rq_iov->iov_base;
fs/smb/client/compress.c
293
const struct smb2_write_req *wreq = rq->rq_iov->iov_base;
fs/smb/client/compress.c
298
return is_compressible(&rq->rq_iter);
fs/smb/client/compress.c
304
int smb_compress(struct TCP_Server_Info *server, struct smb_rqst *rq, compress_send_fn send_fn)
fs/smb/client/compress.c
311
if (!server || !rq || !rq->rq_iov || !rq->rq_iov->iov_base)
fs/smb/client/compress.c
314
if (rq->rq_iov->iov_len != sizeof(struct smb2_write_req))
fs/smb/client/compress.c
317
slen = iov_iter_count(&rq->rq_iter);
fs/smb/client/compress.c
325
iter = rq->rq_iter;
fs/smb/client/compress.c
353
hdr.Offset = cpu_to_le32(rq->rq_iov[0].iov_len);
fs/smb/client/compress.c
357
iov[1] = rq->rq_iov[0];
fs/smb/client/compress.c
365
ret = send_fn(server, 1, rq);
fs/smb/client/compress.h
33
int smb_compress(struct TCP_Server_Info *server, struct smb_rqst *rq,
fs/smb/client/compress.h
35
bool should_compress(const struct cifs_tcon *tcon, const struct smb_rqst *rq);
fs/smb/server/smb2pdu.c
57
#define WORK_BUFFERS(w, rq, rs) __wbuf((w), (void **)&(rq), (void **)&(rs))
include/drm/gpu_scheduler.h
110
struct drm_sched_rq *rq;
include/linux/blk-integrity.h
103
static inline struct bio_vec rq_integrity_vec(struct request *rq)
include/linux/blk-integrity.h
105
return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
include/linux/blk-integrity.h
106
rq->bio->bi_integrity->bip_iter);
include/linux/blk-integrity.h
124
static inline int blk_rq_integrity_map_user(struct request *rq,
include/linux/blk-integrity.h
171
static inline bool blk_integrity_rq(const struct request *rq)
include/linux/blk-integrity.h
176
static inline struct bio_vec rq_integrity_vec(struct request *rq)
include/linux/blk-integrity.h
37
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
include/linux/blk-integrity.h
94
static inline bool blk_integrity_rq(const struct request *rq)
include/linux/blk-integrity.h
96
return rq->cmd_flags & REQ_INTEGRITY;
include/linux/blk-mq.h
1015
static inline void *blk_mq_rq_to_pdu(struct request *rq)
include/linux/blk-mq.h
1017
return rq + 1;
include/linux/blk-mq.h
1039
static inline void blk_mq_cleanup_rq(struct request *rq)
include/linux/blk-mq.h
1041
if (rq->q->mq_ops->cleanup_rq)
include/linux/blk-mq.h
1042
rq->q->mq_ops->cleanup_rq(rq);
include/linux/blk-mq.h
1048
static inline bool rq_is_sync(struct request *rq)
include/linux/blk-mq.h
1050
return op_is_sync(rq->cmd_flags);
include/linux/blk-mq.h
1053
void blk_rq_init(struct request_queue *q, struct request *rq);
include/linux/blk-mq.h
1054
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
include/linux/blk-mq.h
1057
void blk_rq_unprep_clone(struct request *rq);
include/linux/blk-mq.h
1058
blk_status_t blk_insert_cloned_request(struct request *rq);
include/linux/blk-mq.h
1076
int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
include/linux/blk-mq.h
1078
int blk_rq_append_bio(struct request *rq, struct bio *bio);
include/linux/blk-mq.h
1079
void blk_execute_rq_nowait(struct request *rq, bool at_head);
include/linux/blk-mq.h
1080
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
include/linux/blk-mq.h
1081
bool blk_rq_is_poll(struct request *rq);
include/linux/blk-mq.h
1088
#define __rq_for_each_bio(_bio, rq) \
include/linux/blk-mq.h
1089
if ((rq->bio)) \
include/linux/blk-mq.h
1090
for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
include/linux/blk-mq.h
1112
static inline sector_t blk_rq_pos(const struct request *rq)
include/linux/blk-mq.h
1114
return rq->__sector;
include/linux/blk-mq.h
1117
static inline unsigned int blk_rq_bytes(const struct request *rq)
include/linux/blk-mq.h
1119
return rq->__data_len;
include/linux/blk-mq.h
1122
static inline int blk_rq_cur_bytes(const struct request *rq)
include/linux/blk-mq.h
1124
if (!rq->bio)
include/linux/blk-mq.h
1126
if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
include/linux/blk-mq.h
1127
return rq->bio->bi_iter.bi_size;
include/linux/blk-mq.h
1128
return bio_iovec(rq->bio).bv_len;
include/linux/blk-mq.h
1131
static inline unsigned int blk_rq_sectors(const struct request *rq)
include/linux/blk-mq.h
1133
return blk_rq_bytes(rq) >> SECTOR_SHIFT;
include/linux/blk-mq.h
1136
static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
include/linux/blk-mq.h
1138
return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
include/linux/blk-mq.h
1141
static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
include/linux/blk-mq.h
1143
return rq->stats_sectors;
include/linux/blk-mq.h
1152
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
include/linux/blk-mq.h
1154
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
include/linux/blk-mq.h
1155
return rq->special_vec.bv_len;
include/linux/blk-mq.h
1156
return blk_rq_bytes(rq);
include/linux/blk-mq.h
1163
static inline struct bio_vec req_bvec(struct request *rq)
include/linux/blk-mq.h
1165
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
include/linux/blk-mq.h
1166
return rq->special_vec;
include/linux/blk-mq.h
1167
return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
include/linux/blk-mq.h
1170
static inline unsigned int blk_rq_count_bios(struct request *rq)
include/linux/blk-mq.h
1175
__rq_for_each_bio(bio, rq)
include/linux/blk-mq.h
1181
void blk_steal_bios(struct bio_list *list, struct request *rq);
include/linux/blk-mq.h
1189
bool blk_update_request(struct request *rq, blk_status_t error,
include/linux/blk-mq.h
1202
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
include/linux/blk-mq.h
1204
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
include/linux/blk-mq.h
1206
return rq->nr_phys_segments;
include/linux/blk-mq.h
1213
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
include/linux/blk-mq.h
1215
return max_t(unsigned short, rq->nr_phys_segments, 1);
include/linux/blk-mq.h
1224
static inline unsigned int blk_rq_nr_bvec(struct request *rq)
include/linux/blk-mq.h
1230
rq_for_each_bvec(bv, rq, rq_iter)
include/linux/blk-mq.h
1236
int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
include/linux/blk-mq.h
1238
static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
include/linux/blk-mq.h
1242
return __blk_rq_map_sg(rq, sglist, &last_sg);
include/linux/blk-mq.h
234
static inline bool blk_rq_is_passthrough(struct request *rq)
include/linux/blk-mq.h
236
return blk_op_is_passthrough(rq->cmd_flags);
include/linux/blk-mq.h
246
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
include/linux/blk-mq.h
248
#define rq_dma_dir(rq) \
include/linux/blk-mq.h
249
(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
include/linux/blk-mq.h
262
static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
include/linux/blk-mq.h
264
rq->rq_next = NULL;
include/linux/blk-mq.h
272
static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
include/linux/blk-mq.h
274
rq->rq_next = rl->head;
include/linux/blk-mq.h
282
struct request *rq = rl->head;
include/linux/blk-mq.h
284
if (rq) {
include/linux/blk-mq.h
288
rq->rq_next = NULL;
include/linux/blk-mq.h
566
struct request *rq;
include/linux/blk-mq.h
685
void (*show_rq)(struct seq_file *m, struct request *rq);
include/linux/blk-mq.h
750
void blk_mq_free_request(struct request *rq);
include/linux/blk-mq.h
751
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
include/linux/blk-mq.h
810
u32 blk_mq_unique_tag(struct request *rq);
include/linux/blk-mq.h
826
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
include/linux/blk-mq.h
828
return READ_ONCE(rq->state);
include/linux/blk-mq.h
831
static inline int blk_mq_request_started(struct request *rq)
include/linux/blk-mq.h
833
return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
include/linux/blk-mq.h
836
static inline int blk_mq_request_completed(struct request *rq)
include/linux/blk-mq.h
838
return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
include/linux/blk-mq.h
848
static inline void blk_mq_set_request_complete(struct request *rq)
include/linux/blk-mq.h
850
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
include/linux/blk-mq.h
857
static inline void blk_mq_complete_request_direct(struct request *rq,
include/linux/blk-mq.h
858
void (*complete)(struct request *rq))
include/linux/blk-mq.h
860
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
include/linux/blk-mq.h
861
complete(rq);
include/linux/blk-mq.h
864
void blk_mq_start_request(struct request *rq);
include/linux/blk-mq.h
865
void blk_mq_end_request(struct request *rq, blk_status_t error);
include/linux/blk-mq.h
866
void __blk_mq_end_request(struct request *rq, blk_status_t error);
include/linux/blk-mq.h
873
static inline bool blk_mq_need_time_stamp(struct request *rq)
include/linux/blk-mq.h
875
return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
include/linux/blk-mq.h
878
static inline bool blk_mq_is_reserved_rq(struct request *rq)
include/linux/blk-mq.h
880
return rq->rq_flags & RQF_RESV;
include/linux/blk-mq.h
926
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
include/linux/blk-mq.h
929
void blk_mq_complete_request(struct request *rq);
include/linux/blk-mq.h
930
bool blk_mq_complete_request_remote(struct request *rq);
include/linux/blk-mq.h
981
unsigned int blk_mq_rq_cpu(struct request *rq);
include/linux/blkdev.h
697
#define blk_noretry_request(rq) \
include/linux/blkdev.h
698
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
include/linux/blktrace_api.h
116
static inline sector_t blk_rq_trace_sector(struct request *rq)
include/linux/blktrace_api.h
122
if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
include/linux/blktrace_api.h
124
return blk_rq_pos(rq);
include/linux/blktrace_api.h
127
static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
include/linux/blktrace_api.h
129
return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
include/linux/blktrace_api.h
76
extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
include/linux/blktrace_api.h
86
# define blk_add_driver_data(rq, data, len) do {} while (0)
include/linux/device-mapper.h
64
struct request *rq,
include/linux/fsl/ptp_qoriq.h
195
struct ptp_clock_request *rq, int on);
include/linux/io_uring/cmd.h
185
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
include/linux/libata.h
1155
bool ata_scsi_dma_need_drain(struct request *rq);
include/linux/mii.h
51
static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
include/linux/mii.h
53
return (struct mii_ioctl_data *) &rq->ifr_ifru;
include/linux/mtd/blktrans.h
32
struct request_queue *rq;
include/linux/pci.h
1456
int pcie_set_readrq(struct pci_dev *dev, int rq);
include/linux/sched.h
2382
struct rq;
include/linux/sched.h
2383
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
include/linux/sched.h
738
struct rq *rq;
include/linux/sched.h
81
struct rq;
include/linux/sched/nohz.h
18
void calc_load_nohz_remote(struct rq *rq);
include/linux/sched/nohz.h
22
static inline void calc_load_nohz_remote(struct rq *rq) { }
include/linux/t10-pi.h
40
static inline u32 t10_pi_ref_tag(struct request *rq)
include/linux/t10-pi.h
42
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
include/linux/t10-pi.h
45
rq->q->limits.integrity.interval_exp)
include/linux/t10-pi.h
46
shift = rq->q->limits.integrity.interval_exp;
include/linux/t10-pi.h
47
return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
include/linux/t10-pi.h
65
static inline u64 ext_pi_ref_tag(struct request *rq)
include/linux/t10-pi.h
67
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
include/linux/t10-pi.h
70
rq->q->limits.integrity.interval_exp)
include/linux/t10-pi.h
71
shift = rq->q->limits.integrity.interval_exp;
include/linux/t10-pi.h
72
return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
include/linux/usb/usbnet.h
294
extern int usbnet_mii_ioctl(struct net_device *net, struct ifreq *rq, int cmd);
include/net/mana/gdma.h
115
} rq;
include/rdma/rdmavt_qp.h
277
static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
include/rdma/rdmavt_qp.h
282
count += rq->size;
include/rdma/rdmavt_qp.h
454
struct rvt_rq rq;
include/rdma/rdmavt_qp.h
545
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
include/rdma/rdmavt_qp.h
548
((char *)rq->kwq->curr_wq +
include/rdma/rdmavt_qp.h
550
rq->max_sge * sizeof(struct ib_sge)) * n);
include/rdma/rdmavt_qp.h
955
static inline void rvt_free_rq(struct rvt_rq *rq)
include/rdma/rdmavt_qp.h
957
kvfree(rq->kwq);
include/rdma/rdmavt_qp.h
958
rq->kwq = NULL;
include/rdma/rdmavt_qp.h
959
vfree(rq->wq);
include/rdma/rdmavt_qp.h
960
rq->wq = NULL;
include/scsi/scsi_cmnd.h
306
struct request *rq = blk_mq_rq_from_pdu(scmd);
include/scsi/scsi_cmnd.h
308
return t10_pi_ref_tag(rq);
include/scsi/scsi_host.h
319
bool (* dma_need_drain)(struct request *rq);
include/soc/mscc/ocelot_ptp.h
56
struct ptp_clock_request *rq, int on);
include/trace/events/block.h
100
__entry->sector = blk_rq_trace_sector(rq);
include/trace/events/block.h
101
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
include/trace/events/block.h
102
__entry->ioprio = req_get_ioprio(rq);
include/trace/events/block.h
104
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
include/trace/events/block.h
120
TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
include/trace/events/block.h
122
TP_ARGS(rq, error, nr_bytes),
include/trace/events/block.h
135
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
include/trace/events/block.h
136
__entry->sector = blk_rq_pos(rq);
include/trace/events/block.h
139
__entry->ioprio = req_get_ioprio(rq);
include/trace/events/block.h
141
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
include/trace/events/block.h
169
TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
include/trace/events/block.h
171
TP_ARGS(rq, error, nr_bytes)
include/trace/events/block.h
185
TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),
include/trace/events/block.h
187
TP_ARGS(rq, error, nr_bytes)
include/trace/events/block.h
192
TP_PROTO(struct request *rq),
include/trace/events/block.h
194
TP_ARGS(rq),
include/trace/events/block.h
208
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
include/trace/events/block.h
209
__entry->sector = blk_rq_trace_sector(rq);
include/trace/events/block.h
210
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
include/trace/events/block.h
211
__entry->bytes = blk_rq_bytes(rq);
include/trace/events/block.h
212
__entry->ioprio = req_get_ioprio(rq);
include/trace/events/block.h
214
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
include/trace/events/block.h
240
TP_PROTO(struct request *rq),
include/trace/events/block.h
242
TP_ARGS(rq)
include/trace/events/block.h
254
TP_PROTO(struct request *rq),
include/trace/events/block.h
256
TP_ARGS(rq)
include/trace/events/block.h
268
TP_PROTO(struct request *rq),
include/trace/events/block.h
270
TP_ARGS(rq)
include/trace/events/block.h
281
TP_PROTO(struct request *rq),
include/trace/events/block.h
283
TP_ARGS(rq)
include/trace/events/block.h
294
TP_PROTO(struct request *rq),
include/trace/events/block.h
296
TP_ARGS(rq)
include/trace/events/block.h
414
TP_PROTO(struct request *rq),
include/trace/events/block.h
415
TP_ARGS(rq)
include/trace/events/block.h
570
TP_PROTO(struct request *rq, dev_t dev, sector_t from),
include/trace/events/block.h
572
TP_ARGS(rq, dev, from),
include/trace/events/block.h
585
__entry->dev = disk_devt(rq->q->disk);
include/trace/events/block.h
586
__entry->sector = blk_rq_pos(rq);
include/trace/events/block.h
587
__entry->nr_sector = blk_rq_sectors(rq);
include/trace/events/block.h
590
__entry->nr_bios = blk_rq_count_bios(rq);
include/trace/events/block.h
591
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
include/trace/events/block.h
85
TP_PROTO(struct request *rq),
include/trace/events/block.h
87
TP_ARGS(rq),
include/trace/events/block.h
99
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
include/trace/events/nbd.h
64
struct request *rq),
include/trace/events/nbd.h
66
TP_ARGS(nbd_request, index, rq),
include/trace/events/nbd.h
77
__entry->request = rq;
include/trace/events/nbd.h
97
struct request *rq),
include/trace/events/nbd.h
99
TP_ARGS(nbd_request, index, rq),
include/trace/events/sched.h
838
TP_PROTO(struct rq *rq),
include/trace/events/sched.h
839
TP_ARGS(rq));
include/trace/events/sched.h
842
TP_PROTO(struct rq *rq),
include/trace/events/sched.h
843
TP_ARGS(rq));
include/trace/events/sched.h
846
TP_PROTO(struct rq *rq),
include/trace/events/sched.h
847
TP_ARGS(rq));
include/trace/events/sched.h
850
TP_PROTO(struct rq *rq),
include/trace/events/sched.h
851
TP_ARGS(rq));
include/trace/events/sched.h
858
TP_PROTO(struct rq *rq),
include/trace/events/sched.h
859
TP_ARGS(rq));
include/trace/events/sched.h
874
TP_PROTO(struct rq *rq, int change),
include/trace/events/sched.h
875
TP_ARGS(rq, change));
include/uapi/rdma/ionic-abi.h
100
struct ionic_qdesc rq;
include/uapi/rdma/ionic-abi.h
80
struct ionic_qdesc rq;
io_uring/rsrc.c
927
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
io_uring/rsrc.c
962
imu = io_alloc_imu(ctx, blk_rq_nr_phys_segments(rq));
io_uring/rsrc.c
970
imu->len = blk_rq_bytes(rq);
io_uring/rsrc.c
975
imu->priv = rq;
io_uring/rsrc.c
977
imu->dir = 1 << rq_data_dir(rq);
io_uring/rsrc.c
979
rq_for_each_bvec(bv, rq, rq_iter)
kernel/sched/core.c
10323
void call_trace_sched_update_nr_running(struct rq *rq, int count)
kernel/sched/core.c
10325
trace_sched_update_nr_running_tp(rq, count);
kernel/sched/core.c
10586
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
10589
guard(rq_lock_irq)(rq);
kernel/sched/core.c
10596
if (rq->curr->mm == mm && rq->curr->mm_cid.active)
kernel/sched/core.c
10597
mm_cid_transit_to_task(rq->curr, pcp);
kernel/sched/core.c
10601
} else if (rq->curr->mm == mm && rq->curr->mm_cid.active) {
kernel/sched/core.c
10602
unsigned int cid = rq->curr->mm_cid.cid;
kernel/sched/core.c
10607
rq->curr->mm_cid.cid = cid;
kernel/sched/core.c
10872
struct rq *rq = task_rq(p);
kernel/sched/core.c
10880
lockdep_assert_rq_held(rq);
kernel/sched/core.c
10883
update_rq_clock(rq);
kernel/sched/core.c
10888
p->sched_class->switching_from(rq, p);
kernel/sched/core.c
10895
.running = task_current_donor(rq, p),
kernel/sched/core.c
10900
ctx->prio = p->sched_class->get_prio(rq, p);
kernel/sched/core.c
10906
dequeue_task(rq, p, flags);
kernel/sched/core.c
10908
put_prev_task(rq, p);
kernel/sched/core.c
10911
p->sched_class->switched_from(rq, p);
kernel/sched/core.c
10919
struct rq *rq = task_rq(p);
kernel/sched/core.c
10921
lockdep_assert_rq_held(rq);
kernel/sched/core.c
10929
p->sched_class->switching_to(rq, p);
kernel/sched/core.c
10932
enqueue_task(rq, p, ctx->flags);
kernel/sched/core.c
10934
set_next_task(rq, p);
kernel/sched/core.c
10938
p->sched_class->switched_to(rq, p);
kernel/sched/core.c
10948
rq->next_class->wakeup_preempt(rq, p, 0);
kernel/sched/core.c
10949
rq->next_class = p->sched_class;
kernel/sched/core.c
10956
resched_curr(rq);
kernel/sched/core.c
10959
p->sched_class->prio_changed(rq, p, ctx->prio);
kernel/sched/core.c
1112
static void __resched_curr(struct rq *rq, int tif)
kernel/sched/core.c
1114
struct task_struct *curr = rq->curr;
kernel/sched/core.c
1118
lockdep_assert_rq_held(rq);
kernel/sched/core.c
1130
cpu = cpu_of(rq);
kernel/sched/core.c
1154
void resched_curr(struct rq *rq)
kernel/sched/core.c
1156
__resched_curr(rq, TIF_NEED_RESCHED);
kernel/sched/core.c
1180
void resched_curr_lazy(struct rq *rq)
kernel/sched/core.c
1182
__resched_curr(rq, get_lazy_tif_bit());
kernel/sched/core.c
1187
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
1190
raw_spin_rq_lock_irqsave(rq, flags);
kernel/sched/core.c
1192
resched_curr(rq);
kernel/sched/core.c
1193
raw_spin_rq_unlock_irqrestore(rq, flags);
kernel/sched/core.c
1249
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
126
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
kernel/sched/core.c
1276
if (set_nr_and_not_polling(task_thread_info(rq->idle), TIF_NEED_RESCHED))
kernel/sched/core.c
1315
struct rq *rq = info;
kernel/sched/core.c
1316
int cpu = cpu_of(rq);
kernel/sched/core.c
1325
rq->idle_balance = idle_cpu(cpu);
kernel/sched/core.c
1326
if (rq->idle_balance) {
kernel/sched/core.c
1327
rq->nohz_idle_balance = flags;
kernel/sched/core.c
1335
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
kernel/sched/core.c
1337
if (rq->nr_running != 1)
kernel/sched/core.c
1349
bool sched_can_stop_tick(struct rq *rq)
kernel/sched/core.c
1354
if (rq->dl.dl_nr_running)
kernel/sched/core.c
1361
if (rq->rt.rr_nr_running) {
kernel/sched/core.c
1362
if (rq->rt.rr_nr_running == 1)
kernel/sched/core.c
1372
fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
kernel/sched/core.c
1381
if (scx_enabled() && !scx_can_stop_tick(rq))
kernel/sched/core.c
1384
if (rq->cfs.h_nr_queued > 1)
kernel/sched/core.c
1394
if (__need_bw_check(rq, rq->curr)) {
kernel/sched/core.c
1395
if (cfs_task_bw_constrained(rq->curr))
kernel/sched/core.c
1530
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
kernel/sched/core.c
1539
rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
kernel/sched/core.c
1546
static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
kernel/sched/core.c
1550
if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
kernel/sched/core.c
1553
uclamp_rq_set(rq, clamp_id, clamp_value);
kernel/sched/core.c
1557
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
kernel/sched/core.c
1560
struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
kernel/sched/core.c
1574
return uclamp_idle_value(rq, clamp_id, clamp_value);
kernel/sched/core.c
1675
static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
kernel/sched/core.c
1678
struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
kernel/sched/core.c
1682
lockdep_assert_rq_held(rq);
kernel/sched/core.c
1691
uclamp_idle_reset(rq, clamp_id, uc_se->value);
kernel/sched/core.c
1700
if (uc_se->value > uclamp_rq_get(rq, clamp_id))
kernel/sched/core.c
1701
uclamp_rq_set(rq, clamp_id, uc_se->value);
kernel/sched/core.c
1713
static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
kernel/sched/core.c
1716
struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
kernel/sched/core.c
1722
lockdep_assert_rq_held(rq);
kernel/sched/core.c
1767
rq_clamp = uclamp_rq_get(rq, clamp_id);
kernel/sched/core.c
1774
bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
kernel/sched/core.c
1775
uclamp_rq_set(rq, clamp_id, bkt_clamp);
kernel/sched/core.c
1779
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
1800
uclamp_rq_inc_id(rq, p, clamp_id);
kernel/sched/core.c
1803
if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
kernel/sched/core.c
1804
rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
kernel/sched/core.c
1807
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
kernel/sched/core.c
1827
uclamp_rq_dec_id(rq, p, clamp_id);
kernel/sched/core.c
1830
static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
kernel/sched/core.c
1836
uclamp_rq_dec_id(rq, p, clamp_id);
kernel/sched/core.c
1837
uclamp_rq_inc_id(rq, p, clamp_id);
kernel/sched/core.c
1843
if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
kernel/sched/core.c
1844
rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
kernel/sched/core.c
1852
struct rq *rq;
kernel/sched/core.c
1862
rq = task_rq_lock(p, &rf);
kernel/sched/core.c
1871
uclamp_rq_reinc_id(rq, p, clamp_id);
kernel/sched/core.c
1873
task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
2024
static void __init init_uclamp_rq(struct rq *rq)
kernel/sched/core.c
2027
struct uclamp_rq *uc_rq = rq->uclamp;
kernel/sched/core.c
2035
rq->uclamp_flags = UCLAMP_FLAG_IDLE;
kernel/sched/core.c
2064
static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
kernel/sched/core.c
2065
static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
kernel/sched/core.c
2095
void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
2098
update_rq_clock(rq);
kernel/sched/core.c
2105
uclamp_rq_inc(rq, p, flags);
kernel/sched/core.c
2107
p->sched_class->enqueue_task(rq, p, flags);
kernel/sched/core.c
2112
sched_info_enqueue(rq, p);
kernel/sched/core.c
2114
if (sched_core_enabled(rq))
kernel/sched/core.c
2115
sched_core_enqueue(rq, p);
kernel/sched/core.c
2121
inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
2123
if (sched_core_enabled(rq))
kernel/sched/core.c
2124
sched_core_dequeue(rq, p, flags);
kernel/sched/core.c
2127
update_rq_clock(rq);
kernel/sched/core.c
2130
sched_info_dequeue(rq, p);
kernel/sched/core.c
2138
uclamp_rq_dec(rq, p);
kernel/sched/core.c
2139
return p->sched_class->dequeue_task(rq, p, flags);
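The enqueue_task()/dequeue_task() entries above (kernel/sched/core.c:2095-2139) show a fixed ordering: update the rq clock, adjust the rq-level uclamp accounting, then call into the task's sched_class hook. The following is only a minimal compilable sketch of that ordering; the *_sketch types and stub helpers are hypothetical stand-ins, not the kernel's real structures or functions.

#include <stdbool.h>

struct rq_sketch;
struct task_sketch;

struct sched_class_sketch {
	void (*enqueue_task)(struct rq_sketch *rq, struct task_sketch *p, int flags);
	bool (*dequeue_task)(struct rq_sketch *rq, struct task_sketch *p, int flags);
};

struct task_sketch {
	const struct sched_class_sketch *sched_class;
};

struct rq_sketch {
	unsigned int nr_running;
};

/* stand-ins for the helpers named in the listing */
static void update_rq_clock_sketch(struct rq_sketch *rq) { (void)rq; }
static void uclamp_rq_inc_sketch(struct rq_sketch *rq, struct task_sketch *p) { (void)rq; (void)p; }
static void uclamp_rq_dec_sketch(struct rq_sketch *rq, struct task_sketch *p) { (void)rq; (void)p; }

static void enqueue_task_sketch(struct rq_sketch *rq, struct task_sketch *p, int flags)
{
	update_rq_clock_sketch(rq);	/* fresh clock before any accounting */
	uclamp_rq_inc_sketch(rq, p);	/* rq-wide clamp buckets gain this task */
	p->sched_class->enqueue_task(rq, p, flags);
}

static bool dequeue_task_sketch(struct rq_sketch *rq, struct task_sketch *p, int flags)
{
	update_rq_clock_sketch(rq);
	uclamp_rq_dec_sketch(rq, p);	/* drop the task's clamp contribution */
	return p->sched_class->dequeue_task(rq, p, flags);
}

static void enq_stub(struct rq_sketch *rq, struct task_sketch *p, int flags)
{ (void)rq; (void)p; (void)flags; }
static bool deq_stub(struct rq_sketch *rq, struct task_sketch *p, int flags)
{ (void)rq; (void)p; (void)flags; return true; }

int main(void)
{
	static const struct sched_class_sketch cls = { enq_stub, deq_stub };
	struct task_sketch p = { .sched_class = &cls };
	struct rq_sketch rq = { .nr_running = 0 };

	enqueue_task_sketch(&rq, &p, 0);
	return dequeue_task_sketch(&rq, &p, 0) ? 0 : 1;
}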
kernel/sched/core.c
2142
void activate_task(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
2147
enqueue_task(rq, p, flags);
kernel/sched/core.c
2153
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
2165
dequeue_task(rq, p, flags);
kernel/sched/core.c
2168
static void block_task(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
2170
if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags))
kernel/sched/core.c
2171
__block_task(rq, p);
kernel/sched/core.c
2185
void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
2187
struct task_struct *donor = rq->donor;
kernel/sched/core.c
2189
if (p->sched_class == rq->next_class) {
kernel/sched/core.c
2190
rq->next_class->wakeup_preempt(rq, p, flags);
kernel/sched/core.c
2192
} else if (sched_class_above(p->sched_class, rq->next_class)) {
kernel/sched/core.c
2193
rq->next_class->wakeup_preempt(rq, p, flags);
kernel/sched/core.c
2194
resched_curr(rq);
kernel/sched/core.c
2195
rq->next_class = p->sched_class;
kernel/sched/core.c
2202
if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
kernel/sched/core.c
2203
rq_clock_skip_update(rq);
kernel/sched/core.c
2250
struct rq *rq;
kernel/sched/core.c
2259
rq = task_rq(p);
kernel/sched/core.c
2272
while (task_on_cpu(rq, p)) {
kernel/sched/core.c
2283
rq = task_rq_lock(p, &rf);
kernel/sched/core.c
2289
dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
kernel/sched/core.c
2291
running = task_on_cpu(rq, p);
kernel/sched/core.c
2303
task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
2353
static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
kernel/sched/core.c
2356
.new_mask = cpumask_of(rq->cpu),
kernel/sched/core.c
2394
static inline bool rq_has_pinned_tasks(struct rq *rq)
kernel/sched/core.c
2396
return rq->nr_pinned;
kernel/sched/core.c
2448
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
kernel/sched/core.c
2450
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
2452
lockdep_assert_rq_held(rq);
kernel/sched/core.c
2454
deactivate_task(rq, p, DEQUEUE_NOCLOCK);
kernel/sched/core.c
2456
rq_unlock(rq, rf);
kernel/sched/core.c
2458
rq = cpu_rq(new_cpu);
kernel/sched/core.c
2460
rq_lock(rq, rf);
kernel/sched/core.c
2462
activate_task(rq, p, 0);
kernel/sched/core.c
2463
wakeup_preempt(rq, p, 0);
kernel/sched/core.c
2465
return rq;
kernel/sched/core.c
2495
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
kernel/sched/core.c
2497
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
2501
return rq;
kernel/sched/core.c
2503
rq = move_queued_task(rq, rf, p, dest_cpu);
kernel/sched/core.c
2505
return rq;
kernel/sched/core.c
2518
struct rq *rq = this_rq();
kernel/sched/core.c
2538
context_unsafe_alias(rq);
kernel/sched/core.c
2541
rq_lock(rq, &rf);
kernel/sched/core.c
2554
if (task_rq(p) == rq) {
kernel/sched/core.c
2567
update_rq_clock(rq);
kernel/sched/core.c
2568
rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
kernel/sched/core.c
2608
rq_unlock(rq, &rf);
kernel/sched/core.c
2618
rq_unlock(rq, &rf);
kernel/sched/core.c
2629
struct rq *lowest_rq = NULL, *rq = this_rq();
kernel/sched/core.c
2633
raw_spin_rq_lock(rq);
kernel/sched/core.c
2635
if (task_rq(p) != rq)
kernel/sched/core.c
2646
lowest_rq = p->sched_class->find_lock_rq(p, rq);
kernel/sched/core.c
2654
if (task_rq(p) == rq) {
kernel/sched/core.c
2655
move_queued_task_locked(rq, lowest_rq, p);
kernel/sched/core.c
2659
double_unlock_balance(rq, lowest_rq);
kernel/sched/core.c
2662
rq->push_busy = false;
kernel/sched/core.c
2663
raw_spin_rq_unlock(rq);
kernel/sched/core.c
2861
static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
kernel/sched/core.c
2863
__releases(__rq_lockp(rq), &p->pi_lock)
kernel/sched/core.c
2876
(task_current_donor(rq, p) && !task_current(rq, p))) {
kernel/sched/core.c
2880
(p->migration_flags & MDF_PUSH) && !rq->push_busy) {
kernel/sched/core.c
2881
rq->push_busy = true;
kernel/sched/core.c
2896
task_rq_unlock(rq, p, rf);
kernel/sched/core.c
2898
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
kernel/sched/core.c
2899
p, &rq->push_work);
kernel/sched/core.c
2950
task_rq_unlock(rq, p, rf);
kernel/sched/core.c
2954
if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
kernel/sched/core.c
2968
task_rq_unlock(rq, p, rf);
kernel/sched/core.c
2970
stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
kernel/sched/core.c
2981
rq = move_queued_task(rq, rf, p, dest_cpu);
kernel/sched/core.c
2988
task_rq_unlock(rq, p, rf);
kernel/sched/core.c
3016
struct rq *rq,
kernel/sched/core.c
3018
__releases(__rq_lockp(rq), &p->pi_lock)
kernel/sched/core.c
302
void sched_core_enqueue(struct rq *rq, struct task_struct *p)
kernel/sched/core.c
307
rq->core->core_task_seq++;
kernel/sched/core.c
3082
return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
kernel/sched/core.c
3085
task_rq_unlock(rq, p, rf);
kernel/sched/core.c
3102
struct rq *rq;
kernel/sched/core.c
3104
rq = task_rq_lock(p, &rf);
kernel/sched/core.c
3111
cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
kernel/sched/core.c
3112
ctx->new_mask = rq->scratch_mask;
kernel/sched/core.c
3114
return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
kernel/sched/core.c
312
rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
kernel/sched/core.c
3146
struct rq *rq;
kernel/sched/core.c
3149
rq = task_rq_lock(p, &rf);
kernel/sched/core.c
315
void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/core.c
3166
return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
kernel/sched/core.c
3169
task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
320
rq->core->core_task_seq++;
kernel/sched/core.c
323
rb_erase(&p->core_node, &rq->core_tree);
kernel/sched/core.c
3300
struct rq *src_rq, *dst_rq;
kernel/sched/core.c
332
if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
kernel/sched/core.c
333
rq->core->core_forceidle_count && rq->curr == rq->idle)
kernel/sched/core.c
3333
struct rq *src_rq, *dst_rq;
kernel/sched/core.c
334
resched_curr(rq);
kernel/sched/core.c
3591
struct rq *rq;
kernel/sched/core.c
3596
rq = this_rq();
kernel/sched/core.c
3598
if (cpu == rq->cpu) {
kernel/sched/core.c
3599
__schedstat_inc(rq->ttwu_local);
kernel/sched/core.c
3607
for_each_domain(rq->cpu, sd) {
kernel/sched/core.c
3618
__schedstat_inc(rq->ttwu_count);
kernel/sched/core.c
3634
void update_rq_avg_idle(struct rq *rq)
kernel/sched/core.c
3636
u64 delta = rq_clock(rq) - rq->idle_stamp;
kernel/sched/core.c
3637
u64 max = 2*rq->max_idle_balance_cost;
kernel/sched/core.c
3639
update_avg(&rq->avg_idle, delta);
kernel/sched/core.c
3641
if (rq->avg_idle > max)
kernel/sched/core.c
3642
rq->avg_idle = max;
kernel/sched/core.c
3643
rq->idle_stamp = 0;
kernel/sched/core.c
3647
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
kernel/sched/core.c
3652
lockdep_assert_rq_held(rq);
kernel/sched/core.c
3655
rq->nr_uninterruptible--;
kernel/sched/core.c
3667
activate_task(rq, p, en_flags);
kernel/sched/core.c
3668
wakeup_preempt(rq, p, wake_flags);
kernel/sched/core.c
3677
rq_unpin_lock(rq, rf);
kernel/sched/core.c
3678
p->sched_class->task_woken(rq, p);
kernel/sched/core.c
3679
rq_repin_lock(rq, rf);
kernel/sched/core.c
368
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
kernel/sched/core.c
3711
struct rq *rq;
kernel/sched/core.c
3714
rq = __task_rq_lock(p, &rf);
kernel/sched/core.c
3716
update_rq_clock(rq);
kernel/sched/core.c
3718
enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
kernel/sched/core.c
3719
if (!task_on_cpu(rq, p)) {
kernel/sched/core.c
3724
wakeup_preempt(rq, p, wake_flags);
kernel/sched/core.c
3729
__task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
373
node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
kernel/sched/core.c
3737
struct rq *rq = this_rq();
kernel/sched/core.c
3744
rq_lock_irqsave(rq, &rf);
kernel/sched/core.c
3745
update_rq_clock(rq);
kernel/sched/core.c
3751
if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
kernel/sched/core.c
3752
set_task_cpu(p, cpu_of(rq));
kernel/sched/core.c
3754
ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
kernel/sched/core.c
3767
WRITE_ONCE(rq->ttwu_pending, 0);
kernel/sched/core.c
3768
rq_unlock_irqrestore(rq, &rf);
kernel/sched/core.c
378
if (!sched_task_is_throttled(p, rq->cpu))
kernel/sched/core.c
3795
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
3799
WRITE_ONCE(rq->ttwu_pending, 1);
kernel/sched/core.c
3807
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
3810
if (is_idle_task(rcu_dereference(rq->curr))) {
kernel/sched/core.c
3811
guard(rq_lock_irqsave)(rq);
kernel/sched/core.c
3812
if (is_idle_task(rq->curr))
kernel/sched/core.c
3813
resched_curr(rq);
kernel/sched/core.c
3910
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
3916
rq_lock(rq, &rf);
kernel/sched/core.c
3917
update_rq_clock(rq);
kernel/sched/core.c
3918
ttwu_do_activate(rq, p, wake_flags, &rf);
kernel/sched/core.c
3919
rq_unlock(rq, &rf);
kernel/sched/core.c
4306
struct rq *rq = __task_rq_lock(p, &rf);
kernel/sched/core.c
4320
__task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
4348
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
4352
rq_lock_irqsave(rq, &rf);
kernel/sched/core.c
4355
rq_unlock_irqrestore(rq, &rf);
kernel/sched/core.c
4767
struct rq *rq;
kernel/sched/core.c
4782
rq = __task_rq_lock(p, &rf);
kernel/sched/core.c
4783
update_rq_clock(rq);
kernel/sched/core.c
4786
activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
kernel/sched/core.c
4788
wakeup_preempt(rq, p, wake_flags);
kernel/sched/core.c
4794
rq_unpin_lock(rq, &rf);
kernel/sched/core.c
4795
p->sched_class->task_woken(rq, p);
kernel/sched/core.c
4796
rq_repin_lock(rq, &rf);
kernel/sched/core.c
4798
task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
4916
static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
kernel/sched/core.c
4918
void (*func)(struct rq *rq);
kernel/sched/core.c
4921
lockdep_assert_rq_held(rq);
kernel/sched/core.c
4924
func = (void (*)(struct rq *))head->func;
kernel/sched/core.c
4929
func(rq);
kernel/sched/core.c
4933
static void balance_push(struct rq *rq);
kernel/sched/core.c
4952
__splice_balance_callbacks(struct rq *rq, bool split)
kernel/sched/core.c
4954
struct balance_callback *head = rq->balance_callback;
kernel/sched/core.c
4959
lockdep_assert_rq_held(rq);
kernel/sched/core.c
4971
rq->balance_callback = NULL;
kernel/sched/core.c
4976
struct balance_callback *splice_balance_callbacks(struct rq *rq)
kernel/sched/core.c
4978
return __splice_balance_callbacks(rq, true);
kernel/sched/core.c
4981
void __balance_callbacks(struct rq *rq, struct rq_flags *rf)
kernel/sched/core.c
4984
rq_unpin_lock(rq, rf);
kernel/sched/core.c
4985
do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
kernel/sched/core.c
4987
rq_repin_lock(rq, rf);
kernel/sched/core.c
4990
void balance_callbacks(struct rq *rq, struct balance_callback *head)
kernel/sched/core.c
4995
raw_spin_rq_lock_irqsave(rq, flags);
kernel/sched/core.c
4996
do_balance_callbacks(rq, head);
kernel/sched/core.c
4997
raw_spin_rq_unlock_irqrestore(rq, flags);
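The balance-callback entries above (kernel/sched/core.c:4916-4997) follow a splice-and-run pattern: the pending list hanging off rq->balance_callback is detached while the rq lock is held, then each callback is invoked exactly once. Below is a hedged user-space sketch of that pattern; the *_sketch names are illustrative stand-ins for the kernel types, and the locking is only implied by the comments.

struct rq_sketch;

struct balance_cb_sketch {
	struct balance_cb_sketch *next;
	void (*func)(struct rq_sketch *rq);
};

struct rq_sketch {
	struct balance_cb_sketch *balance_callback;	/* singly linked pending list */
};

/* Detach the pending list (in the kernel this happens under the rq lock)... */
static struct balance_cb_sketch *splice_callbacks_sketch(struct rq_sketch *rq)
{
	struct balance_cb_sketch *head = rq->balance_callback;

	rq->balance_callback = NULL;	/* new callbacks can be queued again */
	return head;
}

/* ...then walk it and invoke each callback once. */
static void run_callbacks_sketch(struct rq_sketch *rq, struct balance_cb_sketch *head)
{
	while (head) {
		struct balance_cb_sketch *next = head->next;

		head->next = NULL;
		head->func(rq);
		head = next;
	}
}

static void note_sketch(struct rq_sketch *rq) { (void)rq; }

int main(void)
{
	struct balance_cb_sketch cb = { .next = NULL, .func = note_sketch };
	struct rq_sketch rq = { .balance_callback = &cb };

	run_callbacks_sketch(&rq, splice_callbacks_sketch(&rq));
	return 0;
}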
kernel/sched/core.c
5002
prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
kernel/sched/core.c
5003
__releases(__rq_lockp(rq))
kernel/sched/core.c
5012
rq_unpin_lock(rq, rf);
kernel/sched/core.c
5013
spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_);
kernel/sched/core.c
5016
rq_lockp(rq)->owner = next;
kernel/sched/core.c
5021
__release(__rq_lockp(rq));
kernel/sched/core.c
5025
static inline void finish_lock_switch(struct rq *rq)
kernel/sched/core.c
5026
__releases(__rq_lockp(rq))
kernel/sched/core.c
5033
spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_);
kernel/sched/core.c
5034
__balance_callbacks(rq, NULL);
kernel/sched/core.c
5035
raw_spin_rq_unlock_irq(rq);
kernel/sched/core.c
5080
prepare_task_switch(struct rq *rq, struct task_struct *prev,
kernel/sched/core.c
5082
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
5085
sched_info_switch(rq, prev, next);
kernel/sched/core.c
5112
static struct rq *finish_task_switch(struct task_struct *prev)
kernel/sched/core.c
5115
struct rq *rq = this_rq();
kernel/sched/core.c
5116
struct mm_struct *mm = rq->prev_mm;
kernel/sched/core.c
5135
rq->prev_mm = NULL;
kernel/sched/core.c
5153
finish_lock_switch(rq);
kernel/sched/core.c
5201
return rq;
kernel/sched/core.c
5238
static __always_inline struct rq *
kernel/sched/core.c
5239
context_switch(struct rq *rq, struct task_struct *prev,
kernel/sched/core.c
5241
__releases(__rq_lockp(rq))
kernel/sched/core.c
5243
prepare_task_switch(rq, prev, next);
kernel/sched/core.c
526
static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
kernel/sched/core.c
5268
membarrier_switch_mm(rq, prev->active_mm, next->mm);
kernel/sched/core.c
528
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
kernel/sched/core.c
5282
rq->prev_mm = prev->active_mm;
kernel/sched/core.c
5295
prepare_lock_switch(rq, next, rf);
kernel/sched/core.c
5461
struct rq *rq;
kernel/sched/core.c
5480
rq = task_rq_lock(p, &rf);
kernel/sched/core.c
5486
if (task_current_donor(rq, p) && task_on_rq_queued(p)) {
kernel/sched/core.c
5488
update_rq_clock(rq);
kernel/sched/core.c
5489
p->sched_class->update_curr(rq);
kernel/sched/core.c
5492
task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
5497
static u64 cpu_resched_latency(struct rq *rq)
kernel/sched/core.c
5500
u64 resched_latency, now = rq_clock(rq);
kernel/sched/core.c
5512
if (!rq->last_seen_need_resched_ns) {
kernel/sched/core.c
5513
rq->last_seen_need_resched_ns = now;
kernel/sched/core.c
5514
rq->ticks_without_resched = 0;
kernel/sched/core.c
5518
rq->ticks_without_resched++;
kernel/sched/core.c
5519
resched_latency = now - rq->last_seen_need_resched_ns;
kernel/sched/core.c
5549
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
5561
rq_lock(rq, &rf);
kernel/sched/core.c
5562
donor = rq->donor;
kernel/sched/core.c
5564
psi_account_irqtime(rq, donor, NULL);
kernel/sched/core.c
5566
update_rq_clock(rq);
kernel/sched/core.c
5567
hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
kernel/sched/core.c
5568
update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
kernel/sched/core.c
5571
resched_curr(rq);
kernel/sched/core.c
5573
donor->sched_class->task_tick(rq, donor, 0);
kernel/sched/core.c
5575
resched_latency = cpu_resched_latency(rq);
kernel/sched/core.c
5576
calc_global_load_tick(rq);
kernel/sched/core.c
5577
sched_core_tick(rq);
kernel/sched/core.c
5578
scx_tick(rq);
kernel/sched/core.c
5580
rq_unlock(rq, &rf);
kernel/sched/core.c
5591
rq->idle_balance = idle_cpu(cpu);
kernel/sched/core.c
5592
sched_balance_trigger(rq);
kernel/sched/core.c
5638
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
5649
guard(rq_lock_irq)(rq);
kernel/sched/core.c
5650
struct task_struct *curr = rq->curr;
kernel/sched/core.c
5658
WARN_ON_ONCE(rq->curr != rq->donor);
kernel/sched/core.c
5659
update_rq_clock(rq);
kernel/sched/core.c
5666
u64 delta = rq_clock_task(rq) - curr->se.exec_start;
kernel/sched/core.c
5669
curr->sched_class->task_tick(rq, curr, 0);
kernel/sched/core.c
5671
calc_load_nohz_remote(rq);
kernel/sched/core.c
5885
static void prev_balance(struct rq *rq, struct task_struct *prev,
kernel/sched/core.c
5900
if (class->balance && class->balance(rq, prev, rf))
kernel/sched/core.c
5909
__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
kernel/sched/core.c
5910
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
5915
rq->dl_server = NULL;
kernel/sched/core.c
5927
rq->nr_running == rq->cfs.h_nr_queued)) {
kernel/sched/core.c
5929
p = pick_next_task_fair(rq, prev, rf);
kernel/sched/core.c
5935
p = pick_task_idle(rq, rf);
kernel/sched/core.c
5936
put_prev_set_next_task(rq, prev, p);
kernel/sched/core.c
5943
prev_balance(rq, prev, rf);
kernel/sched/core.c
5947
p = class->pick_next_task(rq, prev, rf);
kernel/sched/core.c
5953
p = class->pick_task(rq, rf);
kernel/sched/core.c
5957
put_prev_set_next_task(rq, prev, p);
kernel/sched/core.c
5989
static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
kernel/sched/core.c
5994
rq->dl_server = NULL;
kernel/sched/core.c
5997
p = class->pick_task(rq, rf);
kernel/sched/core.c
6005
extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
kernel/sched/core.c
6007
static void queue_core_balance(struct rq *rq);
kernel/sched/core.c
6010
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
kernel/sched/core.c
6011
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
6016
bool core_clock_updated = (rq == rq->core);
kernel/sched/core.c
6019
struct rq *rq_i;
kernel/sched/core.c
6022
if (!sched_core_enabled(rq))
kernel/sched/core.c
6023
return __pick_next_task(rq, prev, rf);
kernel/sched/core.c
6025
cpu = cpu_of(rq);
kernel/sched/core.c
6034
rq->core_pick = NULL;
kernel/sched/core.c
6035
rq->core_dl_server = NULL;
kernel/sched/core.c
6036
return __pick_next_task(rq, prev, rf);
kernel/sched/core.c
6048
if (rq->core->core_pick_seq == rq->core->core_task_seq &&
kernel/sched/core.c
6049
rq->core->core_pick_seq != rq->core_sched_seq &&
kernel/sched/core.c
6050
rq->core_pick) {
kernel/sched/core.c
6051
WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
kernel/sched/core.c
6053
next = rq->core_pick;
kernel/sched/core.c
6054
rq->dl_server = rq->core_dl_server;
kernel/sched/core.c
6055
rq->core_pick = NULL;
kernel/sched/core.c
6056
rq->core_dl_server = NULL;
kernel/sched/core.c
6060
prev_balance(rq, prev, rf);
kernel/sched/core.c
6063
need_sync = !!rq->core->core_cookie;
kernel/sched/core.c
6066
rq->core->core_cookie = 0UL;
kernel/sched/core.c
6067
if (rq->core->core_forceidle_count) {
kernel/sched/core.c
6069
update_rq_clock(rq->core);
kernel/sched/core.c
6072
sched_core_account_forceidle(rq);
kernel/sched/core.c
6074
rq->core->core_forceidle_start = 0;
kernel/sched/core.c
6075
rq->core->core_forceidle_count = 0;
kernel/sched/core.c
6076
rq->core->core_forceidle_occupation = 0;
kernel/sched/core.c
6091
rq->core->core_task_seq++;
kernel/sched/core.c
6099
next = pick_task(rq, rf);
kernel/sched/core.c
6103
rq->core_pick = NULL;
kernel/sched/core.c
6104
rq->core_dl_server = NULL;
kernel/sched/core.c
6110
task_vruntime_update(rq, next, false);
kernel/sched/core.c
6131
if (i != cpu && (rq_i != rq->core || !core_clock_updated))
kernel/sched/core.c
6145
cookie = rq->core->core_cookie = max->core_cookie;
kernel/sched/core.c
6168
rq->core->core_forceidle_count++;
kernel/sched/core.c
6170
rq->core->core_forceidle_seq++;
kernel/sched/core.c
6177
if (schedstat_enabled() && rq->core->core_forceidle_count) {
kernel/sched/core.c
6178
rq->core->core_forceidle_start = rq_clock(rq->core);
kernel/sched/core.c
6179
rq->core->core_forceidle_occupation = occ;
kernel/sched/core.c
6182
rq->core->core_pick_seq = rq->core->core_task_seq;
kernel/sched/core.c
6183
next = rq->core_pick;
kernel/sched/core.c
6184
rq->core_sched_seq = rq->core->core_pick_seq;
kernel/sched/core.c
6218
if (!(fi_before && rq->core->core_forceidle_count))
kernel/sched/core.c
6219
task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count);
kernel/sched/core.c
6242
put_prev_set_next_task(rq, prev, next);
kernel/sched/core.c
6243
if (rq->core->core_forceidle_count && next == rq->idle)
kernel/sched/core.c
6244
queue_core_balance(rq);
kernel/sched/core.c
6251
struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
kernel/sched/core.c
6319
static void sched_core_balance(struct rq *rq)
kernel/sched/core.c
6320
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
6323
int cpu = cpu_of(rq);
kernel/sched/core.c
6328
raw_spin_rq_unlock_irq(rq);
kernel/sched/core.c
6336
raw_spin_rq_lock_irq(rq);
kernel/sched/core.c
6341
static void queue_core_balance(struct rq *rq)
kernel/sched/core.c
6343
if (!sched_core_enabled(rq))
kernel/sched/core.c
6346
if (!rq->core->core_cookie)
kernel/sched/core.c
6349
if (!rq->nr_running) /* not forced idle */
kernel/sched/core.c
6352
queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
kernel/sched/core.c
6363
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
kernel/sched/core.c
6368
WARN_ON_ONCE(rq->core != rq);
kernel/sched/core.c
6378
rq = cpu_rq(t);
kernel/sched/core.c
6379
if (rq->core == rq) {
kernel/sched/core.c
6380
core_rq = rq;
kernel/sched/core.c
639
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
kernel/sched/core.c
6390
rq = cpu_rq(t);
kernel/sched/core.c
6393
rq->core = core_rq;
kernel/sched/core.c
6395
WARN_ON_ONCE(rq->core != core_rq);
kernel/sched/core.c
6402
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
kernel/sched/core.c
6409
WARN_ON_ONCE(rq->core != rq);
kernel/sched/core.c
6414
if (rq->core != rq)
kernel/sched/core.c
6429
core_rq->core_task_seq = rq->core_task_seq;
kernel/sched/core.c
6430
core_rq->core_pick_seq = rq->core_pick_seq;
kernel/sched/core.c
6431
core_rq->core_cookie = rq->core_cookie;
kernel/sched/core.c
6432
core_rq->core_forceidle_count = rq->core_forceidle_count;
kernel/sched/core.c
6433
core_rq->core_forceidle_seq = rq->core_forceidle_seq;
kernel/sched/core.c
6434
core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
kernel/sched/core.c
6445
rq = cpu_rq(t);
kernel/sched/core.c
6446
rq->core = core_rq;
kernel/sched/core.c
6452
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
6454
if (rq->core != rq)
kernel/sched/core.c
6455
rq->core = rq;
kernel/sched/core.c
6465
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
kernel/sched/core.c
6466
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
6468
return __pick_next_task(rq, prev, rf);
kernel/sched/core.c
647
raw_spin_lock_nested(&rq->__lock, subclass);
kernel/sched/core.c
6492
static bool try_to_block_task(struct rq *rq, struct task_struct *p,
kernel/sched/core.c
6533
block_task(rq, p, flags);
kernel/sched/core.c
6538
static inline struct task_struct *proxy_resched_idle(struct rq *rq)
kernel/sched/core.c
654
lock = __rq_lockp(rq);
kernel/sched/core.c
6540
put_prev_set_next_task(rq, rq->donor, rq->idle);
kernel/sched/core.c
6541
rq_set_donor(rq, rq->idle);
kernel/sched/core.c
6542
set_tsk_need_resched(rq->idle);
kernel/sched/core.c
6543
return rq->idle;
kernel/sched/core.c
6546
static bool __proxy_deactivate(struct rq *rq, struct task_struct *donor)
kernel/sched/core.c
656
if (likely(lock == __rq_lockp(rq))) {
kernel/sched/core.c
6562
proxy_resched_idle(rq);
kernel/sched/core.c
6563
return try_to_block_task(rq, donor, &state, true);
kernel/sched/core.c
6566
static struct task_struct *proxy_deactivate(struct rq *rq, struct task_struct *donor)
kernel/sched/core.c
6568
if (!__proxy_deactivate(rq, donor)) {
kernel/sched/core.c
6595
find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
kernel/sched/core.c
6598
int this_cpu = cpu_of(rq);
kernel/sched/core.c
6633
return proxy_deactivate(rq, donor);
kernel/sched/core.c
6638
return proxy_deactivate(rq, donor);
kernel/sched/core.c
665
bool raw_spin_rq_trylock(struct rq *rq)
kernel/sched/core.c
6650
return proxy_resched_idle(rq);
kernel/sched/core.c
6687
return proxy_resched_idle(rq);
kernel/sched/core.c
6701
find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
kernel/sched/core.c
6708
static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
kernel/sched/core.c
6721
dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
kernel/sched/core.c
6722
enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
kernel/sched/core.c
674
ret = raw_spin_trylock(&rq->__lock);
kernel/sched/core.c
6776
struct rq *rq;
kernel/sched/core.c
6783
rq = cpu_rq(cpu);
kernel/sched/core.c
6784
prev = rq->curr;
kernel/sched/core.c
6789
hrtick_clear(rq);
kernel/sched/core.c
6795
migrate_disable_switch(rq, prev);
kernel/sched/core.c
680
lock = __rq_lockp(rq);
kernel/sched/core.c
6814
rq_lock(rq, &rf);
kernel/sched/core.c
6818
rq->clock_update_flags <<= 1;
kernel/sched/core.c
6819
update_rq_clock(rq);
kernel/sched/core.c
682
if (!ret || (likely(lock == __rq_lockp(rq)))) {
kernel/sched/core.c
6820
rq->clock_update_flags = RQCF_UPDATED;
kernel/sched/core.c
6834
if (!rq->nr_running && !scx_enabled()) {
kernel/sched/core.c
6836
rq->next_class = &idle_sched_class;
kernel/sched/core.c
6846
try_to_block_task(rq, prev, &prev_state,
kernel/sched/core.c
6852
next = pick_next_task(rq, rq->donor, &rf);
kernel/sched/core.c
6853
rq_set_donor(rq, next);
kernel/sched/core.c
6854
rq->next_class = next->sched_class;
kernel/sched/core.c
6856
next = find_proxy_task(rq, next, &rf);
kernel/sched/core.c
6859
if (next == rq->idle)
kernel/sched/core.c
6866
rq->last_seen_need_resched_ns = 0;
kernel/sched/core.c
6870
rq->nr_switches++;
kernel/sched/core.c
6875
RCU_INIT_POINTER(rq->curr, next);
kernel/sched/core.c
6877
if (!task_current_donor(rq, next))
kernel/sched/core.c
6878
proxy_tag_curr(rq, next);
kernel/sched/core.c
690
void raw_spin_rq_unlock(struct rq *rq)
kernel/sched/core.c
6904
psi_account_irqtime(rq, prev, next);
kernel/sched/core.c
6911
rq = context_switch(rq, prev, next, &rf);
kernel/sched/core.c
6914
if (!task_current_donor(rq, next))
kernel/sched/core.c
6915
proxy_tag_curr(rq, next);
kernel/sched/core.c
6917
rq_unpin_lock(rq, &rf);
kernel/sched/core.c
6918
__balance_callbacks(rq, NULL);
kernel/sched/core.c
6919
raw_spin_rq_unlock_irq(rq);
kernel/sched/core.c
692
raw_spin_unlock(rq_lockp(rq));
kernel/sched/core.c
698
void double_rq_lock(struct rq *rq1, struct rq *rq2)
kernel/sched/core.c
717
struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)
kernel/sched/core.c
719
struct rq *rq;
kernel/sched/core.c
724
rq = task_rq(p);
kernel/sched/core.c
725
raw_spin_rq_lock(rq);
kernel/sched/core.c
726
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
kernel/sched/core.c
727
rq_pin_lock(rq, rf);
kernel/sched/core.c
728
return rq;
kernel/sched/core.c
730
raw_spin_rq_unlock(rq);
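The ___task_rq_lock() entries above (kernel/sched/core.c:717-730, with the _task_rq_lock() variant listed further below) read task_rq(p) without holding any lock, so after acquiring that rq's lock the task-to-rq association must be re-checked and the attempt retried if the task migrated in between. The following is a self-contained user-space analogue of that retry loop under stated assumptions: a pthread mutex and C11 atomics stand in for the rq lock and the task's rq pointer; it is not kernel code.

#include <pthread.h>
#include <stdatomic.h>

struct rq_sketch {
	pthread_mutex_t lock;
};

struct task_sketch {
	_Atomic(struct rq_sketch *) rq;	/* may be changed concurrently by migration */
};

static struct rq_sketch *task_rq_lock_sketch(struct task_sketch *p)
{
	for (;;) {
		struct rq_sketch *rq = atomic_load(&p->rq);	/* unlocked read */

		pthread_mutex_lock(&rq->lock);
		if (atomic_load(&p->rq) == rq)
			return rq;			/* still attached: lock is valid */
		pthread_mutex_unlock(&rq->lock);	/* raced with migration: retry */
	}
}

int main(void)
{
	struct rq_sketch rq = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct task_sketch p;

	atomic_init(&p.rq, &rq);
	pthread_mutex_unlock(&task_rq_lock_sketch(&p)->lock);
	return 0;
}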
kernel/sched/core.c
7315
struct rq *rq;
kernel/sched/core.c
7326
rq = __task_rq_lock(p, &rf);
kernel/sched/core.c
7327
update_rq_clock(rq);
kernel/sched/core.c
7358
if (unlikely(p == rq->idle)) {
kernel/sched/core.c
7359
WARN_ON(p != rq->curr);
kernel/sched/core.c
740
struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
kernel/sched/core.c
7413
__balance_callbacks(rq, &rf);
kernel/sched/core.c
7414
__task_rq_unlock(rq, p, &rf);
kernel/sched/core.c
742
struct rq *rq;
kernel/sched/core.c
746
rq = task_rq(p);
kernel/sched/core.c
747
raw_spin_rq_lock(rq);
kernel/sched/core.c
765
if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
kernel/sched/core.c
766
rq_pin_lock(rq, rf);
kernel/sched/core.c
767
return rq;
kernel/sched/core.c
769
raw_spin_rq_unlock(rq);
kernel/sched/core.c
786
static void update_rq_clock_task(struct rq *rq, s64 delta)
kernel/sched/core.c
7938
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
7942
raw_spin_rq_lock(rq);
kernel/sched/core.c
796
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
kernel/sched/core.c
7972
rq->idle = idle;
kernel/sched/core.c
7973
rq_set_donor(rq, idle);
kernel/sched/core.c
7974
rcu_assign_pointer(rq->curr, idle);
kernel/sched/core.c
7977
raw_spin_rq_unlock(rq);
kernel/sched/core.c
8087
struct rq *rq = this_rq();
kernel/sched/core.c
8096
context_unsafe_alias(rq);
kernel/sched/core.c
8098
cpu = select_fallback_rq(rq->cpu, p);
kernel/sched/core.c
8100
rq_lock(rq, &rf);
kernel/sched/core.c
8101
update_rq_clock(rq);
kernel/sched/core.c
8102
if (task_rq(p) == rq && task_on_rq_queued(p))
kernel/sched/core.c
8103
rq = __migrate_task(rq, &rf, p, cpu);
kernel/sched/core.c
8104
rq_unlock(rq, &rf);
kernel/sched/core.c
8120
static void balance_push(struct rq *rq)
kernel/sched/core.c
8121
__must_hold(__rq_lockp(rq))
kernel/sched/core.c
8123
struct task_struct *push_task = rq->curr;
kernel/sched/core.c
8125
lockdep_assert_rq_held(rq);
kernel/sched/core.c
8130
rq->balance_callback = &balance_push_callback;
kernel/sched/core.c
8136
if (!cpu_dying(rq->cpu) || rq != this_rq())
kernel/sched/core.c
8157
if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
kernel/sched/core.c
8158
rcuwait_active(&rq->hotplug_wait)) {
kernel/sched/core.c
8159
raw_spin_rq_unlock(rq);
kernel/sched/core.c
816
rq->prev_irq_time += irq_delta;
kernel/sched/core.c
8160
rcuwait_wake_up(&rq->hotplug_wait);
kernel/sched/core.c
8161
raw_spin_rq_lock(rq);
kernel/sched/core.c
8172
raw_spin_rq_unlock(rq);
kernel/sched/core.c
8173
stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
kernel/sched/core.c
818
delayacct_irq(rq->curr, irq_delta);
kernel/sched/core.c
8181
raw_spin_rq_lock(rq);
kernel/sched/core.c
8186
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8189
rq_lock_irqsave(rq, &rf);
kernel/sched/core.c
8191
WARN_ON_ONCE(rq->balance_callback);
kernel/sched/core.c
8192
rq->balance_callback = &balance_push_callback;
kernel/sched/core.c
8193
} else if (rq->balance_callback == &balance_push_callback) {
kernel/sched/core.c
8194
rq->balance_callback = NULL;
kernel/sched/core.c
8196
rq_unlock_irqrestore(rq, &rf);
kernel/sched/core.c
8207
struct rq *rq = this_rq();
kernel/sched/core.c
8209
rcuwait_wait_event(&rq->hotplug_wait,
kernel/sched/core.c
8210
rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
kernel/sched/core.c
8216
static inline void balance_push(struct rq *rq)
kernel/sched/core.c
8230
void set_rq_online(struct rq *rq)
kernel/sched/core.c
8232
if (!rq->online) {
kernel/sched/core.c
8235
cpumask_set_cpu(rq->cpu, rq->rd->online);
kernel/sched/core.c
8236
rq->online = 1;
kernel/sched/core.c
8240
class->rq_online(rq);
kernel/sched/core.c
8245
void set_rq_offline(struct rq *rq)
kernel/sched/core.c
8247
if (rq->online) {
kernel/sched/core.c
825
steal = prev_steal = paravirt_steal_clock(cpu_of(rq));
kernel/sched/core.c
8250
update_rq_clock(rq);
kernel/sched/core.c
8253
class->rq_offline(rq);
kernel/sched/core.c
8256
cpumask_clear_cpu(rq->cpu, rq->rd->online);
kernel/sched/core.c
8257
rq->online = 0;
kernel/sched/core.c
826
steal -= rq->prev_steal_time_rq;
kernel/sched/core.c
8261
static inline void sched_set_rq_online(struct rq *rq, int cpu)
kernel/sched/core.c
8265
rq_lock_irqsave(rq, &rf);
kernel/sched/core.c
8266
if (rq->rd) {
kernel/sched/core.c
8267
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
kernel/sched/core.c
8268
set_rq_online(rq);
kernel/sched/core.c
8270
rq_unlock_irqrestore(rq, &rf);
kernel/sched/core.c
8273
static inline void sched_set_rq_offline(struct rq *rq, int cpu)
kernel/sched/core.c
8277
rq_lock_irqsave(rq, &rf);
kernel/sched/core.c
8278
if (rq->rd) {
kernel/sched/core.c
8279
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
kernel/sched/core.c
8280
set_rq_offline(rq);
kernel/sched/core.c
8282
rq_unlock_irqrestore(rq, &rf);
kernel/sched/core.c
831
rq->prev_steal_time_rq = prev_steal;
kernel/sched/core.c
8348
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
836
rq->clock_task += delta;
kernel/sched/core.c
8368
scx_rq_activate(rq);
kernel/sched/core.c
8379
sched_set_rq_online(rq, cpu);
kernel/sched/core.c
8386
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8398
nohz_balance_exit_idle(rq);
kernel/sched/core.c
840
update_irq_load_avg(rq, irq_delta + steal);
kernel/sched/core.c
842
update_rq_clock_pelt(rq, delta);
kernel/sched/core.c
8422
sched_set_rq_offline(rq, cpu);
kernel/sched/core.c
8424
scx_rq_deactivate(rq);
kernel/sched/core.c
8446
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
8448
rq->calc_load_update = calc_load_update;
kernel/sched/core.c
845
void update_rq_clock(struct rq *rq)
kernel/sched/core.c
8489
static void calc_load_migrate(struct rq *rq)
kernel/sched/core.c
8491
long delta = calc_load_fold_active(rq, 1);
kernel/sched/core.c
8497
static void dump_rq_tasks(struct rq *rq, const char *loglvl)
kernel/sched/core.c
850
lockdep_assert_rq_held(rq);
kernel/sched/core.c
8500
int cpu = cpu_of(rq);
kernel/sched/core.c
8502
lockdep_assert_rq_held(rq);
kernel/sched/core.c
8504
printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
kernel/sched/core.c
8518
struct rq *rq = cpu_rq(cpu);
kernel/sched/core.c
852
if (rq->clock_update_flags & RQCF_ACT_SKIP)
kernel/sched/core.c
8524
rq_lock_irqsave(rq, &rf);
kernel/sched/core.c
8525
update_rq_clock(rq);
kernel/sched/core.c
8526
if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
kernel/sched/core.c
8528
dump_rq_tasks(rq, KERN_WARNING);
kernel/sched/core.c
8530
dl_server_stop(&rq->fair_server);
kernel/sched/core.c
8532
dl_server_stop(&rq->ext_server);
kernel/sched/core.c
8534
rq_unlock_irqrestore(rq, &rf);
kernel/sched/core.c
8536
calc_load_migrate(rq);
kernel/sched/core.c
8538
hrtick_clear(rq);
kernel/sched/core.c
856
WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
kernel/sched/core.c
857
rq->clock_update_flags |= RQCF_UPDATED;
kernel/sched/core.c
859
clock = sched_clock_cpu(cpu_of(rq));
kernel/sched/core.c
860
scx_rq_clock_update(rq, clock);
kernel/sched/core.c
862
delta = clock - rq->clock;
kernel/sched/core.c
865
rq->clock += delta;
kernel/sched/core.c
8665
struct rq *rq;
kernel/sched/core.c
8667
rq = cpu_rq(i);
kernel/sched/core.c
8668
raw_spin_lock_init(&rq->__lock);
kernel/sched/core.c
8669
rq->nr_running = 0;
kernel/sched/core.c
867
update_rq_clock_task(rq, delta);
kernel/sched/core.c
8670
rq->calc_load_active = 0;
kernel/sched/core.c
8671
rq->calc_load_update = jiffies + LOAD_FREQ;
kernel/sched/core.c
8672
init_cfs_rq(&rq->cfs);
kernel/sched/core.c
8673
init_rt_rq(&rq->rt);
kernel/sched/core.c
8674
init_dl_rq(&rq->dl);
kernel/sched/core.c
8676
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
kernel/sched/core.c
8677
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
kernel/sched/core.c
8697
init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
kernel/sched/core.c
8706
rq->rt.rt_runtime = global_rt_runtime();
kernel/sched/core.c
8707
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
kernel/sched/core.c
8709
rq->next_class = &idle_sched_class;
kernel/sched/core.c
8711
rq->sd = NULL;
kernel/sched/core.c
8712
rq->rd = NULL;
kernel/sched/core.c
8713
rq->cpu_capacity = SCHED_CAPACITY_SCALE;
kernel/sched/core.c
8714
rq->balance_callback = &balance_push_callback;
kernel/sched/core.c
8715
rq->active_balance = 0;
kernel/sched/core.c
8716
rq->next_balance = jiffies;
kernel/sched/core.c
8717
rq->push_cpu = 0;
kernel/sched/core.c
8718
rq->cpu = i;
kernel/sched/core.c
8719
rq->online = 0;
kernel/sched/core.c
8720
rq->idle_stamp = 0;
kernel/sched/core.c
8721
rq->avg_idle = 2*sysctl_sched_migration_cost;
kernel/sched/core.c
8722
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
kernel/sched/core.c
8724
INIT_LIST_HEAD(&rq->cfs_tasks);
kernel/sched/core.c
8726
rq_attach_root(rq, &def_root_domain);
kernel/sched/core.c
8728
rq->last_blocked_load_update_tick = jiffies;
kernel/sched/core.c
8729
atomic_set(&rq->nohz_flags, 0);
kernel/sched/core.c
8731
INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
kernel/sched/core.c
8734
rcuwait_init(&rq->hotplug_wait);
kernel/sched/core.c
8736
hrtick_rq_init(rq);
kernel/sched/core.c
8737
atomic_set(&rq->nr_iowait, 0);
kernel/sched/core.c
8738
fair_server_init(rq);
kernel/sched/core.c
8740
ext_server_init(rq);
kernel/sched/core.c
8744
rq->core = rq;
kernel/sched/core.c
8745
rq->core_pick = NULL;
kernel/sched/core.c
8746
rq->core_dl_server = NULL;
kernel/sched/core.c
8747
rq->core_enabled = 0;
kernel/sched/core.c
8748
rq->core_tree = RB_ROOT;
kernel/sched/core.c
8749
rq->core_forceidle_count = 0;
kernel/sched/core.c
875
static void hrtick_clear(struct rq *rq)
kernel/sched/core.c
8750
rq->core_forceidle_occupation = 0;
kernel/sched/core.c
8751
rq->core_forceidle_start = 0;
kernel/sched/core.c
8753
rq->core_cookie = 0UL;
kernel/sched/core.c
8755
zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
kernel/sched/core.c
877
if (hrtimer_active(&rq->hrtick_timer))
kernel/sched/core.c
878
hrtimer_cancel(&rq->hrtick_timer);
kernel/sched/core.c
887
struct rq *rq = container_of(timer, struct rq, hrtick_timer);
kernel/sched/core.c
890
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
kernel/sched/core.c
892
rq_lock(rq, &rf);
kernel/sched/core.c
893
update_rq_clock(rq);
kernel/sched/core.c
894
rq->donor->sched_class->task_tick(rq, rq->donor, 1);
kernel/sched/core.c
895
rq_unlock(rq, &rf);
kernel/sched/core.c
900
static void __hrtick_restart(struct rq *rq)
kernel/sched/core.c
902
struct hrtimer *timer = &rq->hrtick_timer;
kernel/sched/core.c
903
ktime_t time = rq->hrtick_time;
kernel/sched/core.c
913
struct rq *rq = arg;
kernel/sched/core.c
916
rq_lock(rq, &rf);
kernel/sched/core.c
917
__hrtick_restart(rq);
kernel/sched/core.c
9174
struct rq *rq;
kernel/sched/core.c
9177
rq = rq_guard.rq;
kernel/sched/core.c
918
rq_unlock(rq, &rf);
kernel/sched/core.c
9189
resched_curr(rq);
kernel/sched/core.c
9191
wakeup_preempt(rq, tsk, 0);
kernel/sched/core.c
9193
__balance_callbacks(rq, &rq_guard.rf);
kernel/sched/core.c
926
void hrtick_start(struct rq *rq, u64 delay)
kernel/sched/core.c
928
struct hrtimer *timer = &rq->hrtick_timer;
kernel/sched/core.c
936
rq->hrtick_time = ktime_add_ns(hrtimer_cb_get_time(timer), delta);
kernel/sched/core.c
938
if (rq == this_rq())
kernel/sched/core.c
939
__hrtick_restart(rq);
kernel/sched/core.c
941
smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
kernel/sched/core.c
944
static void hrtick_rq_init(struct rq *rq)
kernel/sched/core.c
946
INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
kernel/sched/core.c
947
hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
kernel/sched/core.c
950
static inline void hrtick_clear(struct rq *rq)
kernel/sched/core.c
954
static inline void hrtick_rq_init(struct rq *rq)
kernel/sched/core.c
9557
struct rq *rq = cfs_rq->rq;
kernel/sched/core.c
9559
guard(rq_lock_irq)(rq);
kernel/sched/core_sched.c
242
void __sched_core_account_forceidle(struct rq *rq)
kernel/sched/core_sched.c
244
const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));
kernel/sched/core_sched.c
245
u64 delta, now = rq_clock(rq->core);
kernel/sched/core_sched.c
246
struct rq *rq_i;
kernel/sched/core_sched.c
250
lockdep_assert_rq_held(rq);
kernel/sched/core_sched.c
252
WARN_ON_ONCE(!rq->core->core_forceidle_count);
kernel/sched/core_sched.c
254
if (rq->core->core_forceidle_start == 0)
kernel/sched/core_sched.c
257
delta = now - rq->core->core_forceidle_start;
kernel/sched/core_sched.c
261
rq->core->core_forceidle_start = now;
kernel/sched/core_sched.c
263
if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) {
kernel/sched/core_sched.c
265
} else if (rq->core->core_forceidle_count > 1 ||
kernel/sched/core_sched.c
266
rq->core->core_forceidle_occupation > 1) {
kernel/sched/core_sched.c
272
delta *= rq->core->core_forceidle_count;
kernel/sched/core_sched.c
273
delta = div_u64(delta, rq->core->core_forceidle_occupation);
kernel/sched/core_sched.c
291
void __sched_core_tick(struct rq *rq)
kernel/sched/core_sched.c
293
if (!rq->core->core_forceidle_count)
kernel/sched/core_sched.c
296
if (rq != rq->core)
kernel/sched/core_sched.c
297
update_rq_clock(rq->core);
kernel/sched/core_sched.c
299
__sched_core_account_forceidle(rq);
kernel/sched/core_sched.c
60
struct rq *rq;
kernel/sched/core_sched.c
62
rq = task_rq_lock(p, &rf);
kernel/sched/core_sched.c
70
WARN_ON_ONCE((p->core_cookie || cookie) && !sched_core_enabled(rq));
kernel/sched/core_sched.c
73
sched_core_dequeue(rq, p, DEQUEUE_SAVE);
kernel/sched/core_sched.c
82
sched_core_enqueue(rq, p);
kernel/sched/core_sched.c
93
if (task_on_cpu(rq, p))
kernel/sched/core_sched.c
94
resched_curr(rq);
kernel/sched/core_sched.c
96
task_rq_unlock(rq, p, &rf);
kernel/sched/cputime.c
1068
struct rq *rq;
kernel/sched/cputime.c
1076
rq = cpu_rq(cpu);
kernel/sched/cputime.c
1082
curr = rcu_dereference(rq->curr);
kernel/sched/cputime.c
227
struct rq *rq = this_rq();
kernel/sched/cputime.c
229
if (atomic_read(&rq->nr_iowait) > 0)
kernel/sched/cputime.c
313
struct rq *rq;
kernel/sched/cputime.c
315
rq = task_rq_lock(t, &rf);
kernel/sched/cputime.c
317
task_rq_unlock(rq, t, &rf);
kernel/sched/cputime.c
983
struct rq *rq;
kernel/sched/cputime.c
989
rq = cpu_rq(cpu);
kernel/sched/cputime.c
995
curr = rcu_dereference(rq->curr);
kernel/sched/deadline.c
1025
struct rq *rq = rq_of_dl_se(dl_se);
kernel/sched/deadline.c
1027
if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
kernel/sched/deadline.c
1028
dl_entity_overflow(dl_se, rq_clock(rq))) {
kernel/sched/deadline.c
1031
!dl_time_before(dl_se->deadline, rq_clock(rq)) &&
kernel/sched/deadline.c
1033
update_dl_revised_wakeup(dl_se, rq);
kernel/sched/deadline.c
1043
replenish_dl_new_period(dl_se, rq);
kernel/sched/deadline.c
1075
struct rq *rq = rq_of_dl_rq(dl_rq);
kernel/sched/deadline.c
1079
lockdep_assert_rq_held(rq);
kernel/sched/deadline.c
1101
delta = ktime_to_ns(now) - rq_clock(rq);
kernel/sched/deadline.c
1130
static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
kernel/sched/deadline.c
1136
if (has_pushable_dl_tasks(rq)) {
kernel/sched/deadline.c
1141
rq_unpin_lock(rq, rf);
kernel/sched/deadline.c
1142
push_dl_task(rq);
kernel/sched/deadline.c
1143
rq_repin_lock(rq, rf);
kernel/sched/deadline.c
1152
struct rq *rq = rq_of_dl_se(dl_se);
kernel/sched/deadline.c
1155
scoped_guard (rq_lock, rq) {
kernel/sched/deadline.c
1162
update_rq_clock(rq);
kernel/sched/deadline.c
1169
rq->donor->sched_class->update_curr(rq);
kernel/sched/deadline.c
1183
if (dl_time_before(rq_clock(dl_se->rq),
kernel/sched/deadline.c
1187
fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;
kernel/sched/deadline.c
1198
if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
kernel/sched/deadline.c
1199
resched_curr(rq);
kernel/sched/deadline.c
1201
__push_dl_task(rq, rf);
kernel/sched/deadline.c
1227
struct rq *rq;
kernel/sched/deadline.c
1233
rq = task_rq_lock(p, &rf);
kernel/sched/deadline.c
1257
update_rq_clock(rq);
kernel/sched/deadline.c
1278
if (unlikely(!rq->online)) {
kernel/sched/deadline.c
1283
lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
kernel/sched/deadline.c
1284
rq = dl_task_offline_migration(rq, p);
kernel/sched/deadline.c
1285
rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
kernel/sched/deadline.c
1286
update_rq_clock(rq);
kernel/sched/deadline.c
1295
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
kernel/sched/deadline.c
1296
if (dl_task(rq->donor))
kernel/sched/deadline.c
1297
wakeup_preempt_dl(rq, p, 0);
kernel/sched/deadline.c
1299
resched_curr(rq);
kernel/sched/deadline.c
1301
__push_dl_task(rq, &rf);
kernel/sched/deadline.c
1304
task_rq_unlock(rq, p, &rf);
kernel/sched/deadline.c
1342
struct rq *rq = rq_of_dl_se(dl_se);
kernel/sched/deadline.c
1344
if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
kernel/sched/deadline.c
1345
dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
kernel/sched/deadline.c
1377
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
kernel/sched/deadline.c
1380
u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
kernel/sched/deadline.c
1388
if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
kernel/sched/deadline.c
1391
u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
kernel/sched/deadline.c
1393
u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
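The grub_reclaim() entries above (kernel/sched/deadline.c:1377-1393) scale a deadline entity's consumed runtime by the bandwidth it is allowed to reclaim. A compilable sketch of that arithmetic follows; the BW_SHIFT/RATIO_SHIFT fixed-point constants are assumptions matching kernel/sched/sched.h, and dl_bw_sketch is an illustrative stand-in rather than the kernel's dl_rq.

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT	20			/* bandwidths in 1/2^20 units */
#define BW_UNIT		(1ULL << BW_SHIFT)
#define RATIO_SHIFT	8

struct dl_bw_sketch {
	uint64_t this_bw, running_bw, extra_bw, max_bw, bw_ratio;
};

static uint64_t grub_reclaim_sketch(uint64_t delta, const struct dl_bw_sketch *dl,
				    uint64_t dl_bw)
{
	uint64_t u_inact = dl->this_bw - dl->running_bw;	/* Utot - Uact */
	uint64_t u_act;

	if (u_inact + dl->extra_bw > dl->max_bw - dl_bw)
		u_act = dl_bw;					/* cap at the entity's own bandwidth */
	else
		u_act = dl->max_bw - u_inact - dl->extra_bw;

	u_act = (u_act * dl->bw_ratio) >> RATIO_SHIFT;
	return (delta * u_act) >> BW_SHIFT;			/* scaled runtime charge */
}

int main(void)
{
	/* Example: one running 25% entity, 50% total admitted, no extra bandwidth:
	 * 1 ms of wall-clock execution is charged as 0.75 ms of runtime. */
	struct dl_bw_sketch dl = {
		.this_bw = BW_UNIT / 2, .running_bw = BW_UNIT / 4,
		.extra_bw = 0, .max_bw = BW_UNIT, .bw_ratio = 1 << RATIO_SHIFT,
	};
	printf("%llu\n", (unsigned long long)grub_reclaim_sketch(1000000, &dl, BW_UNIT / 4));
	return 0;
}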
kernel/sched/deadline.c
1397
s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
kernel/sched/deadline.c
1409
scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
kernel/sched/deadline.c
1411
int cpu = cpu_of(rq);
kernel/sched/deadline.c
1425
static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
kernel/sched/deadline.c
1427
bool idle = idle_rq(rq);
kernel/sched/deadline.c
1444
scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
kernel/sched/deadline.c
1493
replenish_dl_new_period(dl_se, dl_se->rq);
kernel/sched/deadline.c
1520
update_stats_dequeue_dl(&rq->dl, dl_se, 0);
kernel/sched/deadline.c
1521
dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
kernel/sched/deadline.c
1526
replenish_dl_new_period(dl_se, rq);
kernel/sched/deadline.c
1529
enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
kernel/sched/deadline.c
1533
if (!is_leftmost(dl_se, &rq->dl))
kernel/sched/deadline.c
1534
resched_curr(rq);
kernel/sched/deadline.c
1557
struct rt_rq *rt_rq = &rq->rt;
kernel/sched/deadline.c
1583
update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
kernel/sched/deadline.c
1590
update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
kernel/sched/deadline.c
1799
struct rq *rq = dl_se->rq;
kernel/sched/deadline.c
1808
rq->donor->sched_class->update_curr(rq);
kernel/sched/deadline.c
1810
if (WARN_ON_ONCE(!cpu_online(cpu_of(rq))))
kernel/sched/deadline.c
1815
if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
kernel/sched/deadline.c
1816
resched_curr(dl_se->rq);
kernel/sched/deadline.c
183
struct rq *rq = cpu_rq(i);
kernel/sched/deadline.c
1832
void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
kernel/sched/deadline.c
1835
dl_se->rq = rq;
kernel/sched/deadline.c
1842
struct rq *rq;
kernel/sched/deadline.c
1849
rq = cpu_rq(cpu);
kernel/sched/deadline.c
185
rq->dl.extra_bw += bw;
kernel/sched/deadline.c
1851
guard(rq_lock_irq)(rq);
kernel/sched/deadline.c
1852
update_rq_clock(rq);
kernel/sched/deadline.c
1854
dl_se = &rq->fair_server;
kernel/sched/deadline.c
1865
dl_se = &rq->ext_server;
kernel/sched/deadline.c
1878
void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
kernel/sched/deadline.c
1881
int cpu = cpu_of(rq);
kernel/sched/deadline.c
1884
dl_b = dl_bw_of(cpu_of(rq));
kernel/sched/deadline.c
1897
struct rq *rq = dl_se->rq;
kernel/sched/deadline.c
1898
int cpu = cpu_of(rq);
kernel/sched/deadline.c
1913
__add_rq_bw(new_bw, &rq->dl);
kernel/sched/deadline.c
1919
dl_rq_change_utilization(rq, dl_se, new_bw);
kernel/sched/deadline.c
1939
static void update_curr_dl(struct rq *rq)
kernel/sched/deadline.c
1941
struct task_struct *donor = rq->donor;
kernel/sched/deadline.c
1956
delta_exec = update_curr_common(rq);
kernel/sched/deadline.c
1957
update_curr_dl_se(rq, dl_se, delta_exec);
kernel/sched/deadline.c
1967
struct rq *rq;
kernel/sched/deadline.c
1971
rq = task_rq_lock(p, &rf);
kernel/sched/deadline.c
1973
rq = dl_se->rq;
kernel/sched/deadline.c
1974
rq_lock(rq, &rf);
kernel/sched/deadline.c
1978
update_rq_clock(rq);
kernel/sched/deadline.c
2004
sub_running_bw(dl_se, &rq->dl);
kernel/sched/deadline.c
2009
task_rq_unlock(rq, p, &rf);
kernel/sched/deadline.c
2012
rq_unlock(rq, &rf);
kernel/sched/deadline.c
2030
struct rq *rq = rq_of_dl_rq(dl_rq);
kernel/sched/deadline.c
2035
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
kernel/sched/deadline.c
2037
cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
kernel/sched/deadline.c
2043
struct rq *rq = rq_of_dl_rq(dl_rq);
kernel/sched/deadline.c
2052
cpudl_clear(&rq->rd->cpudl, rq->cpu, rq->online);
kernel/sched/deadline.c
2053
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
kernel/sched/deadline.c
2059
cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
kernel/sched/deadline.c
2292
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/deadline.c
2348
if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
kernel/sched/deadline.c
2349
enqueue_pushable_dl_task(rq, p);
kernel/sched/deadline.c
2352
static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/deadline.c
2354
update_curr_dl(rq);
kernel/sched/deadline.c
2361
dequeue_pushable_dl_task(rq, p);
kernel/sched/deadline.c
2376
static void yield_task_dl(struct rq *rq)
kernel/sched/deadline.c
2384
rq->donor->dl.dl_yielded = 1;
kernel/sched/deadline.c
2386
update_rq_clock(rq);
kernel/sched/deadline.c
2387
update_curr_dl(rq);
kernel/sched/deadline.c
2393
rq_clock_skip_update(rq);
kernel/sched/deadline.c
2397
struct rq *rq)
kernel/sched/deadline.c
2399
return (!rq->dl.dl_nr_running ||
kernel/sched/deadline.c
2401
rq->dl.earliest_dl.curr));
kernel/sched/deadline.c
2411
struct rq *rq;
kernel/sched/deadline.c
2416
rq = cpu_rq(cpu);
kernel/sched/deadline.c
2419
curr = READ_ONCE(rq->curr); /* unlocked access */
kernel/sched/deadline.c
2420
donor = READ_ONCE(rq->donor);
kernel/sched/deadline.c
2458
struct rq *rq;
kernel/sched/deadline.c
2463
rq = task_rq(p);
kernel/sched/deadline.c
2469
rq_lock(rq, &rf);
kernel/sched/deadline.c
2471
update_rq_clock(rq);
kernel/sched/deadline.c
2472
sub_running_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
2483
sub_rq_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
2484
rq_unlock(rq, &rf);
kernel/sched/deadline.c
2487
static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
2493
if (rq->curr->nr_cpus_allowed == 1 ||
kernel/sched/deadline.c
2494
!cpudl_find(&rq->rd->cpudl, rq->donor, NULL))
kernel/sched/deadline.c
2502
cpudl_find(&rq->rd->cpudl, p, NULL))
kernel/sched/deadline.c
2505
resched_curr(rq);
kernel/sched/deadline.c
2508
static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
kernel/sched/deadline.c
2510
if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
kernel/sched/deadline.c
2517
rq_unpin_lock(rq, rf);
kernel/sched/deadline.c
2518
pull_dl_task(rq);
kernel/sched/deadline.c
2519
rq_repin_lock(rq, rf);
kernel/sched/deadline.c
2522
return sched_stop_runnable(rq) || sched_dl_runnable(rq);
kernel/sched/deadline.c
2529
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/deadline.c
2539
if (dl_entity_preempt(&p->dl, &rq->donor->dl)) {
kernel/sched/deadline.c
2540
resched_curr(rq);
kernel/sched/deadline.c
2548
if ((p->dl.deadline == rq->donor->dl.deadline) &&
kernel/sched/deadline.c
2549
!test_tsk_need_resched(rq->curr))
kernel/sched/deadline.c
2550
check_preempt_equal_dl(rq, p);
kernel/sched/deadline.c
2554
static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
kernel/sched/deadline.c
2556
hrtick_start(rq, dl_se->runtime);
kernel/sched/deadline.c
2559
static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
kernel/sched/deadline.c
2564
static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
kernel/sched/deadline.c
2567
struct dl_rq *dl_rq = &rq->dl;
kernel/sched/deadline.c
2569
p->se.exec_start = rq_clock_task(rq);
kernel/sched/deadline.c
2574
dequeue_pushable_dl_task(rq, p);
kernel/sched/deadline.c
2579
if (rq->donor->sched_class != &dl_sched_class)
kernel/sched/deadline.c
2580
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
kernel/sched/deadline.c
2582
deadline_queue_push_tasks(rq);
kernel/sched/deadline.c
2584
if (hrtick_enabled_dl(rq))
kernel/sched/deadline.c
2585
start_hrtick_dl(rq, &p->dl);
kernel/sched/deadline.c
2602
static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
kernel/sched/deadline.c
2605
struct dl_rq *dl_rq = &rq->dl;
kernel/sched/deadline.c
2609
if (!sched_dl_runnable(rq))
kernel/sched/deadline.c
2621
rq->dl_server = dl_se;
kernel/sched/deadline.c
2629
static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
kernel/sched/deadline.c
2631
return __pick_task_dl(rq, rf);
kernel/sched/deadline.c
2634
static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
kernel/sched/deadline.c
2637
struct dl_rq *dl_rq = &rq->dl;
kernel/sched/deadline.c
2642
update_curr_dl(rq);
kernel/sched/deadline.c
2644
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
kernel/sched/deadline.c
2650
enqueue_pushable_dl_task(rq, p);
kernel/sched/deadline.c
2661
static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
kernel/sched/deadline.c
2663
update_curr_dl(rq);
kernel/sched/deadline.c
2665
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
kernel/sched/deadline.c
2671
if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
kernel/sched/deadline.c
2672
is_leftmost(&p->dl, &rq->dl))
kernel/sched/deadline.c
2673
start_hrtick_dl(rq, &p->dl);
kernel/sched/deadline.c
2691
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
kernel/sched/deadline.c
2696
if (!has_pushable_dl_tasks(rq))
kernel/sched/deadline.c
2699
next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
kernel/sched/deadline.c
2703
if (task_is_pushable(rq, p, cpu))
kernel/sched/deadline.c
2802
static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
kernel/sched/deadline.c
2806
if (!has_pushable_dl_tasks(rq))
kernel/sched/deadline.c
2809
p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
kernel/sched/deadline.c
2811
WARN_ON_ONCE(rq->cpu != task_cpu(p));
kernel/sched/deadline.c
2812
WARN_ON_ONCE(task_current(rq, p));
kernel/sched/deadline.c
2822
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
kernel/sched/deadline.c
2824
struct rq *later_rq = NULL;
kernel/sched/deadline.c
2831
if ((cpu == -1) || (cpu == rq->cpu))
kernel/sched/deadline.c
2847
if (double_lock_balance(rq, later_rq)) {
kernel/sched/deadline.c
2872
(task_rq(task) != rq ||
kernel/sched/deadline.c
2873
task_on_cpu(rq, task) ||
kernel/sched/deadline.c
2877
task != pick_next_pushable_dl_task(rq)))) {
kernel/sched/deadline.c
2879
double_unlock_balance(rq, later_rq);
kernel/sched/deadline.c
288
static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
kernel/sched/deadline.c
2894
double_unlock_balance(rq, later_rq);
kernel/sched/deadline.c
2906
static int push_dl_task(struct rq *rq)
kernel/sched/deadline.c
2909
struct rq *later_rq;
kernel/sched/deadline.c
291
sub_running_bw(dl_se, &rq->dl);
kernel/sched/deadline.c
2912
next_task = pick_next_pushable_dl_task(rq);
kernel/sched/deadline.c
2922
if (dl_task(rq->donor) &&
kernel/sched/deadline.c
2923
dl_time_before(next_task->dl.deadline, rq->donor->dl.deadline) &&
kernel/sched/deadline.c
2924
rq->curr->nr_cpus_allowed > 1) {
kernel/sched/deadline.c
2925
resched_curr(rq);
kernel/sched/deadline.c
2932
if (WARN_ON(next_task == rq->curr))
kernel/sched/deadline.c
2939
later_rq = find_lock_later_rq(next_task, rq);
kernel/sched/deadline.c
2948
task = pick_next_pushable_dl_task(rq);
kernel/sched/deadline.c
2966
move_queued_task_locked(rq, later_rq, next_task);
kernel/sched/deadline.c
2971
double_unlock_balance(rq, later_rq);
kernel/sched/deadline.c
2979
static void push_dl_tasks(struct rq *rq)
kernel/sched/deadline.c
2982
while (push_dl_task(rq))
kernel/sched/deadline.c
2986
static void pull_dl_task(struct rq *this_rq)
kernel/sched/deadline.c
2991
struct rq *src_rq;
kernel/sched/deadline.c
306
__sub_rq_bw(dl_se->dl_bw, &rq->dl);
kernel/sched/deadline.c
307
__add_rq_bw(new_bw, &rq->dl);
kernel/sched/deadline.c
3080
static void task_woken_dl(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
3082
if (!task_on_cpu(rq, p) &&
kernel/sched/deadline.c
3083
!test_tsk_need_resched(rq->curr) &&
kernel/sched/deadline.c
3085
dl_task(rq->donor) &&
kernel/sched/deadline.c
3086
(rq->curr->nr_cpus_allowed < 2 ||
kernel/sched/deadline.c
3087
!dl_entity_preempt(&p->dl, &rq->donor->dl))) {
kernel/sched/deadline.c
3088
push_dl_tasks(rq);
kernel/sched/deadline.c
3096
struct rq *rq;
kernel/sched/deadline.c
3100
rq = task_rq(p);
kernel/sched/deadline.c
3101
src_rd = rq->rd;
kernel/sched/deadline.c
3111
src_dl_b = dl_bw_of(cpu_of(rq));
kernel/sched/deadline.c
3126
static void rq_online_dl(struct rq *rq)
kernel/sched/deadline.c
3128
if (rq->dl.overloaded)
kernel/sched/deadline.c
3129
dl_set_overload(rq);
kernel/sched/deadline.c
3131
if (rq->dl.dl_nr_running > 0)
kernel/sched/deadline.c
3132
cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
kernel/sched/deadline.c
3134
cpudl_clear(&rq->rd->cpudl, rq->cpu, true);
kernel/sched/deadline.c
3138
static void rq_offline_dl(struct rq *rq)
kernel/sched/deadline.c
3140
if (rq->dl.overloaded)
kernel/sched/deadline.c
3141
dl_clear_overload(rq);
kernel/sched/deadline.c
3143
cpudl_clear(&rq->rd->cpudl, rq->cpu, false);
kernel/sched/deadline.c
3188
struct rq *rq;
kernel/sched/deadline.c
3203
rq = cpu_rq(cpu);
kernel/sched/deadline.c
3204
dl_b = &rq->rd->dl_bw;
kernel/sched/deadline.c
3207
__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
kernel/sched/deadline.c
3269
static void switched_from_dl(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
3296
sub_running_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
3297
sub_rq_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
3313
if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
kernel/sched/deadline.c
3316
deadline_queue_pull_task(rq);
kernel/sched/deadline.c
3323
static void switched_to_dl(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
3335
add_rq_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
3340
if (rq->donor != p) {
kernel/sched/deadline.c
3341
if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
kernel/sched/deadline.c
3342
deadline_queue_push_tasks(rq);
kernel/sched/deadline.c
3343
if (dl_task(rq->donor))
kernel/sched/deadline.c
3344
wakeup_preempt_dl(rq, p, 0);
kernel/sched/deadline.c
3346
resched_curr(rq);
kernel/sched/deadline.c
3348
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
kernel/sched/deadline.c
3352
static u64 get_prio_dl(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
3357
if (task_current_donor(rq, p))
kernel/sched/deadline.c
3358
update_curr_dl(rq);
kernel/sched/deadline.c
3367
static void prio_changed_dl(struct rq *rq, struct task_struct *p, u64 old_deadline)
kernel/sched/deadline.c
3376
deadline_queue_pull_task(rq);
kernel/sched/deadline.c
3378
if (task_current_donor(rq, p)) {
kernel/sched/deadline.c
3384
if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
kernel/sched/deadline.c
3385
resched_curr(rq);
kernel/sched/deadline.c
3393
if (!dl_task(rq->curr) ||
kernel/sched/deadline.c
3394
dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
kernel/sched/deadline.c
3395
resched_curr(rq);
kernel/sched/deadline.c
402
struct rq *rq = rq_of_dl_se(dl_se);
kernel/sched/deadline.c
403
struct dl_rq *dl_rq = &rq->dl;
kernel/sched/deadline.c
426
zerolag_time -= rq_clock(rq);
kernel/sched/deadline.c
445
sub_rq_bw(dl_se, &rq->dl);
kernel/sched/deadline.c
531
static inline int dl_overloaded(struct rq *rq)
kernel/sched/deadline.c
533
return atomic_read(&rq->rd->dlo_count);
kernel/sched/deadline.c
536
static inline void dl_set_overload(struct rq *rq)
kernel/sched/deadline.c
538
if (!rq->online)
kernel/sched/deadline.c
541
cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
kernel/sched/deadline.c
549
atomic_inc(&rq->rd->dlo_count);
kernel/sched/deadline.c
552
static inline void dl_clear_overload(struct rq *rq)
kernel/sched/deadline.c
554
if (!rq->online)
kernel/sched/deadline.c
557
atomic_dec(&rq->rd->dlo_count);
kernel/sched/deadline.c
558
cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
kernel/sched/deadline.c
569
static inline int has_pushable_dl_tasks(struct rq *rq)
kernel/sched/deadline.c
571
return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
kernel/sched/deadline.c
578
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
585
&rq->dl.pushable_dl_tasks_root,
kernel/sched/deadline.c
588
rq->dl.earliest_dl.next = p->dl.deadline;
kernel/sched/deadline.c
590
if (!rq->dl.overloaded) {
kernel/sched/deadline.c
591
dl_set_overload(rq);
kernel/sched/deadline.c
592
rq->dl.overloaded = 1;
kernel/sched/deadline.c
596
static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
598
struct dl_rq *dl_rq = &rq->dl;
kernel/sched/deadline.c
611
if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
kernel/sched/deadline.c
612
dl_clear_overload(rq);
kernel/sched/deadline.c
613
rq->dl.overloaded = 0;
kernel/sched/deadline.c
617
static int push_dl_task(struct rq *rq);
kernel/sched/deadline.c
619
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
kernel/sched/deadline.c
621
return rq->online && dl_task(prev);
kernel/sched/deadline.c
627
static void push_dl_tasks(struct rq *);
kernel/sched/deadline.c
628
static void pull_dl_task(struct rq *);
kernel/sched/deadline.c
630
static inline void deadline_queue_push_tasks(struct rq *rq)
kernel/sched/deadline.c
632
if (!has_pushable_dl_tasks(rq))
kernel/sched/deadline.c
635
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
kernel/sched/deadline.c
638
static inline void deadline_queue_pull_task(struct rq *rq)
kernel/sched/deadline.c
640
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
kernel/sched/deadline.c
643
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
kernel/sched/deadline.c
645
static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
kernel/sched/deadline.c
647
struct rq *later_rq = NULL;
kernel/sched/deadline.c
650
later_rq = find_lock_later_rq(p, rq);
kernel/sched/deadline.c
674
double_lock_balance(rq, later_rq);
kernel/sched/deadline.c
684
sub_running_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
685
sub_rq_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
690
sub_rq_bw(&p->dl, &rq->dl);
kernel/sched/deadline.c
699
dl_b = &rq->rd->dl_bw;
kernel/sched/deadline.c
701
__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
kernel/sched/deadline.c
71
static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
kernel/sched/deadline.c
710
double_unlock_balance(later_rq, rq);
kernel/sched/deadline.c
717
static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/deadline.c
719
static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/deadline.c
722
struct rq *rq)
kernel/sched/deadline.c
725
dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
kernel/sched/deadline.c
73
return container_of(dl_rq, struct rq, dl);
kernel/sched/deadline.c
753
struct rq *rq = rq_of_dl_rq(dl_rq);
kernel/sched/deadline.c
756
WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
kernel/sched/deadline.c
76
static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
kernel/sched/deadline.c
771
replenish_dl_new_period(dl_se, rq);
kernel/sched/deadline.c
78
struct rq *rq = dl_se->rq;
kernel/sched/deadline.c
798
struct rq *rq = rq_of_dl_rq(dl_rq);
kernel/sched/deadline.c
81
rq = task_rq(dl_task_of(dl_se));
kernel/sched/deadline.c
813
(dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
kernel/sched/deadline.c
814
dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
kernel/sched/deadline.c
83
return rq;
kernel/sched/deadline.c
841
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
kernel/sched/deadline.c
843
replenish_dl_new_period(dl_se, rq);
kernel/sched/deadline.c
866
dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
kernel/sched/deadline.c
962
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
kernel/sched/deadline.c
964
u64 laxity = dl_se->deadline - rq_clock(rq);
kernel/sched/deadline.c
972
WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
kernel/sched/debug.c
1031
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
1046
if (sizeof(rq->x) == 4) \
kernel/sched/debug.c
1047
SEQ_printf(m, " .%-30s: %d\n", #x, (int)(rq->x)); \
kernel/sched/debug.c
1049
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
kernel/sched/debug.c
1053
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
kernel/sched/debug.c
1059
SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
kernel/sched/debug.c
1065
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
kernel/sched/debug.c
1070
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
kernel/sched/debug.c
1084
print_rq(m, rq, cpu);
kernel/sched/debug.c
343
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
352
scoped_guard (rq_lock_irqsave, rq) {
kernel/sched/debug.c
375
update_rq_clock(rq);
kernel/sched/debug.c
386
server == &rq->fair_server ? "Fair" : "Ext",
kernel/sched/debug.c
388
cpu_of(rq),
kernel/sched/debug.c
420
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
423
&rq->fair_server);
kernel/sched/debug.c
429
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
431
return sched_server_show_common(m, v, DL_RUNTIME, &rq->fair_server);
kernel/sched/debug.c
453
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
456
&rq->ext_server);
kernel/sched/debug.c
462
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
464
return sched_server_show_common(m, v, DL_RUNTIME, &rq->ext_server);
kernel/sched/debug.c
486
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
489
&rq->fair_server);
kernel/sched/debug.c
495
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
497
return sched_server_show_common(m, v, DL_PERIOD, &rq->fair_server);
kernel/sched/debug.c
519
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
522
&rq->ext_server);
kernel/sched/debug.c
528
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
530
return sched_server_show_common(m, v, DL_PERIOD, &rq->ext_server);
kernel/sched/debug.c
832
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
kernel/sched/debug.c
834
if (task_current(rq, p))
kernel/sched/debug.c
865
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
kernel/sched/debug.c
897
print_task(m, rq, p);
kernel/sched/debug.c
907
struct rq *rq = cpu_rq(cpu);
kernel/sched/debug.c
918
raw_spin_rq_lock_irqsave(rq, flags);
kernel/sched/debug.c
930
raw_spin_rq_unlock_irqrestore(rq, flags);
kernel/sched/ext.c
1004
if (rq->scx.flags & SCX_RQ_IN_BALANCE)
kernel/sched/ext.c
1007
if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
kernel/sched/ext.c
1008
rq->curr->sched_class == &ext_sched_class) {
kernel/sched/ext.c
1009
rq->curr->scx.slice = 0;
kernel/sched/ext.c
1013
if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
kernel/sched/ext.c
1014
resched_curr(rq);
kernel/sched/ext.c
1147
static void dispatch_dequeue(struct rq *rq, struct task_struct *p)
kernel/sched/ext.c
1150
bool is_local = dsq == &rq->scx.local_dsq;
kernel/sched/ext.c
1152
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
120
struct rq *rq;
kernel/sched/ext.c
1215
struct rq *rq, u64 dsq_id,
kernel/sched/ext.c
1221
return &rq->scx.local_dsq;
kernel/sched/ext.c
1301
struct rq *rq = task_rq(p);
kernel/sched/ext.c
1303
find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
kernel/sched/ext.c
1306
touch_core_sched_dispatch(rq, p);
kernel/sched/ext.c
1316
if (dsq->id == SCX_DSQ_LOCAL && dsq != &rq->scx.local_dsq) {
kernel/sched/ext.c
1340
&rq->scx.ddsp_deferred_locals);
kernel/sched/ext.c
1341
schedule_deferred_locked(rq);
kernel/sched/ext.c
1351
static bool scx_rq_online(struct rq *rq)
kernel/sched/ext.c
1360
return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));
kernel/sched/ext.c
1363
static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
kernel/sched/ext.c
1374
if (sticky_cpu == cpu_of(rq))
kernel/sched/ext.c
1382
if (!scx_rq_online(rq))
kernel/sched/ext.c
1385
if (scx_rq_bypassing(rq)) {
kernel/sched/ext.c
1411
qseq = rq->scx.ops_qseq++ << SCX_OPSS_QSEQ_SHIFT;
kernel/sched/ext.c
1420
SCX_CALL_OP_TASK(sch, SCX_KF_ENQUEUE, enqueue, rq, p, enq_flags);
kernel/sched/ext.c
1437
dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
kernel/sched/ext.c
1440
dsq = &rq->scx.local_dsq;
kernel/sched/ext.c
1455
touch_core_sched(rq, p);
kernel/sched/ext.c
1466
static void set_task_runnable(struct rq *rq, struct task_struct *p)
kernel/sched/ext.c
1468
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
1479
list_add_tail(&p->scx.runnable_node, &rq->scx.runnable_list);
kernel/sched/ext.c
1489
static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags)
kernel/sched/ext.c
1493
u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
kernel/sched/ext.c
1496
rq->scx.flags |= SCX_RQ_IN_WAKEUP;
kernel/sched/ext.c
1507
if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p))
kernel/sched/ext.c
1508
sticky_cpu = cpu_of(rq);
kernel/sched/ext.c
1515
set_task_runnable(rq, p);
kernel/sched/ext.c
1517
rq->scx.nr_running++;
kernel/sched/ext.c
1518
add_nr_running(rq, 1);
kernel/sched/ext.c
1521
SCX_CALL_OP_TASK(sch, SCX_KF_REST, runnable, rq, p, enq_flags);
kernel/sched/ext.c
1524
touch_core_sched(rq, p);
kernel/sched/ext.c
1527
if (rq->scx.nr_running == 1)
kernel/sched/ext.c
1528
dl_server_start(&rq->ext_server);
kernel/sched/ext.c
1530
do_enqueue_task(rq, p, enq_flags, sticky_cpu);
kernel/sched/ext.c
1532
rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
kernel/sched/ext.c
1535
unlikely(cpu_of(rq) != p->scx.selected_cpu))
kernel/sched/ext.c
1539
static void ops_dequeue(struct rq *rq, struct task_struct *p, u64 deq_flags)
kernel/sched/ext.c
1561
SCX_CALL_OP_TASK(sch, SCX_KF_REST, dequeue, rq,
kernel/sched/ext.c
1588
static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags)
kernel/sched/ext.c
1597
ops_dequeue(rq, p, deq_flags);
kernel/sched/ext.c
1611
if (SCX_HAS_OP(sch, stopping) && task_current(rq, p)) {
kernel/sched/ext.c
1612
update_curr_scx(rq);
kernel/sched/ext.c
1613
SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, false);
kernel/sched/ext.c
1617
SCX_CALL_OP_TASK(sch, SCX_KF_REST, quiescent, rq, p, deq_flags);
kernel/sched/ext.c
1625
rq->scx.nr_running--;
kernel/sched/ext.c
1626
sub_nr_running(rq, 1);
kernel/sched/ext.c
1628
dispatch_dequeue(rq, p);
kernel/sched/ext.c
1633
static void yield_task_scx(struct rq *rq)
kernel/sched/ext.c
1636
struct task_struct *p = rq->donor;
kernel/sched/ext.c
1639
SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq, p, NULL);
kernel/sched/ext.c
1644
static bool yield_to_task_scx(struct rq *rq, struct task_struct *to)
kernel/sched/ext.c
1647
struct task_struct *from = rq->donor;
kernel/sched/ext.c
1650
return SCX_CALL_OP_2TASKS_RET(sch, SCX_KF_REST, yield, rq,
kernel/sched/ext.c
1658
struct rq *dst_rq)
kernel/sched/ext.c
1689
struct rq *src_rq, struct rq *dst_rq)
kernel/sched/ext.c
1733
struct task_struct *p, struct rq *rq,
kernel/sched/ext.c
1736
int cpu = cpu_of(rq);
kernel/sched/ext.c
1772
if (!scx_rq_online(rq)) {
kernel/sched/ext.c
1812
struct rq *src_rq)
kernel/sched/ext.c
1830
static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
kernel/sched/ext.c
1831
struct scx_dispatch_q *dsq, struct rq *src_rq)
kernel/sched/ext.c
1861
static struct rq *move_task_between_dsqs(struct scx_sched *sch,
kernel/sched/ext.c
1866
struct rq *src_rq = task_rq(p), *dst_rq;
kernel/sched/ext.c
1873
dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
kernel/sched/ext.c
1914
static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq,
kernel/sched/ext.c
1930
struct rq *task_rq = task_rq(p);
kernel/sched/ext.c
1943
if (rq == task_rq) {
kernel/sched/ext.c
1945
move_local_task_to_local_dsq(p, 0, dsq, rq);
kernel/sched/ext.c
1950
if (task_can_run_on_remote_rq(sch, p, rq, false)) {
kernel/sched/ext.c
1951
if (likely(consume_remote_task(rq, p, dsq, task_rq)))
kernel/sched/ext.c
196
static void process_ddsp_deferred_locals(struct rq *rq);
kernel/sched/ext.c
1961
static bool consume_global_dsq(struct scx_sched *sch, struct rq *rq)
kernel/sched/ext.c
1963
int node = cpu_to_node(cpu_of(rq));
kernel/sched/ext.c
1965
return consume_dispatch_q(sch, rq, sch->global_dsqs[node]);
kernel/sched/ext.c
198
static u32 reenq_local(struct rq *rq);
kernel/sched/ext.c
1983
static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
kernel/sched/ext.c
1987
struct rq *src_rq = task_rq(p);
kernel/sched/ext.c
1988
struct rq *dst_rq = container_of(dst_dsq, struct rq, scx.local_dsq);
kernel/sched/ext.c
1989
struct rq *locked_rq = rq;
kernel/sched/ext.c
1997
if (rq == src_rq && rq == dst_rq) {
kernel/sched/ext.c
2058
if (locked_rq != rq) {
kernel/sched/ext.c
2060
raw_spin_rq_lock(rq);
kernel/sched/ext.c
2083
static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
kernel/sched/ext.c
2091
touch_core_sched_dispatch(rq, p);
kernel/sched/ext.c
2140
dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
kernel/sched/ext.c
2145
static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
kernel/sched/ext.c
2153
finish_dispatch(sch, rq, ent->task, ent->qseq, ent->dsq_id,
kernel/sched/ext.c
2161
static inline void maybe_queue_balance_callback(struct rq *rq)
kernel/sched/ext.c
2163
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
2165
if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
kernel/sched/ext.c
2168
queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
kernel/sched/ext.c
2171
rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
kernel/sched/ext.c
2174
static int balance_one(struct rq *rq, struct task_struct *prev)
kernel/sched/ext.c
2182
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
2183
rq->scx.flags |= SCX_RQ_IN_BALANCE;
kernel/sched/ext.c
2184
rq->scx.flags &= ~SCX_RQ_BAL_KEEP;
kernel/sched/ext.c
2187
unlikely(rq->scx.cpu_released)) {
kernel/sched/ext.c
2195
SCX_CALL_OP(sch, SCX_KF_REST, cpu_acquire, rq,
kernel/sched/ext.c
2196
cpu_of(rq), NULL);
kernel/sched/ext.c
2197
rq->scx.cpu_released = false;
kernel/sched/ext.c
2201
update_curr_scx(rq);
kernel/sched/ext.c
2213
if (prev_on_rq && prev->scx.slice && !scx_rq_bypassing(rq)) {
kernel/sched/ext.c
2214
rq->scx.flags |= SCX_RQ_BAL_KEEP;
kernel/sched/ext.c
2220
if (rq->scx.local_dsq.nr)
kernel/sched/ext.c
2223
if (consume_global_dsq(sch, rq))
kernel/sched/ext.c
2226
if (scx_rq_bypassing(rq)) {
kernel/sched/ext.c
2227
if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq))
kernel/sched/ext.c
2233
if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq))
kernel/sched/ext.c
2236
dspc->rq = rq;
kernel/sched/ext.c
2248
SCX_CALL_OP(sch, SCX_KF_DISPATCH, dispatch, rq,
kernel/sched/ext.c
2249
cpu_of(rq), prev_on_scx ? prev : NULL);
kernel/sched/ext.c
2251
flush_dispatch_buf(sch, rq);
kernel/sched/ext.c
2254
rq->scx.flags |= SCX_RQ_BAL_KEEP;
kernel/sched/ext.c
2257
if (rq->scx.local_dsq.nr)
kernel/sched/ext.c
2259
if (consume_global_dsq(sch, rq))
kernel/sched/ext.c
2272
scx_kick_cpu(sch, cpu_of(rq), 0);
kernel/sched/ext.c
2283
(!(sch->ops.flags & SCX_OPS_ENQ_LAST) || scx_rq_bypassing(rq))) {
kernel/sched/ext.c
2284
rq->scx.flags |= SCX_RQ_BAL_KEEP;
kernel/sched/ext.c
2288
rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
kernel/sched/ext.c
2292
rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
kernel/sched/ext.c
2296
static void process_ddsp_deferred_locals(struct rq *rq)
kernel/sched/ext.c
2300
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
2309
while ((p = list_first_entry_or_null(&rq->scx.ddsp_deferred_locals,
kernel/sched/ext.c
2319
dsq = find_dsq_for_dispatch(sch, rq, dsq_id, p);
kernel/sched/ext.c
2321
dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
kernel/sched/ext.c
2325
static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
kernel/sched/ext.c
2334
ops_dequeue(rq, p, SCX_DEQ_CORE_SCHED_EXEC);
kernel/sched/ext.c
2335
dispatch_dequeue(rq, p);
kernel/sched/ext.c
2338
p->se.exec_start = rq_clock_task(rq);
kernel/sched/ext.c
2342
SCX_CALL_OP_TASK(sch, SCX_KF_REST, running, rq, p);
kernel/sched/ext.c
2351
(bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) {
kernel/sched/ext.c
2353
rq->scx.flags |= SCX_RQ_CAN_STOP_TICK;
kernel/sched/ext.c
2355
rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK;
kernel/sched/ext.c
2357
sched_update_tick_dependency(rq);
kernel/sched/ext.c
2365
update_other_load_avgs(rq);
kernel/sched/ext.c
2381
static void switch_class(struct rq *rq, struct task_struct *next)
kernel/sched/ext.c
2407
if (!rq->scx.cpu_released) {
kernel/sched/ext.c
2414
SCX_CALL_OP(sch, SCX_KF_CPU_RELEASE, cpu_release, rq,
kernel/sched/ext.c
2415
cpu_of(rq), &args);
kernel/sched/ext.c
2417
rq->scx.cpu_released = true;
kernel/sched/ext.c
2421
static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
kernel/sched/ext.c
2427
smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
kernel/sched/ext.c
2429
update_curr_scx(rq);
kernel/sched/ext.c
2433
SCX_CALL_OP_TASK(sch, SCX_KF_REST, stopping, rq, p, true);
kernel/sched/ext.c
2436
set_task_runnable(rq, p);
kernel/sched/ext.c
2444
if (p->scx.slice && !scx_rq_bypassing(rq)) {
kernel/sched/ext.c
2445
dispatch_enqueue(sch, &rq->scx.local_dsq, p,
kernel/sched/ext.c
2458
do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
kernel/sched/ext.c
2460
do_enqueue_task(rq, p, 0, -1);
kernel/sched/ext.c
2466
switch_class(rq, next);
kernel/sched/ext.c
2469
static void kick_sync_wait_bal_cb(struct rq *rq)
kernel/sched/ext.c
2487
for_each_cpu(cpu, rq->scx.cpus_to_sync) {
kernel/sched/ext.c
2492
if (cpu == cpu_of(rq) ||
kernel/sched/ext.c
2494
cpumask_clear_cpu(cpu, rq->scx.cpus_to_sync);
kernel/sched/ext.c
2498
raw_spin_rq_unlock_irq(rq);
kernel/sched/ext.c
2500
smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
kernel/sched/ext.c
2503
raw_spin_rq_lock_irq(rq);
kernel/sched/ext.c
2511
static struct task_struct *first_local_task(struct rq *rq)
kernel/sched/ext.c
2513
return list_first_entry_or_null(&rq->scx.local_dsq.list,
kernel/sched/ext.c
2518
do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
kernel/sched/ext.c
2520
struct task_struct *prev = rq->curr;
kernel/sched/ext.c
2525
smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1);
kernel/sched/ext.c
2527
rq_modified_begin(rq, &ext_sched_class);
kernel/sched/ext.c
2529
rq_unpin_lock(rq, rf);
kernel/sched/ext.c
2530
balance_one(rq, prev);
kernel/sched/ext.c
2531
rq_repin_lock(rq, rf);
kernel/sched/ext.c
2532
maybe_queue_balance_callback(rq);
kernel/sched/ext.c
2539
if (unlikely(rq->scx.kick_sync_pending)) {
kernel/sched/ext.c
2540
rq->scx.kick_sync_pending = false;
kernel/sched/ext.c
2541
queue_balance_callback(rq, &rq->scx.kick_sync_bal_cb,
kernel/sched/ext.c
2553
if (!force_scx && rq_modified_above(rq, &ext_sched_class))
kernel/sched/ext.c
2556
keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP;
kernel/sched/ext.c
2573
p = first_local_task(rq);
kernel/sched/ext.c
2580
if (!scx_rq_bypassing(rq) && !sch->warned_zero_slice) {
kernel/sched/ext.c
2592
static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
kernel/sched/ext.c
2594
return do_pick_task_scx(rq, rf, false);
kernel/sched/ext.c
2609
return do_pick_task_scx(dl_se->rq, rf, true);
kernel/sched/ext.c
2615
void ext_server_init(struct rq *rq)
kernel/sched/ext.c
2617
struct sched_dl_entity *dl_se = &rq->ext_server;
kernel/sched/ext.c
2621
dl_server_init(dl_se, rq, ext_server_pick_task);
kernel/sched/ext.c
2719
static void task_woken_scx(struct rq *rq, struct task_struct *p)
kernel/sched/ext.c
2721
run_deferred(rq);
kernel/sched/ext.c
2747
static void handle_hotplug(struct rq *rq, bool online)
kernel/sched/ext.c
2750
int cpu = cpu_of(rq);
kernel/sched/ext.c
2776
void scx_rq_activate(struct rq *rq)
kernel/sched/ext.c
2778
handle_hotplug(rq, true);
kernel/sched/ext.c
2781
void scx_rq_deactivate(struct rq *rq)
kernel/sched/ext.c
2783
handle_hotplug(rq, false);
kernel/sched/ext.c
2786
static void rq_online_scx(struct rq *rq)
kernel/sched/ext.c
2788
rq->scx.flags |= SCX_RQ_ONLINE;
kernel/sched/ext.c
2791
static void rq_offline_scx(struct rq *rq)
kernel/sched/ext.c
2793
rq->scx.flags &= ~SCX_RQ_ONLINE;
kernel/sched/ext.c
2797
static bool check_rq_for_timeouts(struct rq *rq)
kernel/sched/ext.c
2804
rq_lock_irqsave(rq, &rf);
kernel/sched/ext.c
2809
list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node) {
kernel/sched/ext.c
2824
rq_unlock_irqrestore(rq, &rf);
kernel/sched/ext.c
2844
void scx_tick(struct rq *rq)
kernel/sched/ext.c
2866
update_other_load_avgs(rq);
kernel/sched/ext.c
2869
static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued)
kernel/sched/ext.c
2873
update_curr_scx(rq);
kernel/sched/ext.c
2879
if (scx_rq_bypassing(rq)) {
kernel/sched/ext.c
2881
touch_core_sched(rq, curr);
kernel/sched/ext.c
2883
SCX_CALL_OP_TASK(sch, SCX_KF_REST, tick, rq, curr);
kernel/sched/ext.c
2887
resched_curr(rq);
kernel/sched/ext.c
297
DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
kernel/sched/ext.c
2973
struct rq *rq;
kernel/sched/ext.c
2976
rq = task_rq_lock(p, &rf);
kernel/sched/ext.c
299
static inline void update_locked_rq(struct rq *rq)
kernel/sched/ext.c
2990
task_rq_unlock(rq, p, &rf);
kernel/sched/ext.c
3004
struct rq *rq = task_rq(p);
kernel/sched/ext.c
3007
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
3021
SCX_CALL_OP_TASK(sch, SCX_KF_REST, enable, rq, p);
kernel/sched/ext.c
3025
SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
kernel/sched/ext.c
3032
struct rq *rq = task_rq(p);
kernel/sched/ext.c
3034
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
3040
SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
kernel/sched/ext.c
306
if (rq)
kernel/sched/ext.c
307
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
308
__this_cpu_write(scx_locked_rq_state, rq);
kernel/sched/ext.c
311
#define SCX_CALL_OP(sch, mask, op, rq, args...) \
kernel/sched/ext.c
3121
struct rq *rq;
kernel/sched/ext.c
3123
rq = task_rq_lock(p, &rf);
kernel/sched/ext.c
3125
task_rq_unlock(rq, p, &rf);
kernel/sched/ext.c
313
if (rq) \
kernel/sched/ext.c
3139
struct rq *rq;
kernel/sched/ext.c
314
update_locked_rq(rq); \
kernel/sched/ext.c
3142
rq = task_rq_lock(p, &rf);
kernel/sched/ext.c
3145
task_rq_unlock(rq, p, &rf);
kernel/sched/ext.c
3167
struct rq *rq = task_rq(p);
kernel/sched/ext.c
3169
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
3177
!task_on_cpu(rq, p);
kernel/sched/ext.c
3200
struct rq *rq;
kernel/sched/ext.c
3202
rq = task_rq_lock(p, &rf);
kernel/sched/ext.c
3204
task_rq_unlock(rq, p, &rf);
kernel/sched/ext.c
3208
static void reweight_task_scx(struct rq *rq, struct task_struct *p,
kernel/sched/ext.c
322
if (rq) \
kernel/sched/ext.c
3220
SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,
kernel/sched/ext.c
3224
static void prio_changed_scx(struct rq *rq, struct task_struct *p, u64 oldprio)
kernel/sched/ext.c
3228
static void switching_to_scx(struct rq *rq, struct task_struct *p)
kernel/sched/ext.c
3242
SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_cpumask, rq,
kernel/sched/ext.c
3246
static void switched_from_scx(struct rq *rq, struct task_struct *p)
kernel/sched/ext.c
3254
static void wakeup_preempt_scx(struct rq *rq, struct task_struct *p, int wake_flags) {}
kernel/sched/ext.c
3256
static void switched_to_scx(struct rq *rq, struct task_struct *p) {}
kernel/sched/ext.c
326
#define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \
kernel/sched/ext.c
3271
bool scx_can_stop_tick(struct rq *rq)
kernel/sched/ext.c
3273
struct task_struct *p = rq->curr;
kernel/sched/ext.c
3275
if (scx_rq_bypassing(rq))
kernel/sched/ext.c
3286
return rq->scx.flags & SCX_RQ_CAN_STOP_TICK;
kernel/sched/ext.c
330
if (rq) \
kernel/sched/ext.c
331
update_locked_rq(rq); \
kernel/sched/ext.c
339
if (rq) \
kernel/sched/ext.c
355
#define SCX_CALL_OP_TASK(sch, mask, op, rq, task, args...) \
kernel/sched/ext.c
359
SCX_CALL_OP((sch), mask, op, rq, task, ##args); \
kernel/sched/ext.c
363
#define SCX_CALL_OP_TASK_RET(sch, mask, op, rq, task, args...) \
kernel/sched/ext.c
368
__ret = SCX_CALL_OP_RET((sch), mask, op, rq, task, ##args); \
kernel/sched/ext.c
373
#define SCX_CALL_OP_2TASKS_RET(sch, mask, op, rq, task0, task1, args...) \
kernel/sched/ext.c
379
__ret = SCX_CALL_OP_RET((sch), mask, op, rq, task0, task1, ##args); \
kernel/sched/ext.c
3968
static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq,
kernel/sched/ext.c
3972
struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
kernel/sched/ext.c
3988
raw_spin_rq_lock_irq(rq);
kernel/sched/ext.c
3996
struct rq *donee_rq;
kernel/sched/ext.c
4049
raw_spin_rq_unlock_irq(rq);
kernel/sched/ext.c
4051
raw_spin_rq_lock_irq(rq);
kernel/sched/ext.c
4059
raw_spin_rq_unlock_irq(rq);
kernel/sched/ext.c
4106
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4107
struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq;
kernel/sched/ext.c
4116
nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask,
kernel/sched/ext.c
4245
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4248
raw_spin_rq_lock(rq);
kernel/sched/ext.c
4251
WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
kernel/sched/ext.c
4252
rq->scx.flags |= SCX_RQ_BYPASSING;
kernel/sched/ext.c
4254
WARN_ON_ONCE(!(rq->scx.flags & SCX_RQ_BYPASSING));
kernel/sched/ext.c
4255
rq->scx.flags &= ~SCX_RQ_BYPASSING;
kernel/sched/ext.c
4264
raw_spin_rq_unlock(rq);
kernel/sched/ext.c
4275
list_for_each_entry_safe_reverse(p, n, &rq->scx.runnable_list,
kernel/sched/ext.c
4285
resched_curr(rq);
kernel/sched/ext.c
4287
raw_spin_rq_unlock(rq);
kernel/sched/ext.c
4445
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4446
scx_rq_clock_invalidate(rq);
kernel/sched/ext.c
4746
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
4753
rq_lock_irqsave(rq, &rf);
kernel/sched/ext.c
4755
idle = list_empty(&rq->scx.runnable_list) &&
kernel/sched/ext.c
4756
rq->curr->sched_class == &idle_sched_class;
kernel/sched/ext.c
4772
cpu, rq->scx.nr_running, rq->scx.flags,
kernel/sched/ext.c
4773
rq->scx.cpu_released, rq->scx.ops_qseq,
kernel/sched/ext.c
4774
rq->scx.kick_sync);
kernel/sched/ext.c
4776
rq->curr->comm, rq->curr->pid,
kernel/sched/ext.c
4777
rq->curr->sched_class);
kernel/sched/ext.c
4778
if (!cpumask_empty(rq->scx.cpus_to_kick))
kernel/sched/ext.c
4780
cpumask_pr_args(rq->scx.cpus_to_kick));
kernel/sched/ext.c
4781
if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
kernel/sched/ext.c
4783
cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
kernel/sched/ext.c
4784
if (!cpumask_empty(rq->scx.cpus_to_preempt))
kernel/sched/ext.c
4786
cpumask_pr_args(rq->scx.cpus_to_preempt));
kernel/sched/ext.c
4787
if (!cpumask_empty(rq->scx.cpus_to_wait))
kernel/sched/ext.c
4789
cpumask_pr_args(rq->scx.cpus_to_wait));
kernel/sched/ext.c
4790
if (!cpumask_empty(rq->scx.cpus_to_sync))
kernel/sched/ext.c
4792
cpumask_pr_args(rq->scx.cpus_to_sync));
kernel/sched/ext.c
4819
if (rq->curr->sched_class == &ext_sched_class)
kernel/sched/ext.c
4820
scx_dump_task(&s, &dctx, rq->curr, '*');
kernel/sched/ext.c
4822
list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
kernel/sched/ext.c
4825
rq_unlock_irqrestore(rq, &rf);
kernel/sched/ext.c
513
struct rq *rq;
kernel/sched/ext.c
549
__balance_callbacks(iter->rq, &iter->rf);
kernel/sched/ext.c
550
task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
kernel/sched/ext.c
5646
static bool can_skip_idle_kick(struct rq *rq)
kernel/sched/ext.c
5648
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
5660
return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE);
kernel/sched/ext.c
5663
static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs)
kernel/sched/ext.c
5665
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
5671
raw_spin_rq_lock_irqsave(rq, flags);
kernel/sched/ext.c
5672
cur_class = rq->curr->sched_class;
kernel/sched/ext.c
5684
rq->curr->scx.slice = 0;
kernel/sched/ext.c
5691
ksyncs[cpu] = rq->scx.kick_sync;
kernel/sched/ext.c
5697
resched_curr(rq);
kernel/sched/ext.c
5703
raw_spin_rq_unlock_irqrestore(rq, flags);
kernel/sched/ext.c
5708
static void kick_one_cpu_if_idle(s32 cpu, struct rq *this_rq)
kernel/sched/ext.c
5710
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
5713
raw_spin_rq_lock_irqsave(rq, flags);
kernel/sched/ext.c
5715
if (!can_skip_idle_kick(rq) &&
kernel/sched/ext.c
5717
resched_curr(rq);
kernel/sched/ext.c
5719
raw_spin_rq_unlock_irqrestore(rq, flags);
kernel/sched/ext.c
5724
struct rq *this_rq = this_rq();
kernel/sched/ext.c
5850
struct rq *rq = cpu_rq(cpu);
kernel/sched/ext.c
5853
init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
kernel/sched/ext.c
5854
init_dsq(&rq->scx.bypass_dsq, SCX_DSQ_BYPASS);
kernel/sched/ext.c
5855
INIT_LIST_HEAD(&rq->scx.runnable_list);
kernel/sched/ext.c
5856
INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
kernel/sched/ext.c
5858
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
kernel/sched/ext.c
5859
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
kernel/sched/ext.c
5860
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
kernel/sched/ext.c
5861
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
kernel/sched/ext.c
5862
BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_sync, GFP_KERNEL, n));
kernel/sched/ext.c
5863
rq->scx.deferred_irq_work = IRQ_WORK_INIT_HARD(deferred_irq_workfn);
kernel/sched/ext.c
5864
rq->scx.kick_cpus_irq_work = IRQ_WORK_INIT_HARD(kick_cpus_irq_workfn);
kernel/sched/ext.c
6106
struct rq *this_rq, *src_rq, *locked_rq;
kernel/sched/ext.c
6266
flush_dispatch_buf(sch, dspc->rq);
kernel/sched/ext.c
6274
if (consume_dispatch_q(sch, dspc->rq, dsq)) {
kernel/sched/ext.c
6400
static u32 reenq_local(struct rq *rq)
kernel/sched/ext.c
6406
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
6413
list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
kernel/sched/ext.c
6429
dispatch_dequeue(rq, p);
kernel/sched/ext.c
6435
do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
kernel/sched/ext.c
6457
struct rq *rq;
kernel/sched/ext.c
6467
rq = cpu_rq(smp_processor_id());
kernel/sched/ext.c
6468
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
6470
return reenq_local(rq);
kernel/sched/ext.c
6575
struct rq *this_rq;
kernel/sched/ext.c
6599
struct rq *target_rq = cpu_rq(cpu);
kernel/sched/ext.c
675
iter->rq = task_rq_lock(p, &iter->rf);
kernel/sched/ext.c
7021
struct rq *rq;
kernel/sched/ext.c
7025
rq = this_rq();
kernel/sched/ext.c
7026
local_set(&rq->scx.reenq_local_deferred, 1);
kernel/sched/ext.c
7027
schedule_deferred(rq);
kernel/sched/ext.c
7108
struct rq *rq = cpu_rq(cpu), *locked_rq = scx_locked_rq();
kernel/sched/ext.c
7115
if (locked_rq && rq != locked_rq) {
kernel/sched/ext.c
7125
rq_lock_irqsave(rq, &rf);
kernel/sched/ext.c
7126
update_rq_clock(rq);
kernel/sched/ext.c
7129
rq->scx.cpuperf_target = perf;
kernel/sched/ext.c
7130
cpufreq_update_util(rq, 0);
kernel/sched/ext.c
7133
rq_unlock_irqrestore(rq, &rf);
kernel/sched/ext.c
7209
__bpf_kfunc struct rq *scx_bpf_cpu_rq(s32 cpu)
kernel/sched/ext.c
7238
__bpf_kfunc struct rq *scx_bpf_locked_rq(void)
kernel/sched/ext.c
7241
struct rq *rq;
kernel/sched/ext.c
7249
rq = scx_locked_rq();
kernel/sched/ext.c
7250
if (!rq) {
kernel/sched/ext.c
7255
return rq;
kernel/sched/ext.c
7345
struct rq *rq;
kernel/sched/ext.c
7350
rq = this_rq();
kernel/sched/ext.c
7351
if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) {
kernel/sched/ext.c
7360
clock = READ_ONCE(rq->scx.clock);
kernel/sched/ext.c
7369
clock = sched_clock_cpu(cpu_of(rq));
kernel/sched/ext.c
813
static void run_deferred(struct rq *rq)
kernel/sched/ext.c
815
process_ddsp_deferred_locals(rq);
kernel/sched/ext.c
817
if (local_read(&rq->scx.reenq_local_deferred)) {
kernel/sched/ext.c
818
local_set(&rq->scx.reenq_local_deferred, 0);
kernel/sched/ext.c
819
reenq_local(rq);
kernel/sched/ext.c
823
static void deferred_bal_cb_workfn(struct rq *rq)
kernel/sched/ext.c
825
run_deferred(rq);
kernel/sched/ext.c
830
struct rq *rq = container_of(irq_work, struct rq, scx.deferred_irq_work);
kernel/sched/ext.c
832
raw_spin_rq_lock(rq);
kernel/sched/ext.c
833
run_deferred(rq);
kernel/sched/ext.c
834
raw_spin_rq_unlock(rq);
kernel/sched/ext.c
845
static void schedule_deferred(struct rq *rq)
kernel/sched/ext.c
851
irq_work_queue(&rq->scx.deferred_irq_work);
kernel/sched/ext.c
861
static void schedule_deferred_locked(struct rq *rq)
kernel/sched/ext.c
863
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
870
if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
kernel/sched/ext.c
874
if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
kernel/sched/ext.c
888
if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
kernel/sched/ext.c
889
rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
kernel/sched/ext.c
898
schedule_deferred(rq);
kernel/sched/ext.c
911
static void touch_core_sched(struct rq *rq, struct task_struct *p)
kernel/sched/ext.c
913
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
924
p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));
kernel/sched/ext.c
938
static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p)
kernel/sched/ext.c
940
lockdep_assert_rq_held(rq);
kernel/sched/ext.c
944
touch_core_sched(rq, p);
kernel/sched/ext.c
948
static void update_curr_scx(struct rq *rq)
kernel/sched/ext.c
950
struct task_struct *curr = rq->curr;
kernel/sched/ext.c
953
delta_exec = update_curr_common(rq);
kernel/sched/ext.c
960
touch_core_sched(rq, curr);
kernel/sched/ext.c
963
dl_server_update(&rq->ext_server, delta_exec);
kernel/sched/ext.c
996
struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
kernel/sched/ext.h
11
void scx_tick(struct rq *rq);
kernel/sched/ext.h
17
bool scx_can_stop_tick(struct rq *rq);
kernel/sched/ext.h
18
void scx_rq_activate(struct rq *rq);
kernel/sched/ext.h
19
void scx_rq_deactivate(struct rq *rq);
kernel/sched/ext.h
45
static inline void scx_tick(struct rq *rq) {}
kernel/sched/ext.h
51
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
kernel/sched/ext.h
52
static inline void scx_rq_activate(struct rq *rq) {}
kernel/sched/ext.h
53
static inline void scx_rq_deactivate(struct rq *rq) {}
kernel/sched/ext.h
62
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify);
kernel/sched/ext.h
64
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify)
kernel/sched/ext.h
67
__scx_update_idle(rq, idle, do_notify);
kernel/sched/ext.h
70
static inline void scx_update_idle(struct rq *rq, bool idle, bool do_notify) {}
kernel/sched/ext_idle.c
733
void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
kernel/sched/ext_idle.c
736
int cpu = cpu_of(rq);
kernel/sched/ext_idle.c
738
lockdep_assert_rq_held(rq);
kernel/sched/ext_idle.c
755
if (do_notify || is_idle_task(rq->curr))
kernel/sched/ext_idle.c
770
if (SCX_HAS_OP(sch, update_idle) && do_notify && !scx_rq_bypassing(rq))
kernel/sched/ext_idle.c
771
SCX_CALL_OP(sch, SCX_KF_REST, update_idle, rq, cpu_of(rq), idle);
kernel/sched/ext_idle.c
895
struct rq *rq;
kernel/sched/ext_idle.c
913
rq = task_rq_lock(p, &rf);
kernel/sched/ext_idle.c
917
rq = scx_locked_rq();
kernel/sched/ext_idle.c
925
if (!rq)
kernel/sched/ext_idle.c
946
task_rq_unlock(rq, p, &rf);
kernel/sched/ext_internal.h
1164
DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);
kernel/sched/ext_internal.h
1170
static inline struct rq *scx_locked_rq(void)
kernel/sched/ext_internal.h
1180
static inline bool scx_rq_bypassing(struct rq *rq)
kernel/sched/ext_internal.h
1182
return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
kernel/sched/fair.c
10004
static void __sched_balance_update_blocked_averages(struct rq *rq)
kernel/sched/fair.c
10008
update_blocked_load_tick(rq);
kernel/sched/fair.c
10010
decayed |= __update_blocked_others(rq, &done);
kernel/sched/fair.c
10011
decayed |= __update_blocked_fair(rq, &done);
kernel/sched/fair.c
10013
update_has_blocked_load_status(rq, !done);
kernel/sched/fair.c
10015
cpufreq_update_util(rq, 0);
kernel/sched/fair.c
10020
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
10022
guard(rq_lock_irqsave)(rq);
kernel/sched/fair.c
10023
update_rq_clock(rq);
kernel/sched/fair.c
10024
__sched_balance_update_blocked_averages(rq);
kernel/sched/fair.c
10091
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
10095
irq = cpu_util_irq(rq);
kernel/sched/fair.c
10104
used = cpu_util_rt(rq);
kernel/sched/fair.c
10105
used += cpu_util_dl(rq);
kernel/sched/fair.c
10192
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
kernel/sched/fair.c
10194
return ((rq->cpu_capacity * sd->imbalance_pct) <
kernel/sched/fair.c
10195
(arch_scale_cpu_capacity(cpu_of(rq)) * 100));
kernel/sched/fair.c
10199
static inline bool check_misfit_status(struct rq *rq)
kernel/sched/fair.c
10201
return rq->misfit_task_load;
kernel/sched/fair.c
10442
sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
kernel/sched/fair.c
10448
if (rq->cfs.h_nr_runnable != 1)
kernel/sched/fair.c
10451
return check_cpu_capacity(rq, sd);
kernel/sched/fair.c
10478
struct rq *rq = cpu_rq(i);
kernel/sched/fair.c
10479
unsigned long load = cpu_load(rq);
kernel/sched/fair.c
10483
sgs->group_runnable += cpu_runnable(rq);
kernel/sched/fair.c
10484
sgs->sum_h_nr_running += rq->cfs.h_nr_runnable;
kernel/sched/fair.c
10486
nr_running = rq->nr_running;
kernel/sched/fair.c
10508
sgs->nr_numa_running += rq->nr_numa_running;
kernel/sched/fair.c
10509
sgs->nr_preferred_running += rq->nr_preferred_running;
kernel/sched/fair.c
10517
if (sgs->group_misfit_task_load < rq->misfit_task_load) {
kernel/sched/fair.c
10518
sgs->group_misfit_task_load = rq->misfit_task_load;
kernel/sched/fair.c
10521
} else if (env->idle && sched_reduced_capacity(rq, env->sd)) {
kernel/sched/fair.c
10711
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
kernel/sched/fair.c
10713
if (rq->nr_running > rq->nr_numa_running)
kernel/sched/fair.c
10715
if (rq->nr_running > rq->nr_preferred_running)
kernel/sched/fair.c
10725
static inline enum fbq_type fbq_classify_rq(struct rq *rq)
kernel/sched/fair.c
10759
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
10761
if (rq->curr != rq->idle && rq->curr != p)
kernel/sched/fair.c
10770
if (rq->ttwu_pending)
kernel/sched/fair.c
10797
struct rq *rq = cpu_rq(i);
kernel/sched/fair.c
10800
sgs->group_load += cpu_load_without(rq, p);
kernel/sched/fair.c
10802
sgs->group_runnable += cpu_runnable_without(rq, p);
kernel/sched/fair.c
10804
sgs->sum_h_nr_running += rq->cfs.h_nr_runnable - local;
kernel/sched/fair.c
10806
nr_running = rq->nr_running - local;
kernel/sched/fair.c
11547
static struct rq *sched_balance_find_src_rq(struct lb_env *env,
kernel/sched/fair.c
11550
struct rq *busiest = NULL, *rq;
kernel/sched/fair.c
11560
rq = cpu_rq(i);
kernel/sched/fair.c
11561
rt = fbq_classify_rq(rq);
kernel/sched/fair.c
11585
nr_running = rq->cfs.h_nr_runnable;
kernel/sched/fair.c
11618
load = cpu_load(rq);
kernel/sched/fair.c
11621
!check_cpu_capacity(rq, env->sd))
kernel/sched/fair.c
11640
busiest = rq;
kernel/sched/fair.c
11657
busiest = rq;
kernel/sched/fair.c
11664
busiest = rq;
kernel/sched/fair.c
11673
if (rq->misfit_task_load > busiest_load) {
kernel/sched/fair.c
11674
busiest_load = rq->misfit_task_load;
kernel/sched/fair.c
11675
busiest = rq;
kernel/sched/fair.c
11865
static int sched_balance_rq(int this_cpu, struct rq *this_rq,
kernel/sched/fair.c
11872
struct rq *busiest;
kernel/sched/fair.c
12197
struct rq *busiest_rq = data;
kernel/sched/fair.c
12200
struct rq *target_rq = cpu_rq(target_cpu);
kernel/sched/fair.c
1232
static s64 update_se(struct rq *rq, struct sched_entity *se)
kernel/sched/fair.c
12331
static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
kernel/sched/fair.c
12334
int cpu = rq->cpu;
kernel/sched/fair.c
1234
u64 now = rq_clock_task(rq);
kernel/sched/fair.c
12366
if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
kernel/sched/fair.c
12388
rq->max_idle_balance_cost =
kernel/sched/fair.c
12399
rq->next_balance = next_balance;
kernel/sched/fair.c
12403
static inline int on_null_domain(struct rq *rq)
kernel/sched/fair.c
12405
return unlikely(!rcu_dereference_sched(rq->sd));
kernel/sched/fair.c
1244
struct task_struct *running = rq->curr;
kernel/sched/fair.c
12484
static void nohz_balancer_kick(struct rq *rq)
kernel/sched/fair.c
12489
int nr_busy, i, cpu = rq->cpu;
kernel/sched/fair.c
12492
if (unlikely(rq->idle_balance))
kernel/sched/fair.c
12499
nohz_balance_exit_idle(rq);
kernel/sched/fair.c
12524
if (rq->nr_running >= 2) {
kernel/sched/fair.c
12531
sd = rcu_dereference_all(rq->sd);
kernel/sched/fair.c
12537
if (rq->cfs.h_nr_runnable >= 1 && check_cpu_capacity(rq, sd)) {
kernel/sched/fair.c
12567
if (check_misfit_status(rq)) {
kernel/sched/fair.c
12625
void nohz_balance_exit_idle(struct rq *rq)
kernel/sched/fair.c
12627
WARN_ON_ONCE(rq != this_rq());
kernel/sched/fair.c
12629
if (likely(!rq->nohz_tick_stopped))
kernel/sched/fair.c
12632
rq->nohz_tick_stopped = 0;
kernel/sched/fair.c
12633
cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
kernel/sched/fair.c
12635
set_cpu_sd_state_busy(rq->cpu);
kernel/sched/fair.c
12660
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
12673
rq->has_blocked_load = 1;
kernel/sched/fair.c
12681
if (rq->nohz_tick_stopped)
kernel/sched/fair.c
12685
if (on_null_domain(rq))
kernel/sched/fair.c
12688
rq->nohz_tick_stopped = 1;
kernel/sched/fair.c
12710
static bool update_nohz_stats(struct rq *rq)
kernel/sched/fair.c
12712
unsigned int cpu = rq->cpu;
kernel/sched/fair.c
12714
if (!rq->has_blocked_load)
kernel/sched/fair.c
12720
if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
kernel/sched/fair.c
12725
return rq->has_blocked_load;
kernel/sched/fair.c
12733
static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
kernel/sched/fair.c
12742
struct rq *rq;
kernel/sched/fair.c
1278
s64 update_curr_common(struct rq *rq)
kernel/sched/fair.c
12788
rq = cpu_rq(balance_cpu);
kernel/sched/fair.c
12791
has_blocked_load |= update_nohz_stats(rq);
kernel/sched/fair.c
12797
if (time_after_eq(jiffies, rq->next_balance)) {
kernel/sched/fair.c
1280
return update_se(rq, &rq->donor->se);
kernel/sched/fair.c
12800
rq_lock_irqsave(rq, &rf);
kernel/sched/fair.c
12801
update_rq_clock(rq);
kernel/sched/fair.c
12802
rq_unlock_irqrestore(rq, &rf);
kernel/sched/fair.c
12805
sched_balance_domains(rq, CPU_IDLE);
kernel/sched/fair.c
12808
if (time_after(next_balance, rq->next_balance)) {
kernel/sched/fair.c
12809
next_balance = rq->next_balance;
kernel/sched/fair.c
12836
static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
kernel/sched/fair.c
12882
static void nohz_newidle_balance(struct rq *this_rq)
kernel/sched/fair.c
12903
static inline void nohz_balancer_kick(struct rq *rq) { }
kernel/sched/fair.c
12905
static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
kernel/sched/fair.c
12910
static inline void nohz_newidle_balance(struct rq *this_rq) { }
kernel/sched/fair.c
12922
static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
kernel/sched/fair.c
1295
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
1302
delta_exec = update_se(rq, curr);
kernel/sched/fair.c
13076
struct rq *this_rq = this_rq();
kernel/sched/fair.c
13097
void sched_balance_trigger(struct rq *rq)
kernel/sched/fair.c
13103
if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
kernel/sched/fair.c
13106
if (time_after_eq(jiffies, rq->next_balance))
kernel/sched/fair.c
13109
nohz_balancer_kick(rq);
kernel/sched/fair.c
13112
static void rq_online_fair(struct rq *rq)
kernel/sched/fair.c
13116
update_runtime_enabled(rq);
kernel/sched/fair.c
13119
static void rq_offline_fair(struct rq *rq)
kernel/sched/fair.c
13124
unthrottle_offline_cfs_rqs(rq);
kernel/sched/fair.c
13127
clear_tg_offline_cfs_rqs(rq);
kernel/sched/fair.c
13141
static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
kernel/sched/fair.c
13143
if (!sched_core_enabled(rq))
kernel/sched/fair.c
13160
if (rq->core->core_forceidle_count && rq->cfs.nr_queued == 1 &&
kernel/sched/fair.c
13162
resched_curr(rq);
kernel/sched/fair.c
1320
dl_server_update(&rq->fair_server, delta_exec);
kernel/sched/fair.c
1329
resched_curr_lazy(rq);
kernel/sched/fair.c
1334
static void update_curr_fair(struct rq *rq)
kernel/sched/fair.c
13347
void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
kernel/sched/fair.c
13354
se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
kernel/sched/fair.c
1336
update_curr(cfs_rq_of(&rq->donor->se));
kernel/sched/fair.c
13360
struct rq *rq = task_rq(a);
kernel/sched/fair.c
13367
WARN_ON_ONCE(task_rq(b)->core != rq->core);
kernel/sched/fair.c
13384
se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
kernel/sched/fair.c
13385
se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
kernel/sched/fair.c
13417
static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
kernel/sched/fair.c
13428
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
kernel/sched/fair.c
13440
hrtick_start_fair(rq, curr);
kernel/sched/fair.c
13445
task_tick_numa(rq, curr);
kernel/sched/fair.c
13447
update_misfit_status(curr, rq);
kernel/sched/fair.c
13450
task_tick_core(rq, curr);
kernel/sched/fair.c
13468
prio_changed_fair(struct rq *rq, struct task_struct *p, u64 oldprio)
kernel/sched/fair.c
13476
if (rq->cfs.nr_queued == 1)
kernel/sched/fair.c
13484
if (task_current_donor(rq, p)) {
kernel/sched/fair.c
13486
resched_curr(rq);
kernel/sched/fair.c
13488
wakeup_preempt(rq, p, 0);
kernel/sched/fair.c
13573
static void switching_from_fair(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
13576
dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
kernel/sched/fair.c
13579
static void switched_from_fair(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
13584
static void switched_to_fair(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
13598
if (task_current_donor(rq, p))
kernel/sched/fair.c
13599
resched_curr(rq);
kernel/sched/fair.c
13601
wakeup_preempt(rq, p, 0);
kernel/sched/fair.c
13605
static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
kernel/sched/fair.c
13614
list_move(&se->group_node, &rq->cfs_tasks);
kernel/sched/fair.c
13621
if (hrtick_enabled_fair(rq))
kernel/sched/fair.c
13622
hrtick_start_fair(rq, p);
kernel/sched/fair.c
13624
update_misfit_status(p, rq);
kernel/sched/fair.c
13625
sched_fair_update_stop_tick(rq, p);
kernel/sched/fair.c
13634
static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
kernel/sched/fair.c
13646
__set_next_task_fair(rq, p, first);
kernel/sched/fair.c
13734
struct rq *rq;
kernel/sched/fair.c
13738
rq = cpu_rq(i);
kernel/sched/fair.c
13740
rq_lock_irq(rq, &rf);
kernel/sched/fair.c
13741
update_rq_clock(rq);
kernel/sched/fair.c
13744
rq_unlock_irq(rq, &rf);
kernel/sched/fair.c
13757
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
13761
guard(rq_lock_irqsave)(rq);
kernel/sched/fair.c
13763
update_rq_clock(rq);
kernel/sched/fair.c
13764
dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
kernel/sched/fair.c
13776
guard(rq_lock_irqsave)(rq);
kernel/sched/fair.c
13786
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
13789
cfs_rq->rq = rq;
kernel/sched/fair.c
13800
se->cfs_rq = &rq->cfs;
kernel/sched/fair.c
13834
struct rq *rq = cpu_rq(i);
kernel/sched/fair.c
13839
rq_lock_irqsave(rq, &rf);
kernel/sched/fair.c
13840
update_rq_clock(rq);
kernel/sched/fair.c
13845
rq_unlock_irqrestore(rq, &rf);
kernel/sched/fair.c
13885
struct rq *rq = cpu_rq(i);
kernel/sched/fair.c
13892
rq_lock_irqsave(rq, &rf);
kernel/sched/fair.c
13917
rq_unlock_irqrestore(rq, &rf);
kernel/sched/fair.c
13933
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
kernel/sched/fair.c
13942
if (rq->cfs.load.weight)
kernel/sched/fair.c
1649
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
1651
rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
kernel/sched/fair.c
1652
rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
kernel/sched/fair.c
1655
static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
1657
rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
kernel/sched/fair.c
1658
rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
kernel/sched/fair.c
2139
static unsigned long cpu_load(struct rq *rq);
kernel/sched/fair.c
2140
static unsigned long cpu_runnable(struct rq *rq);
kernel/sched/fair.c
2201
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
2203
ns->load += cpu_load(rq);
kernel/sched/fair.c
2204
ns->runnable += cpu_runnable(rq);
kernel/sched/fair.c
2206
ns->nr_running += rq->cfs.h_nr_runnable;
kernel/sched/fair.c
2209
if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
kernel/sched/fair.c
2210
if (READ_ONCE(rq->numa_migrate_on) ||
kernel/sched/fair.c
2233
struct rq *rq = cpu_rq(env->dst_cpu);
kernel/sched/fair.c
2236
if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
kernel/sched/fair.c
2248
rq = cpu_rq(env->dst_cpu);
kernel/sched/fair.c
2249
if (!xchg(&rq->numa_migrate_on, 1))
kernel/sched/fair.c
2263
rq = cpu_rq(env->best_cpu);
kernel/sched/fair.c
2264
WRITE_ONCE(rq->numa_migrate_on, 0);
kernel/sched/fair.c
2322
struct rq *dst_rq = cpu_rq(env->dst_cpu);
kernel/sched/fair.c
2580
struct rq *best_rq;
kernel/sched/fair.c
312
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
313
int cpu = cpu_of(rq);
kernel/sched/fair.c
316
return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
kernel/sched/fair.c
344
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
kernel/sched/fair.c
354
&rq->leaf_cfs_rq_list);
kernel/sched/fair.c
359
rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
kernel/sched/fair.c
3670
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
kernel/sched/fair.c
369
list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
kernel/sched/fair.c
3736
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
kernel/sched/fair.c
374
rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
kernel/sched/fair.c
3740
static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
3744
static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
3759
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
3761
account_numa_enqueue(rq, task_of(se));
kernel/sched/fair.c
3762
list_add(&se->group_node, &rq->cfs_tasks);
kernel/sched/fair.c
381
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
390
if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
kernel/sched/fair.c
3902
static void reweight_task_fair(struct rq *rq, struct task_struct *p,
kernel/sched/fair.c
391
rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
kernel/sched/fair.c
398
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
kernel/sched/fair.c
400
WARN_ON_ONCE(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
kernel/sched/fair.c
404
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
kernel/sched/fair.c
405
list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
kernel/sched/fair.c
4052
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
4054
if (&rq->cfs == cfs_rq) {
kernel/sched/fair.c
4069
cpufreq_update_util(rq, flags);
kernel/sched/fair.c
4114
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
4119
prev = rq->tmp_alone_branch;
kernel/sched/fair.c
4122
if (prev == &rq->leaf_cfs_rq_list)
kernel/sched/fair.c
4211
static void __maybe_unused clear_tg_offline_cfs_rqs(struct rq *rq)
kernel/sched/fair.c
4215
lockdep_assert_rq_held(rq);
kernel/sched/fair.c
4222
rq_clock_start_loop_update(rq);
kernel/sched/fair.c
4226
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
kernel/sched/fair.c
4232
rq_clock_stop_loop_update(rq);
kernel/sched/fair.c
4517
static inline void clear_tg_offline_cfs_rqs(struct rq *rq) {}
kernel/sched/fair.c
4533
struct rq *rq;
kernel/sched/fair.c
4540
rq = rq_of(cfs_rq);
kernel/sched/fair.c
4543
is_idle = is_idle_task(rcu_dereference_all(rq->curr));
kernel/sched/fair.c
4585
now = u64_u32_load(rq->clock_pelt_idle);
kernel/sched/fair.c
4603
now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
kernel/sched/fair.c
4845
static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
kernel/sched/fair.c
486
static inline void assert_list_leaf_cfs_rq(struct rq *rq)
kernel/sched/fair.c
490
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
kernel/sched/fair.c
491
for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
kernel/sched/fair.c
5122
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
kernel/sched/fair.c
5124
int cpu = cpu_of(rq);
kernel/sched/fair.c
5137
rq->misfit_task_load = 0;
kernel/sched/fair.c
5145
rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
kernel/sched/fair.c
5334
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
5336
cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
kernel/sched/fair.c
5479
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
5482
cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
kernel/sched/fair.c
5533
static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
kernel/sched/fair.c
5543
pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
kernel/sched/fair.c
5549
dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
kernel/sched/fair.c
5772
static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/fair.c
5778
struct rq *rq;
kernel/sched/fair.c
5804
rq = scope.rq;
kernel/sched/fair.c
5805
update_rq_clock(rq);
kernel/sched/fair.c
5807
dequeue_task_fair(rq, p, DEQUEUE_SLEEP | DEQUEUE_THROTTLE);
kernel/sched/fair.c
5814
resched_curr(rq);
kernel/sched/fair.c
5906
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/fair.c
5909
struct rq *rq = data;
kernel/sched/fair.c
5910
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
kernel/sched/fair.c
5917
cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
kernel/sched/fair.c
5923
u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
kernel/sched/fair.c
5969
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
5972
cfs_rq->throttled_clock = rq_clock(rq);
kernel/sched/fair.c
5975
cfs_rq->throttled_clock_self = rq_clock(rq);
kernel/sched/fair.c
5980
struct rq *rq = data;
kernel/sched/fair.c
5981
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
kernel/sched/fair.c
5992
cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
kernel/sched/fair.c
6003
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
6030
walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
kernel/sched/fair.c
6044
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
6046
struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
kernel/sched/fair.c
6062
update_rq_clock(rq);
kernel/sched/fair.c
6066
cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
kernel/sched/fair.c
6073
walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
kernel/sched/fair.c
6088
assert_list_leaf_cfs_rq(rq);
kernel/sched/fair.c
6091
if (rq->curr == rq->idle && rq->cfs.nr_queued)
kernel/sched/fair.c
6092
resched_curr(rq);
kernel/sched/fair.c
6098
struct rq *rq = arg;
kernel/sched/fair.c
6101
rq_lock(rq, &rf);
kernel/sched/fair.c
6108
update_rq_clock(rq);
kernel/sched/fair.c
6109
rq_clock_start_loop_update(rq);
kernel/sched/fair.c
6120
list_for_each_entry_safe(cursor, tmp, &rq->cfsb_csd_list,
kernel/sched/fair.c
6130
rq_clock_stop_loop_update(rq);
kernel/sched/fair.c
6131
rq_unlock(rq, &rf);
kernel/sched/fair.c
6136
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
6139
if (rq == this_rq()) {
kernel/sched/fair.c
6148
first = list_empty(&rq->cfsb_csd_list);
kernel/sched/fair.c
6149
list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
kernel/sched/fair.c
6151
smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
kernel/sched/fair.c
6172
struct rq *rq;
kernel/sched/fair.c
6178
rq = rq_of(cfs_rq);
kernel/sched/fair.c
6185
rq_lock_irqsave(rq, &rf);
kernel/sched/fair.c
6208
if (cpu_of(rq) != this_cpu) {
kernel/sched/fair.c
6224
rq_unlock_irqrestore(rq, &rf);
kernel/sched/fair.c
6229
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
6231
rq_lock_irqsave(rq, &rf);
kernel/sched/fair.c
6238
rq_unlock_irqrestore(rq, &rf);
kernel/sched/fair.c
6619
struct rq *rq = cpu_rq(i);
kernel/sched/fair.c
6622
if (list_empty(&rq->cfsb_csd_list))
kernel/sched/fair.c
6626
__cfsb_csd_unthrottle(rq);
kernel/sched/fair.c
6639
static void __maybe_unused update_runtime_enabled(struct rq *rq)
kernel/sched/fair.c
6643
lockdep_assert_rq_held(rq);
kernel/sched/fair.c
6648
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
kernel/sched/fair.c
6658
static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
kernel/sched/fair.c
6662
lockdep_assert_rq_held(rq);
kernel/sched/fair.c
6665
if (cpumask_test_cpu(cpu_of(rq), cpu_active_mask))
kernel/sched/fair.c
6673
rq_clock_start_loop_update(rq);
kernel/sched/fair.c
6677
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
kernel/sched/fair.c
6700
rq_clock_stop_loop_update(rq);
kernel/sched/fair.c
6719
static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
6721
int cpu = cpu_of(rq);
kernel/sched/fair.c
6729
if (rq->nr_running != 1)
kernel/sched/fair.c
6786
static inline void update_runtime_enabled(struct rq *rq) {}
kernel/sched/fair.c
6787
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
kernel/sched/fair.c
6797
static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {}
kernel/sched/fair.c
6805
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
6809
WARN_ON_ONCE(task_rq(p) != rq);
kernel/sched/fair.c
6811
if (rq->cfs.h_nr_queued > 1) {
kernel/sched/fair.c
6817
if (task_current_donor(rq, p))
kernel/sched/fair.c
6818
resched_curr(rq);
kernel/sched/fair.c
6821
hrtick_start(rq, delta);
kernel/sched/fair.c
6830
static void hrtick_update(struct rq *rq)
kernel/sched/fair.c
6832
struct task_struct *donor = rq->donor;
kernel/sched/fair.c
6834
if (!hrtick_enabled_fair(rq) || donor->sched_class != &fair_sched_class)
kernel/sched/fair.c
6837
hrtick_start_fair(rq, donor);
kernel/sched/fair.c
6841
hrtick_start_fair(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
6845
static inline void hrtick_update(struct rq *rq)
kernel/sched/fair.c
6881
static inline void check_update_overutilized_status(struct rq *rq)
kernel/sched/fair.c
6888
if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
kernel/sched/fair.c
6889
set_rd_overutilized(rq->rd, 1);
kernel/sched/fair.c
6893
static int sched_idle_rq(struct rq *rq)
kernel/sched/fair.c
6895
return unlikely(rq->nr_running == rq->cfs.h_nr_idle &&
kernel/sched/fair.c
6896
rq->nr_running);
kernel/sched/fair.c
6941
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/fair.c
6948
int rq_h_nr_queued = rq->cfs.h_nr_queued;
kernel/sched/fair.c
6961
util_est_enqueue(&rq->cfs, p);
kernel/sched/fair.c
6974
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
kernel/sched/fair.c
7029
if (!rq_h_nr_queued && rq->cfs.h_nr_queued)
kernel/sched/fair.c
7030
dl_server_start(&rq->fair_server);
kernel/sched/fair.c
7033
add_nr_running(rq, 1);
kernel/sched/fair.c
7050
check_update_overutilized_status(rq);
kernel/sched/fair.c
7052
assert_list_leaf_cfs_rq(rq);
kernel/sched/fair.c
7054
hrtick_update(rq);
kernel/sched/fair.c
7066
static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
kernel/sched/fair.c
7068
bool was_sched_idle = sched_idle_rq(rq);
kernel/sched/fair.c
7149
sub_nr_running(rq, h_nr_queued);
kernel/sched/fair.c
7152
if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
kernel/sched/fair.c
7153
rq->next_balance = jiffies;
kernel/sched/fair.c
7160
hrtick_update(rq);
kernel/sched/fair.c
7167
__block_task(rq, p);
kernel/sched/fair.c
7178
static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/fair.c
7186
util_est_dequeue(&rq->cfs, p);
kernel/sched/fair.c
7188
util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
kernel/sched/fair.c
7189
if (dequeue_entities(rq, &p->se, flags) < 0)
kernel/sched/fair.c
7196
hrtick_update(rq);
kernel/sched/fair.c
7200
static inline unsigned int cfs_h_nr_delayed(struct rq *rq)
kernel/sched/fair.c
7202
return (rq->cfs.h_nr_queued - rq->cfs.h_nr_runnable);
kernel/sched/fair.c
7222
static unsigned long cpu_load(struct rq *rq)
kernel/sched/fair.c
7224
return cfs_rq_load_avg(&rq->cfs);
kernel/sched/fair.c
7240
static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
7246
if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
kernel/sched/fair.c
7247
return cpu_load(rq);
kernel/sched/fair.c
7249
cfs_rq = &rq->cfs;
kernel/sched/fair.c
7258
static unsigned long cpu_runnable(struct rq *rq)
kernel/sched/fair.c
7260
return cfs_rq_runnable_avg(&rq->cfs);
kernel/sched/fair.c
7263
static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
7269
if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
kernel/sched/fair.c
7270
return cpu_runnable(rq);
kernel/sched/fair.c
7272
cfs_rq = &rq->cfs;
kernel/sched/fair.c
7364
struct rq *rq = cpu_rq(this_cpu);
kernel/sched/fair.c
7366
if ((rq->nr_running - cfs_h_nr_delayed(rq)) == 1)
kernel/sched/fair.c
7461
struct rq *rq = cpu_rq(i);
kernel/sched/fair.c
7463
if (!sched_core_cookie_match(rq, p))
kernel/sched/fair.c
7470
struct cpuidle_state *idle = idle_get_state(rq);
kernel/sched/fair.c
7478
latest_idle_timestamp = rq->idle_stamp;
kernel/sched/fair.c
7481
rq->idle_stamp > latest_idle_timestamp) {
kernel/sched/fair.c
7487
latest_idle_timestamp = rq->idle_stamp;
kernel/sched/fair.c
7595
void __update_idle_core(struct rq *rq)
kernel/sched/fair.c
7597
int core = cpu_of(rq);
kernel/sched/fair.c
8126
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
8135
irq = cpu_util_irq(rq);
kernel/sched/fair.c
8151
*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
kernel/sched/fair.c
8157
if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
kernel/sched/fair.c
8167
util = util_cfs + cpu_util_rt(rq);
kernel/sched/fair.c
8168
util += cpu_util_dl(rq);
kernel/sched/fair.c
8175
*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
kernel/sched/fair.c
8442
struct rq *rq = cpu_rq(cpu);
kernel/sched/fair.c
8462
if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
kernel/sched/fair.c
8470
rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
kernel/sched/fair.c
8471
rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
kernel/sched/fair.c
8686
struct rq *rq;
kernel/sched/fair.c
8688
rq = task_rq_lock(p, &rf);
kernel/sched/fair.c
8690
update_rq_clock(rq);
kernel/sched/fair.c
8691
dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
kernel/sched/fair.c
8693
task_rq_unlock(rq, p, &rf);
kernel/sched/fair.c
8770
preempt_sync(struct rq *rq, int wake_flags,
kernel/sched/fair.c
8782
delta = rq_clock_task(rq) - se->exec_start;
kernel/sched/fair.c
8808
static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_flags)
kernel/sched/fair.c
8811
struct task_struct *donor = rq->donor;
kernel/sched/fair.c
8844
if (test_tsk_need_resched(rq->curr))
kernel/sched/fair.c
8908
preempt_action = preempt_sync(rq, wake_flags, pse, se);
kernel/sched/fair.c
8938
resched_curr_lazy(rq);
kernel/sched/fair.c
8941
static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf)
kernel/sched/fair.c
8949
cfs_rq = &rq->cfs;
kernel/sched/fair.c
8962
se = pick_next_entity(rq, cfs_rq);
kernel/sched/fair.c
8974
static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
kernel/sched/fair.c
8975
static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
kernel/sched/fair.c
8978
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
kernel/sched/fair.c
8979
__must_hold(__rq_lockp(rq))
kernel/sched/fair.c
8986
p = pick_task_fair(rq, rf);
kernel/sched/fair.c
8995
__put_prev_set_next_dl_server(rq, prev, p);
kernel/sched/fair.c
9029
__set_next_task_fair(rq, p, true);
kernel/sched/fair.c
9036
put_prev_set_next_task(rq, prev, p);
kernel/sched/fair.c
9041
new_tasks = sched_balance_newidle(rq, rf);
kernel/sched/fair.c
9062
return pick_task_fair(dl_se->rq, rf);
kernel/sched/fair.c
9065
void fair_server_init(struct rq *rq)
kernel/sched/fair.c
9067
struct sched_dl_entity *dl_se = &rq->fair_server;
kernel/sched/fair.c
9071
dl_server_init(dl_se, rq, fair_server_pick_task);
kernel/sched/fair.c
9077
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next)
kernel/sched/fair.c
9091
static void yield_task_fair(struct rq *rq)
kernel/sched/fair.c
9093
struct task_struct *curr = rq->donor;
kernel/sched/fair.c
9100
if (unlikely(rq->nr_running == 1))
kernel/sched/fair.c
9105
update_rq_clock(rq);
kernel/sched/fair.c
9115
rq_clock_skip_update(rq);
kernel/sched/fair.c
9131
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
9142
yield_task_fair(rq);
kernel/sched/fair.c
9328
struct rq *src_rq;
kernel/sched/fair.c
9332
struct rq *dst_rq;
kernel/sched/fair.c
9786
static void attach_task(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
9788
lockdep_assert_rq_held(rq);
kernel/sched/fair.c
9790
WARN_ON_ONCE(task_rq(p) != rq);
kernel/sched/fair.c
9791
activate_task(rq, p, ENQUEUE_NOCLOCK);
kernel/sched/fair.c
9792
wakeup_preempt(rq, p, 0);
kernel/sched/fair.c
9799
static void attach_one_task(struct rq *rq, struct task_struct *p)
kernel/sched/fair.c
9803
rq_lock(rq, &rf);
kernel/sched/fair.c
9804
update_rq_clock(rq);
kernel/sched/fair.c
9805
attach_task(rq, p);
kernel/sched/fair.c
9806
rq_unlock(rq, &rf);
kernel/sched/fair.c
9844
static inline bool others_have_blocked(struct rq *rq)
kernel/sched/fair.c
9846
if (cpu_util_rt(rq))
kernel/sched/fair.c
9849
if (cpu_util_dl(rq))
kernel/sched/fair.c
9852
if (hw_load_avg(rq))
kernel/sched/fair.c
9855
if (cpu_util_irq(rq))
kernel/sched/fair.c
9861
static inline void update_blocked_load_tick(struct rq *rq)
kernel/sched/fair.c
9863
WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
kernel/sched/fair.c
9866
static inline void update_has_blocked_load_status(struct rq *rq, bool has_blocked_load)
kernel/sched/fair.c
9869
rq->has_blocked_load = 0;
kernel/sched/fair.c
9873
static inline bool others_have_blocked(struct rq *rq) { return false; }
kernel/sched/fair.c
9874
static inline void update_blocked_load_tick(struct rq *rq) {}
kernel/sched/fair.c
9875
static inline void update_has_blocked_load_status(struct rq *rq, bool has_blocked_load) {}
kernel/sched/fair.c
9878
static bool __update_blocked_others(struct rq *rq, bool *done)
kernel/sched/fair.c
9886
updated = update_other_load_avgs(rq);
kernel/sched/fair.c
9888
if (others_have_blocked(rq))
kernel/sched/fair.c
9896
static bool __update_blocked_fair(struct rq *rq, bool *done)
kernel/sched/fair.c
9900
int cpu = cpu_of(rq);
kernel/sched/fair.c
9906
for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
kernel/sched/fair.c
9915
if (cfs_rq == &rq->cfs)
kernel/sched/fair.c
9946
struct rq *rq = rq_of(cfs_rq);
kernel/sched/fair.c
9947
struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
kernel/sched/fair.c
9986
static bool __update_blocked_fair(struct rq *rq, bool *done)
kernel/sched/fair.c
9988
struct cfs_rq *cfs_rq = &rq->cfs;
kernel/sched/idle.c
465
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
kernel/sched/idle.c
473
static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/idle.c
475
resched_curr(rq);
kernel/sched/idle.c
478
static void update_curr_idle(struct rq *rq);
kernel/sched/idle.c
480
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
kernel/sched/idle.c
482
update_curr_idle(rq);
kernel/sched/idle.c
483
scx_update_idle(rq, false, true);
kernel/sched/idle.c
484
update_rq_avg_idle(rq);
kernel/sched/idle.c
487
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
kernel/sched/idle.c
489
update_idle_core(rq);
kernel/sched/idle.c
490
scx_update_idle(rq, true, true);
kernel/sched/idle.c
491
schedstat_inc(rq->sched_goidle);
kernel/sched/idle.c
492
next->se.exec_start = rq_clock_task(rq);
kernel/sched/idle.c
498
update_idle_rq_clock_pelt(rq);
kernel/sched/idle.c
501
struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
kernel/sched/idle.c
503
scx_update_idle(rq, true, false);
kernel/sched/idle.c
504
return rq->idle;
kernel/sched/idle.c
512
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/idle.c
514
raw_spin_rq_unlock_irq(rq);
kernel/sched/idle.c
517
raw_spin_rq_lock_irq(rq);
kernel/sched/idle.c
529
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
kernel/sched/idle.c
531
update_curr_idle(rq);
kernel/sched/idle.c
534
static void switching_to_idle(struct rq *rq, struct task_struct *p)
kernel/sched/idle.c
540
prio_changed_idle(struct rq *rq, struct task_struct *p, u64 oldprio)
kernel/sched/idle.c
548
static void update_curr_idle(struct rq *rq)
kernel/sched/idle.c
550
struct sched_entity *se = &rq->idle->se;
kernel/sched/idle.c
551
u64 now = rq_clock_task(rq);
kernel/sched/idle.c
560
dl_server_update_idle(&rq->fair_server, delta_exec);
kernel/sched/idle.c
562
dl_server_update_idle(&rq->ext_server, delta_exec);
kernel/sched/loadavg.c
235
static void calc_load_nohz_fold(struct rq *rq)
kernel/sched/loadavg.c
239
delta = calc_load_fold_active(rq, 0);
kernel/sched/loadavg.c
260
void calc_load_nohz_remote(struct rq *rq)
kernel/sched/loadavg.c
262
calc_load_nohz_fold(rq);
kernel/sched/loadavg.c
267
struct rq *this_rq = this_rq();
kernel/sched/loadavg.c
387
void calc_global_load_tick(struct rq *this_rq)
kernel/sched/loadavg.c
80
long calc_load_fold_active(struct rq *this_rq, long adjust)
kernel/sched/membarrier.c
240
struct rq *rq = this_rq();
kernel/sched/membarrier.c
245
if (READ_ONCE(rq->membarrier_state) == membarrier_state)
kernel/sched/membarrier.c
247
WRITE_ONCE(rq->membarrier_state, membarrier_state);
kernel/sched/membarrier.c
479
struct rq *rq = cpu_rq(cpu);
kernel/sched/membarrier.c
482
p = rcu_dereference(rq->curr);
kernel/sched/pelt.c
347
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
kernel/sched/pelt.c
349
if (___update_load_sum(now, &rq->avg_rt,
kernel/sched/pelt.c
354
___update_load_avg(&rq->avg_rt, 1);
kernel/sched/pelt.c
355
trace_pelt_rt_tp(rq);
kernel/sched/pelt.c
373
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
kernel/sched/pelt.c
375
if (___update_load_sum(now, &rq->avg_dl,
kernel/sched/pelt.c
380
___update_load_avg(&rq->avg_dl, 1);
kernel/sched/pelt.c
381
trace_pelt_dl_tp(rq);
kernel/sched/pelt.c
404
int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
kernel/sched/pelt.c
406
if (___update_load_sum(now, &rq->avg_hw,
kernel/sched/pelt.c
410
___update_load_avg(&rq->avg_hw, 1);
kernel/sched/pelt.c
411
trace_pelt_hw_tp(rq);
kernel/sched/pelt.c
431
int update_irq_load_avg(struct rq *rq, u64 running)
kernel/sched/pelt.c
440
running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
kernel/sched/pelt.c
441
running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
kernel/sched/pelt.c
454
ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
kernel/sched/pelt.c
458
ret += ___update_load_sum(rq->clock, &rq->avg_irq,
kernel/sched/pelt.c
464
___update_load_avg(&rq->avg_irq, 1);
kernel/sched/pelt.c
465
trace_pelt_irq_tp(rq);
kernel/sched/pelt.c
477
bool update_other_load_avgs(struct rq *rq)
kernel/sched/pelt.c
479
u64 now = rq_clock_pelt(rq);
kernel/sched/pelt.c
480
const struct sched_class *curr_class = rq->donor->sched_class;
kernel/sched/pelt.c
481
unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
kernel/sched/pelt.c
483
lockdep_assert_rq_held(rq);
kernel/sched/pelt.c
486
return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
kernel/sched/pelt.c
487
update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
kernel/sched/pelt.c
488
update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
kernel/sched/pelt.c
489
update_irq_load_avg(rq, 0);
kernel/sched/pelt.h
100
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
kernel/sched/pelt.h
102
if (unlikely(is_idle_task(rq->curr))) {
kernel/sched/pelt.h
103
_update_idle_rq_clock_pelt(rq);
kernel/sched/pelt.h
11
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
kernel/sched/pelt.h
12
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
kernel/sched/pelt.h
123
delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
kernel/sched/pelt.h
124
delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
kernel/sched/pelt.h
126
rq->clock_pelt += delta;
kernel/sched/pelt.h
13
bool update_other_load_avgs(struct rq *rq);
kernel/sched/pelt.h
138
static inline void update_idle_rq_clock_pelt(struct rq *rq)
kernel/sched/pelt.h
141
u32 util_sum = rq->cfs.avg.util_sum;
kernel/sched/pelt.h
142
util_sum += rq->avg_rt.util_sum;
kernel/sched/pelt.h
143
util_sum += rq->avg_dl.util_sum;
kernel/sched/pelt.h
155
rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
kernel/sched/pelt.h
157
_update_idle_rq_clock_pelt(rq);
kernel/sched/pelt.h
16
int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
kernel/sched/pelt.h
18
static inline u64 hw_load_avg(struct rq *rq)
kernel/sched/pelt.h
20
return READ_ONCE(rq->avg_hw.load_avg);
kernel/sched/pelt.h
24
update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
kernel/sched/pelt.h
29
static inline u64 hw_load_avg(struct rq *rq)
kernel/sched/pelt.h
36
int update_irq_load_avg(struct rq *rq, u64 running);
kernel/sched/pelt.h
39
update_irq_load_avg(struct rq *rq, u64 running)
kernel/sched/pelt.h
69
static inline u64 rq_clock_pelt(struct rq *rq)
kernel/sched/pelt.h
71
lockdep_assert_rq_held(rq);
kernel/sched/pelt.h
72
assert_clock_updated(rq);
kernel/sched/pelt.h
74
return rq->clock_pelt - rq->lost_idle_time;
kernel/sched/pelt.h
78
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
kernel/sched/pelt.h
80
rq->clock_pelt = rq_clock_task(rq);
kernel/sched/pelt.h
82
u64_u32_store(rq->clock_idle, rq_clock(rq));
kernel/sched/pelt.h
85
u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
kernel/sched/psi.c
1006
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
kernel/sched/psi.c
1020
lockdep_assert_rq_held(rq);
kernel/sched/psi.c
1025
delta = (s64)(irq - rq->psi_irq_time);
kernel/sched/psi.c
1028
rq->psi_irq_time = irq;
kernel/sched/psi.c
1059
struct rq *rq;
kernel/sched/psi.c
1072
rq = this_rq_lock_irq(&rf);
kernel/sched/psi.c
1077
rq_unlock_irq(rq, &rf);
kernel/sched/psi.c
1090
struct rq *rq;
kernel/sched/psi.c
1102
rq = this_rq_lock_irq(&rf);
kernel/sched/psi.c
1107
rq_unlock_irq(rq, &rf);
kernel/sched/psi.c
1159
struct rq *rq;
kernel/sched/psi.c
1170
rq = task_rq_lock(task, &rf);
kernel/sched/psi.c
1207
task_rq_unlock(rq, task, &rf);
kernel/sched/rq-offsets.c
9
DEFINE(RQ_nr_pinned, offsetof(struct rq, nr_pinned));
kernel/sched/rt.c
1001
resched_curr(rq);
kernel/sched/rt.c
1013
struct rq *rq = rq_of_rt_rq(rt_rq);
kernel/sched/rt.c
1015
BUG_ON(&rq->rt != rt_rq);
kernel/sched/rt.c
1020
BUG_ON(!rq->nr_running);
kernel/sched/rt.c
1022
sub_nr_running(rq, count);
kernel/sched/rt.c
1030
struct rq *rq = rq_of_rt_rq(rt_rq);
kernel/sched/rt.c
1032
BUG_ON(&rq->rt != rt_rq);
kernel/sched/rt.c
1041
add_nr_running(rq, rt_rq->rt_nr_running);
kernel/sched/rt.c
1046
cpufreq_update_util(rq, 0);
kernel/sched/rt.c
1052
struct rq *rq = rq_of_rt_rq(rt_rq);
kernel/sched/rt.c
1057
if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
kernel/sched/rt.c
1060
if (rq->online && prio < prev_prio)
kernel/sched/rt.c
1061
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
kernel/sched/rt.c
1067
struct rq *rq = rq_of_rt_rq(rt_rq);
kernel/sched/rt.c
1072
if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
kernel/sched/rt.c
1075
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
kernel/sched/rt.c
1076
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
kernel/sched/rt.c
1400
struct rq *rq = rq_of_rt_se(rt_se);
kernel/sched/rt.c
1407
enqueue_top_rt_rq(&rq->rt);
kernel/sched/rt.c
1412
struct rq *rq = rq_of_rt_se(rt_se);
kernel/sched/rt.c
1424
enqueue_top_rt_rq(&rq->rt);
kernel/sched/rt.c
1431
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/rt.c
1446
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
kernel/sched/rt.c
1447
enqueue_pushable_task(rq, p);
kernel/sched/rt.c
1450
static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/rt.c
1454
update_curr_rt(rq);
kernel/sched/rt.c
1457
dequeue_pushable_task(rq, p);
kernel/sched/rt.c
1480
static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
kernel/sched/rt.c
1491
static void yield_task_rt(struct rq *rq)
kernel/sched/rt.c
1493
requeue_task_rt(rq, rq->donor, 0);
kernel/sched/rt.c
1502
struct rq *rq;
kernel/sched/rt.c
1509
rq = cpu_rq(cpu);
kernel/sched/rt.c
1512
curr = READ_ONCE(rq->curr); /* unlocked access */
kernel/sched/rt.c
1513
donor = READ_ONCE(rq->donor);
kernel/sched/rt.c
1571
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
kernel/sched/rt.c
1573
if (rq->curr->nr_cpus_allowed == 1 ||
kernel/sched/rt.c
1574
!cpupri_find(&rq->rd->cpupri, rq->donor, NULL))
kernel/sched/rt.c
1582
cpupri_find(&rq->rd->cpupri, p, NULL))
kernel/sched/rt.c
1590
requeue_task_rt(rq, p, 1);
kernel/sched/rt.c
1591
resched_curr(rq);
kernel/sched/rt.c
1594
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
kernel/sched/rt.c
1596
if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
kernel/sched/rt.c
1603
rq_unpin_lock(rq, rf);
kernel/sched/rt.c
1604
pull_rt_task(rq);
kernel/sched/rt.c
1605
rq_repin_lock(rq, rf);
kernel/sched/rt.c
1608
return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
kernel/sched/rt.c
1614
static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/rt.c
1616
struct task_struct *donor = rq->donor;
kernel/sched/rt.c
1625
resched_curr(rq);
kernel/sched/rt.c
1641
if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
kernel/sched/rt.c
1642
check_preempt_equal_prio(rq, p);
kernel/sched/rt.c
1645
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
kernel/sched/rt.c
1648
struct rt_rq *rt_rq = &rq->rt;
kernel/sched/rt.c
1650
p->se.exec_start = rq_clock_task(rq);
kernel/sched/rt.c
1655
dequeue_pushable_task(rq, p);
kernel/sched/rt.c
1665
if (rq->donor->sched_class != &rt_sched_class)
kernel/sched/rt.c
1666
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
kernel/sched/rt.c
1668
rt_queue_push_tasks(rq);
kernel/sched/rt.c
1689
static struct task_struct *_pick_next_task_rt(struct rq *rq)
kernel/sched/rt.c
1692
struct rt_rq *rt_rq = &rq->rt;
kernel/sched/rt.c
1704
static struct task_struct *pick_task_rt(struct rq *rq, struct rq_flags *rf)
kernel/sched/rt.c
1708
if (!sched_rt_runnable(rq))
kernel/sched/rt.c
1711
p = _pick_next_task_rt(rq);
kernel/sched/rt.c
1716
static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
kernel/sched/rt.c
1719
struct rt_rq *rt_rq = &rq->rt;
kernel/sched/rt.c
1724
update_curr_rt(rq);
kernel/sched/rt.c
1726
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
kernel/sched/rt.c
1735
enqueue_pushable_task(rq, p);
kernel/sched/rt.c
1745
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
kernel/sched/rt.c
1747
struct plist_head *head = &rq->rt.pushable_tasks;
kernel/sched/rt.c
1750
if (!has_pushable_tasks(rq))
kernel/sched/rt.c
1754
if (task_is_pushable(rq, p, cpu))
kernel/sched/rt.c
178
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
kernel/sched/rt.c
182
return rt_rq->rq;
kernel/sched/rt.c
1854
static struct task_struct *pick_next_pushable_task(struct rq *rq)
kernel/sched/rt.c
1858
if (!has_pushable_tasks(rq))
kernel/sched/rt.c
1861
p = plist_first_entry(&rq->rt.pushable_tasks,
kernel/sched/rt.c
1864
BUG_ON(rq->cpu != task_cpu(p));
kernel/sched/rt.c
1865
BUG_ON(task_current(rq, p));
kernel/sched/rt.c
1866
BUG_ON(task_current_donor(rq, p));
kernel/sched/rt.c
1876
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
kernel/sched/rt.c
1878
struct rq *lowest_rq = NULL;
kernel/sched/rt.c
1885
if ((cpu == -1) || (cpu == rq->cpu))
kernel/sched/rt.c
1901
if (double_lock_balance(rq, lowest_rq)) {
kernel/sched/rt.c
191
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
kernel/sched/rt.c
1914
task != pick_next_pushable_task(rq))) {
kernel/sched/rt.c
1916
double_unlock_balance(rq, lowest_rq);
kernel/sched/rt.c
1927
double_unlock_balance(rq, lowest_rq);
kernel/sched/rt.c
1939
static int push_rt_task(struct rq *rq, bool pull)
kernel/sched/rt.c
1942
struct rq *lowest_rq;
kernel/sched/rt.c
1945
if (!rq->rt.overloaded)
kernel/sched/rt.c
1948
next_task = pick_next_pushable_task(rq);
kernel/sched/rt.c
1958
if (unlikely(next_task->prio < rq->donor->prio)) {
kernel/sched/rt.c
1959
resched_curr(rq);
kernel/sched/rt.c
196
return rt_rq->rq;
kernel/sched/rt.c
1967
if (!pull || rq->push_busy)
kernel/sched/rt.c
1979
if (rq->donor->sched_class != &rt_sched_class)
kernel/sched/rt.c
1982
cpu = find_lowest_rq(rq->curr);
kernel/sched/rt.c
1983
if (cpu == -1 || cpu == rq->cpu)
kernel/sched/rt.c
1992
push_task = get_push_task(rq);
kernel/sched/rt.c
1995
raw_spin_rq_unlock(rq);
kernel/sched/rt.c
1996
stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
kernel/sched/rt.c
1997
push_task, &rq->push_work);
kernel/sched/rt.c
1999
raw_spin_rq_lock(rq);
kernel/sched/rt.c
2005
if (WARN_ON(next_task == rq->curr))
kernel/sched/rt.c
2012
lowest_rq = find_lock_lowest_rq(next_task, rq);
kernel/sched/rt.c
2023
task = pick_next_pushable_task(rq);
kernel/sched/rt.c
2046
move_queued_task_locked(rq, lowest_rq, next_task);
kernel/sched/rt.c
2050
double_unlock_balance(rq, lowest_rq);
kernel/sched/rt.c
2057
static void push_rt_tasks(struct rq *rq)
kernel/sched/rt.c
2060
while (push_rt_task(rq, false))
kernel/sched/rt.c
2169
static void tell_cpu_to_push(struct rq *rq)
kernel/sched/rt.c
2174
atomic_inc(&rq->rd->rto_loop_next);
kernel/sched/rt.c
2177
if (!rto_start_trylock(&rq->rd->rto_loop_start))
kernel/sched/rt.c
2180
raw_spin_lock(&rq->rd->rto_lock);
kernel/sched/rt.c
2188
if (rq->rd->rto_cpu < 0)
kernel/sched/rt.c
2189
cpu = rto_next_cpu(rq->rd);
kernel/sched/rt.c
2191
raw_spin_unlock(&rq->rd->rto_lock);
kernel/sched/rt.c
2193
rto_start_unlock(&rq->rd->rto_loop_start);
kernel/sched/rt.c
2197
sched_get_rd(rq->rd);
kernel/sched/rt.c
2198
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
kernel/sched/rt.c
2207
struct rq *rq;
kernel/sched/rt.c
2210
rq = this_rq();
kernel/sched/rt.c
2216
if (has_pushable_tasks(rq)) {
kernel/sched/rt.c
2217
raw_spin_rq_lock(rq);
kernel/sched/rt.c
2218
while (push_rt_task(rq, true))
kernel/sched/rt.c
2220
raw_spin_rq_unlock(rq);
kernel/sched/rt.c
2240
static void pull_rt_task(struct rq *this_rq)
kernel/sched/rt.c
2245
struct rq *src_rq;
kernel/sched/rt.c
230
struct rq *rq = cpu_rq(cpu);
kernel/sched/rt.c
234
rt_rq->rq = rq;
kernel/sched/rt.c
2353
static void task_woken_rt(struct rq *rq, struct task_struct *p)
kernel/sched/rt.c
2355
bool need_to_push = !task_on_cpu(rq, p) &&
kernel/sched/rt.c
2356
!test_tsk_need_resched(rq->curr) &&
kernel/sched/rt.c
2358
(dl_task(rq->donor) || rt_task(rq->donor)) &&
kernel/sched/rt.c
2359
(rq->curr->nr_cpus_allowed < 2 ||
kernel/sched/rt.c
2360
rq->donor->prio <= p->prio);
kernel/sched/rt.c
2363
push_rt_tasks(rq);
kernel/sched/rt.c
2367
static void rq_online_rt(struct rq *rq)
kernel/sched/rt.c
2369
if (rq->rt.overloaded)
kernel/sched/rt.c
2370
rt_set_overload(rq);
kernel/sched/rt.c
2372
__enable_runtime(rq);
kernel/sched/rt.c
2374
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
kernel/sched/rt.c
2378
static void rq_offline_rt(struct rq *rq)
kernel/sched/rt.c
2380
if (rq->rt.overloaded)
kernel/sched/rt.c
2381
rt_clear_overload(rq);
kernel/sched/rt.c
2383
__disable_runtime(rq);
kernel/sched/rt.c
2385
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
kernel/sched/rt.c
2392
static void switched_from_rt(struct rq *rq, struct task_struct *p)
kernel/sched/rt.c
2401
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
kernel/sched/rt.c
2404
rt_queue_pull_task(rq);
kernel/sched/rt.c
2422
static void switched_to_rt(struct rq *rq, struct task_struct *p)
kernel/sched/rt.c
2428
if (task_current(rq, p)) {
kernel/sched/rt.c
2429
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
kernel/sched/rt.c
2439
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
kernel/sched/rt.c
244
rt_se->rt_rq = &rq->rt;
kernel/sched/rt.c
2440
rt_queue_push_tasks(rq);
kernel/sched/rt.c
2441
if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
kernel/sched/rt.c
2442
resched_curr(rq);
kernel/sched/rt.c
2451
prio_changed_rt(struct rq *rq, struct task_struct *p, u64 oldprio)
kernel/sched/rt.c
2459
if (task_current_donor(rq, p)) {
kernel/sched/rt.c
2465
rt_queue_pull_task(rq);
kernel/sched/rt.c
2471
if (p->prio > rq->rt.highest_prio.curr)
kernel/sched/rt.c
2472
resched_curr(rq);
kernel/sched/rt.c
2479
if (p->prio < rq->donor->prio)
kernel/sched/rt.c
2480
resched_curr(rq);
kernel/sched/rt.c
2485
static void watchdog(struct rq *rq, struct task_struct *p)
kernel/sched/rt.c
2509
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
kernel/sched/rt.c
2520
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
kernel/sched/rt.c
2524
update_curr_rt(rq);
kernel/sched/rt.c
2525
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
kernel/sched/rt.c
2527
watchdog(rq, p);
kernel/sched/rt.c
2547
requeue_task_rt(rq, p, 0);
kernel/sched/rt.c
2548
resched_curr(rq);
kernel/sched/rt.c
2554
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
kernel/sched/rt.c
304
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
kernel/sched/rt.c
306
return container_of(rt_rq, struct rq, rt);
kernel/sched/rt.c
309
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
kernel/sched/rt.c
318
struct rq *rq = rq_of_rt_se(rt_se);
kernel/sched/rt.c
320
return &rq->rt;
kernel/sched/rt.c
333
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
kernel/sched/rt.c
336
return rq->online && rq->rt.highest_prio.curr > prev->prio;
kernel/sched/rt.c
339
static inline int rt_overloaded(struct rq *rq)
kernel/sched/rt.c
341
return atomic_read(&rq->rd->rto_count);
kernel/sched/rt.c
344
static inline void rt_set_overload(struct rq *rq)
kernel/sched/rt.c
346
if (!rq->online)
kernel/sched/rt.c
349
cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
kernel/sched/rt.c
360
atomic_inc(&rq->rd->rto_count);
kernel/sched/rt.c
363
static inline void rt_clear_overload(struct rq *rq)
kernel/sched/rt.c
365
if (!rq->online)
kernel/sched/rt.c
369
atomic_dec(&rq->rd->rto_count);
kernel/sched/rt.c
370
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
kernel/sched/rt.c
373
static inline int has_pushable_tasks(struct rq *rq)
kernel/sched/rt.c
375
return !plist_head_empty(&rq->rt.pushable_tasks);
kernel/sched/rt.c
381
static void push_rt_tasks(struct rq *);
kernel/sched/rt.c
382
static void pull_rt_task(struct rq *);
kernel/sched/rt.c
384
static inline void rt_queue_push_tasks(struct rq *rq)
kernel/sched/rt.c
386
if (!has_pushable_tasks(rq))
kernel/sched/rt.c
389
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
kernel/sched/rt.c
392
static inline void rt_queue_pull_task(struct rq *rq)
kernel/sched/rt.c
394
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
kernel/sched/rt.c
397
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
kernel/sched/rt.c
399
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
kernel/sched/rt.c
401
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
kernel/sched/rt.c
404
if (p->prio < rq->rt.highest_prio.next)
kernel/sched/rt.c
405
rq->rt.highest_prio.next = p->prio;
kernel/sched/rt.c
407
if (!rq->rt.overloaded) {
kernel/sched/rt.c
408
rt_set_overload(rq);
kernel/sched/rt.c
409
rq->rt.overloaded = 1;
kernel/sched/rt.c
413
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
kernel/sched/rt.c
415
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
kernel/sched/rt.c
418
if (has_pushable_tasks(rq)) {
kernel/sched/rt.c
419
p = plist_first_entry(&rq->rt.pushable_tasks,
kernel/sched/rt.c
421
rq->rt.highest_prio.next = p->prio;
kernel/sched/rt.c
423
rq->rt.highest_prio.next = MAX_RT_PRIO-1;
kernel/sched/rt.c
425
if (rq->rt.overloaded) {
kernel/sched/rt.c
426
rt_clear_overload(rq);
kernel/sched/rt.c
427
rq->rt.overloaded = 0;
kernel/sched/rt.c
511
#define for_each_rt_rq(rt_rq, iter, rq) \
kernel/sched/rt.c
513
iter && (rt_rq = iter->rt_rq[cpu_of(rq)]); \
kernel/sched/rt.c
530
struct rq *rq = rq_of_rt_rq(rt_rq);
kernel/sched/rt.c
533
int cpu = cpu_of(rq);
kernel/sched/rt.c
544
resched_curr(rq);
kernel/sched/rt.c
660
static void __disable_runtime(struct rq *rq)
kernel/sched/rt.c
662
struct root_domain *rd = rq->rd;
kernel/sched/rt.c
669
for_each_rt_rq(rt_rq, iter, rq) {
kernel/sched/rt.c
742
static void __enable_runtime(struct rq *rq)
kernel/sched/rt.c
753
for_each_rt_rq(rt_rq, iter, rq) {
kernel/sched/rt.c
800
struct rq *rq = rq_of_rt_rq(rt_rq);
kernel/sched/rt.c
816
rq_lock(rq, &rf);
kernel/sched/rt.c
817
update_rq_clock(rq);
kernel/sched/rt.c
838
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
kernel/sched/rt.c
839
rq_clock_cancel_skipupdate(rq);
kernel/sched/rt.c
854
rq_unlock(rq, &rf);
kernel/sched/rt.c
910
#define for_each_rt_rq(rt_rq, iter, rq) \
kernel/sched/rt.c
911
for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
kernel/sched/rt.c
923
struct rq *rq = rq_of_rt_rq(rt_rq);
kernel/sched/rt.c
929
resched_curr(rq);
kernel/sched/rt.c
953
static void __enable_runtime(struct rq *rq) { }
kernel/sched/rt.c
954
static void __disable_runtime(struct rq *rq) { }
kernel/sched/rt.c
974
static void update_curr_rt(struct rq *rq)
kernel/sched/rt.c
976
struct task_struct *donor = rq->donor;
kernel/sched/rt.c
982
delta_exec = update_curr_common(rq);
kernel/sched/sched.h
1054
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
kernel/sched/sched.h
107
extern void calc_global_load_tick(struct rq *this_rq);
kernel/sched/sched.h
108
extern long calc_load_fold_active(struct rq *this_rq, long adjust);
kernel/sched/sched.h
110
extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
kernel/sched/sched.h
1321
struct rq *core;
kernel/sched/sched.h
1352
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
kernel/sched/sched.h
1354
return cfs_rq->rq;
kernel/sched/sched.h
1359
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
kernel/sched/sched.h
1361
return container_of(cfs_rq, struct rq, cfs);
kernel/sched/sched.h
1365
static inline int cpu_of(struct rq *rq)
kernel/sched/sched.h
1367
return rq->cpu;
kernel/sched/sched.h
1377
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
kernel/sched/sched.h
1385
static __always_inline struct rq *__this_rq(void)
kernel/sched/sched.h
1396
static inline bool idle_rq(struct rq *rq)
kernel/sched/sched.h
1398
return rq->curr == rq->idle && !rq->nr_running && !rq->ttwu_pending;
kernel/sched/sched.h
1419
static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
kernel/sched/sched.h
1421
rcu_assign_pointer(rq->donor, t);
kernel/sched/sched.h
1424
static inline void rq_set_donor(struct rq *rq, struct task_struct *t)
kernel/sched/sched.h
1435
static inline bool sched_core_enabled(struct rq *rq)
kernel/sched/sched.h
1437
return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
kernel/sched/sched.h
1449
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
kernel/sched/sched.h
1451
if (sched_core_enabled(rq))
kernel/sched/sched.h
1452
return &rq->core->__lock;
kernel/sched/sched.h
1454
return &rq->__lock;
kernel/sched/sched.h
1457
static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
kernel/sched/sched.h
1458
__returns_ctx_lock(rq_lockp(rq)) /* alias them */
kernel/sched/sched.h
1460
if (rq->core_enabled)
kernel/sched/sched.h
1461
return &rq->core->__lock;
kernel/sched/sched.h
1463
return &rq->__lock;
kernel/sched/sched.h
1469
extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
kernel/sched/sched.h
1477
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
1480
if (!sched_core_enabled(rq))
kernel/sched/sched.h
1483
return rq->core->core_cookie == p->core_cookie;
kernel/sched/sched.h
1486
static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
1492
if (!sched_core_enabled(rq))
kernel/sched/sched.h
1495
if (rq->core->core_cookie == p->core_cookie)
kernel/sched/sched.h
1498
for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
kernel/sched/sched.h
1512
static inline bool sched_group_cookie_match(struct rq *rq,
kernel/sched/sched.h
1519
if (!sched_core_enabled(rq))
kernel/sched/sched.h
1534
extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
kernel/sched/sched.h
1535
extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
1542
static inline bool sched_core_enabled(struct rq *rq)
kernel/sched/sched.h
1552
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
kernel/sched/sched.h
1554
return &rq->__lock;
kernel/sched/sched.h
1557
static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
kernel/sched/sched.h
1558
__returns_ctx_lock(rq_lockp(rq)) /* alias them */
kernel/sched/sched.h
1560
return &rq->__lock;
kernel/sched/sched.h
1563
static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
1568
static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
1573
static inline bool sched_group_cookie_match(struct rq *rq,
kernel/sched/sched.h
1600
static inline void lockdep_assert_rq_held(struct rq *rq)
kernel/sched/sched.h
1601
__assumes_ctx_lock(__rq_lockp(rq))
kernel/sched/sched.h
1603
lockdep_assert_held(__rq_lockp(rq));
kernel/sched/sched.h
1606
extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
kernel/sched/sched.h
1607
__acquires(__rq_lockp(rq));
kernel/sched/sched.h
1609
extern bool raw_spin_rq_trylock(struct rq *rq)
kernel/sched/sched.h
1610
__cond_acquires(true, __rq_lockp(rq));
kernel/sched/sched.h
1612
extern void raw_spin_rq_unlock(struct rq *rq)
kernel/sched/sched.h
1613
__releases(__rq_lockp(rq));
kernel/sched/sched.h
1615
static inline void raw_spin_rq_lock(struct rq *rq)
kernel/sched/sched.h
1616
__acquires(__rq_lockp(rq))
kernel/sched/sched.h
1618
raw_spin_rq_lock_nested(rq, 0);
kernel/sched/sched.h
1621
static inline void raw_spin_rq_lock_irq(struct rq *rq)
kernel/sched/sched.h
1622
__acquires(__rq_lockp(rq))
kernel/sched/sched.h
1625
raw_spin_rq_lock(rq);
kernel/sched/sched.h
1628
static inline void raw_spin_rq_unlock_irq(struct rq *rq)
kernel/sched/sched.h
1629
__releases(__rq_lockp(rq))
kernel/sched/sched.h
1631
raw_spin_rq_unlock(rq);
kernel/sched/sched.h
1635
static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
kernel/sched/sched.h
1636
__acquires(__rq_lockp(rq))
kernel/sched/sched.h
1641
raw_spin_rq_lock(rq);
kernel/sched/sched.h
1646
static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
kernel/sched/sched.h
1647
__releases(__rq_lockp(rq))
kernel/sched/sched.h
1649
raw_spin_rq_unlock(rq);
kernel/sched/sched.h
1653
#define raw_spin_rq_lock_irqsave(rq, flags) \
kernel/sched/sched.h
1655
flags = _raw_spin_rq_lock_irqsave(rq); \
kernel/sched/sched.h
1659
extern void __update_idle_core(struct rq *rq);
kernel/sched/sched.h
1661
static inline void update_idle_core(struct rq *rq)
kernel/sched/sched.h
1664
__update_idle_core(rq);
kernel/sched/sched.h
1668
static inline void update_idle_core(struct rq *rq) { }
kernel/sched/sched.h
1708
struct rq *rq = task_rq(p);
kernel/sched/sched.h
1710
return &rq->cfs;
kernel/sched/sched.h
1721
extern void update_rq_avg_idle(struct rq *rq);
kernel/sched/sched.h
1722
extern void update_rq_clock(struct rq *rq);
kernel/sched/sched.h
1751
static inline void assert_clock_updated(struct rq *rq)
kernel/sched/sched.h
1757
WARN_ON_ONCE(rq->clock_update_flags < RQCF_ACT_SKIP);
kernel/sched/sched.h
1760
static inline u64 rq_clock(struct rq *rq)
kernel/sched/sched.h
1762
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
1763
assert_clock_updated(rq);
kernel/sched/sched.h
1765
return rq->clock;
kernel/sched/sched.h
1768
static inline u64 rq_clock_task(struct rq *rq)
kernel/sched/sched.h
1770
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
1771
assert_clock_updated(rq);
kernel/sched/sched.h
1773
return rq->clock_task;
kernel/sched/sched.h
1776
static inline void rq_clock_skip_update(struct rq *rq)
kernel/sched/sched.h
1778
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
1779
rq->clock_update_flags |= RQCF_REQ_SKIP;
kernel/sched/sched.h
1786
static inline void rq_clock_cancel_skipupdate(struct rq *rq)
kernel/sched/sched.h
1788
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
1789
rq->clock_update_flags &= ~RQCF_REQ_SKIP;
kernel/sched/sched.h
1801
static inline void rq_clock_start_loop_update(struct rq *rq)
kernel/sched/sched.h
1803
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
1804
WARN_ON_ONCE(rq->clock_update_flags & RQCF_ACT_SKIP);
kernel/sched/sched.h
1805
rq->clock_update_flags |= RQCF_ACT_SKIP;
kernel/sched/sched.h
1808
static inline void rq_clock_stop_loop_update(struct rq *rq)
kernel/sched/sched.h
1810
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
1811
rq->clock_update_flags &= ~RQCF_ACT_SKIP;
kernel/sched/sched.h
1836
static inline void scx_rq_clock_update(struct rq *rq, u64 clock)
kernel/sched/sched.h
1840
WRITE_ONCE(rq->scx.clock, clock);
kernel/sched/sched.h
1841
smp_store_release(&rq->scx.flags, rq->scx.flags | SCX_RQ_CLK_VALID);
kernel/sched/sched.h
1844
static inline void scx_rq_clock_invalidate(struct rq *rq)
kernel/sched/sched.h
1848
WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
kernel/sched/sched.h
1855
static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {}
kernel/sched/sched.h
1856
static inline void scx_rq_clock_invalidate(struct rq *rq) {}
kernel/sched/sched.h
1869
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1871
rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
kernel/sched/sched.h
1873
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
kernel/sched/sched.h
1875
WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback);
kernel/sched/sched.h
1878
static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1880
if (rq->clock_update_flags > RQCF_ACT_SKIP)
kernel/sched/sched.h
1883
scx_rq_clock_invalidate(rq);
kernel/sched/sched.h
1884
lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
kernel/sched/sched.h
1887
static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1889
lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
kernel/sched/sched.h
1894
rq->clock_update_flags |= rf->clock_update_flags;
kernel/sched/sched.h
1898
extern struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires_ret;
kernel/sched/sched.h
1901
extern struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
kernel/sched/sched.h
1905
__task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
kernel/sched/sched.h
1906
__releases(__rq_lockp(rq))
kernel/sched/sched.h
1908
rq_unpin_lock(rq, rf);
kernel/sched/sched.h
1909
raw_spin_rq_unlock(rq);
kernel/sched/sched.h
1913
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
kernel/sched/sched.h
1914
__releases(__rq_lockp(rq), &p->pi_lock)
kernel/sched/sched.h
1916
__task_rq_unlock(rq, p, rf);
kernel/sched/sched.h
1921
_T->rq = task_rq_lock(_T->lock, &_T->rf),
kernel/sched/sched.h
1922
task_rq_unlock(_T->rq, _T->lock, &_T->rf),
kernel/sched/sched.h
1923
struct rq *rq; struct rq_flags rf)
kernel/sched/sched.h
1928
_T->rq = __task_rq_lock(_T->lock, &_T->rf),
kernel/sched/sched.h
1929
__task_rq_unlock(_T->rq, _T->lock, &_T->rf),
kernel/sched/sched.h
1930
struct rq *rq; struct rq_flags rf)
kernel/sched/sched.h
1932
static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1933
__acquires(__rq_lockp(rq))
kernel/sched/sched.h
1935
raw_spin_rq_lock_irqsave(rq, rf->flags);
kernel/sched/sched.h
1936
rq_pin_lock(rq, rf);
kernel/sched/sched.h
1939
static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1940
__acquires(__rq_lockp(rq))
kernel/sched/sched.h
1942
raw_spin_rq_lock_irq(rq);
kernel/sched/sched.h
1943
rq_pin_lock(rq, rf);
kernel/sched/sched.h
1946
static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1947
__acquires(__rq_lockp(rq))
kernel/sched/sched.h
1949
raw_spin_rq_lock(rq);
kernel/sched/sched.h
1950
rq_pin_lock(rq, rf);
kernel/sched/sched.h
1953
static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1954
__releases(__rq_lockp(rq))
kernel/sched/sched.h
1956
rq_unpin_lock(rq, rf);
kernel/sched/sched.h
1957
raw_spin_rq_unlock_irqrestore(rq, rf->flags);
kernel/sched/sched.h
1960
static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1961
__releases(__rq_lockp(rq))
kernel/sched/sched.h
1963
rq_unpin_lock(rq, rf);
kernel/sched/sched.h
1964
raw_spin_rq_unlock_irq(rq);
kernel/sched/sched.h
1967
static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
kernel/sched/sched.h
1968
__releases(__rq_lockp(rq))
kernel/sched/sched.h
1970
rq_unpin_lock(rq, rf);
kernel/sched/sched.h
1971
raw_spin_rq_unlock(rq);
kernel/sched/sched.h
1974
DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
kernel/sched/sched.h
1979
DECLARE_LOCK_GUARD_1_ATTRS(rq_lock, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
kernel/sched/sched.h
1982
DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
kernel/sched/sched.h
1987
DECLARE_LOCK_GUARD_1_ATTRS(rq_lock_irq, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
kernel/sched/sched.h
1990
DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
kernel/sched/sched.h
1995
DECLARE_LOCK_GUARD_1_ATTRS(rq_lock_irqsave, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
kernel/sched/sched.h
1999
static inline struct rq *_this_rq_lock_irq(struct rq_flags *rf) __acquires_ret
kernel/sched/sched.h
2001
struct rq *rq;
kernel/sched/sched.h
2004
rq = this_rq();
kernel/sched/sched.h
2005
rq_lock(rq, rf);
kernel/sched/sched.h
2007
return rq;
kernel/sched/sched.h
2067
queue_balance_callback(struct rq *rq,
kernel/sched/sched.h
2069
void (*func)(struct rq *rq))
kernel/sched/sched.h
2071
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
2078
if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
kernel/sched/sched.h
2082
head->next = rq->balance_callback;
kernel/sched/sched.h
2083
rq->balance_callback = head;
kernel/sched/sched.h
2364
static inline int task_current(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
2366
return rq->curr == p;
kernel/sched/sched.h
2375
static inline int task_current_donor(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
2377
return rq->donor == p;
kernel/sched/sched.h
2388
static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
2498
extern s64 update_curr_common(struct rq *rq);
kernel/sched/sched.h
2514
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
2522
bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
2527
void (*yield_task) (struct rq *rq);
kernel/sched/sched.h
2531
bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
kernel/sched/sched.h
2540
void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
2545
int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
kernel/sched/sched.h
2550
struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf);
kernel/sched/sched.h
2560
struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev,
kernel/sched/sched.h
2567
void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
kernel/sched/sched.h
2568
void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
kernel/sched/sched.h
2585
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2595
void (*rq_online)(struct rq *rq);
kernel/sched/sched.h
2596
void (*rq_offline)(struct rq *rq);
kernel/sched/sched.h
2601
struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
kernel/sched/sched.h
2608
void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
kernel/sched/sched.h
2621
void (*switching_from)(struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2622
void (*switched_from) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2623
void (*switching_to) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2624
void (*switched_to) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2625
u64 (*get_prio) (struct rq *this_rq, struct task_struct *task);
kernel/sched/sched.h
2626
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
kernel/sched/sched.h
2633
void (*reweight_task)(struct rq *this_rq, struct task_struct *task,
kernel/sched/sched.h
2639
unsigned int (*get_rr_interval)(struct rq *rq,
kernel/sched/sched.h
2645
void (*update_curr)(struct rq *rq);
kernel/sched/sched.h
2663
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
kernel/sched/sched.h
2665
WARN_ON_ONCE(rq->donor != prev);
kernel/sched/sched.h
2666
prev->sched_class->put_prev_task(rq, prev, NULL);
kernel/sched/sched.h
2669
static inline void set_next_task(struct rq *rq, struct task_struct *next)
kernel/sched/sched.h
2671
next->sched_class->set_next_task(rq, next, false);
kernel/sched/sched.h
2675
__put_prev_set_next_dl_server(struct rq *rq,
kernel/sched/sched.h
2680
next->dl_server = rq->dl_server;
kernel/sched/sched.h
2681
rq->dl_server = NULL;
kernel/sched/sched.h
2684
static inline void put_prev_set_next_task(struct rq *rq,
kernel/sched/sched.h
2688
WARN_ON_ONCE(rq->donor != prev);
kernel/sched/sched.h
2690
__put_prev_set_next_dl_server(rq, prev, next);
kernel/sched/sched.h
2695
prev->sched_class->put_prev_task(rq, prev, next);
kernel/sched/sched.h
2696
next->sched_class->set_next_task(rq, next, true);
kernel/sched/sched.h
2754
static inline void rq_modified_begin(struct rq *rq, const struct sched_class *class)
kernel/sched/sched.h
2756
if (sched_class_above(rq->next_class, class))
kernel/sched/sched.h
2757
rq->next_class = class;
kernel/sched/sched.h
2760
static inline bool rq_modified_above(struct rq *rq, const struct sched_class *class)
kernel/sched/sched.h
2762
return sched_class_above(rq->next_class, class);
kernel/sched/sched.h
2765
static inline bool sched_stop_runnable(struct rq *rq)
kernel/sched/sched.h
2767
return rq->stop && task_on_rq_queued(rq->stop);
kernel/sched/sched.h
2770
static inline bool sched_dl_runnable(struct rq *rq)
kernel/sched/sched.h
2772
return rq->dl.dl_nr_running > 0;
kernel/sched/sched.h
2775
static inline bool sched_rt_runnable(struct rq *rq)
kernel/sched/sched.h
2777
return rq->rt.rt_queued > 0;
kernel/sched/sched.h
2780
static inline bool sched_fair_runnable(struct rq *rq)
kernel/sched/sched.h
2782
return rq->cfs.nr_queued > 0;
kernel/sched/sched.h
2785
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev,
kernel/sched/sched.h
2787
extern struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf);
kernel/sched/sched.h
2796
extern void sched_balance_trigger(struct rq *rq);
kernel/sched/sched.h
2824
static inline struct task_struct *get_push_task(struct rq *rq)
kernel/sched/sched.h
2826
struct task_struct *p = rq->donor;
kernel/sched/sched.h
2828
lockdep_assert_rq_held(rq);
kernel/sched/sched.h
2830
if (rq->push_busy)
kernel/sched/sched.h
2839
rq->push_busy = true;
kernel/sched/sched.h
2847
static inline void idle_set_state(struct rq *rq,
kernel/sched/sched.h
2850
rq->idle_state = idle_state;
kernel/sched/sched.h
2853
static inline struct cpuidle_state *idle_get_state(struct rq *rq)
kernel/sched/sched.h
2857
return rq->idle_state;
kernel/sched/sched.h
2862
static inline void idle_set_state(struct rq *rq,
kernel/sched/sched.h
2867
static inline struct cpuidle_state *idle_get_state(struct rq *rq)
kernel/sched/sched.h
2885
extern void resched_curr(struct rq *rq);
kernel/sched/sched.h
2886
extern void resched_curr_lazy(struct rq *rq);
kernel/sched/sched.h
2908
extern bool sched_can_stop_tick(struct rq *rq);
kernel/sched/sched.h
2916
static inline void sched_update_tick_dependency(struct rq *rq)
kernel/sched/sched.h
2918
int cpu = cpu_of(rq);
kernel/sched/sched.h
2923
if (sched_can_stop_tick(rq))
kernel/sched/sched.h
2930
static inline void sched_update_tick_dependency(struct rq *rq) { }
kernel/sched/sched.h
2933
static inline void add_nr_running(struct rq *rq, unsigned count)
kernel/sched/sched.h
2935
unsigned prev_nr = rq->nr_running;
kernel/sched/sched.h
2937
rq->nr_running = prev_nr + count;
kernel/sched/sched.h
2939
call_trace_sched_update_nr_running(rq, count);
kernel/sched/sched.h
2942
if (prev_nr < 2 && rq->nr_running >= 2)
kernel/sched/sched.h
2943
set_rd_overloaded(rq->rd, 1);
kernel/sched/sched.h
2945
sched_update_tick_dependency(rq);
kernel/sched/sched.h
2948
static inline void sub_nr_running(struct rq *rq, unsigned count)
kernel/sched/sched.h
2950
rq->nr_running -= count;
kernel/sched/sched.h
2952
call_trace_sched_update_nr_running(rq, -count);
kernel/sched/sched.h
2956
sched_update_tick_dependency(rq);
kernel/sched/sched.h
2959
static inline void __block_task(struct rq *rq, struct task_struct *p)
kernel/sched/sched.h
2962
rq->nr_uninterruptible++;
kernel/sched/sched.h
2965
atomic_inc(&rq->nr_iowait);
kernel/sched/sched.h
3002
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
3003
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
3005
extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
3036
static inline int hrtick_enabled(struct rq *rq)
kernel/sched/sched.h
3038
if (!cpu_active(cpu_of(rq)))
kernel/sched/sched.h
3040
return hrtimer_is_hres_active(&rq->hrtick_timer);
kernel/sched/sched.h
3043
static inline int hrtick_enabled_fair(struct rq *rq)
kernel/sched/sched.h
3047
return hrtick_enabled(rq);
kernel/sched/sched.h
3050
static inline int hrtick_enabled_dl(struct rq *rq)
kernel/sched/sched.h
3054
return hrtick_enabled(rq);
kernel/sched/sched.h
3057
extern void hrtick_start(struct rq *rq, u64 delay);
kernel/sched/sched.h
3061
static inline int hrtick_enabled_fair(struct rq *rq)
kernel/sched/sched.h
3066
static inline int hrtick_enabled_dl(struct rq *rq)
kernel/sched/sched.h
3071
static inline int hrtick_enabled(struct rq *rq)
kernel/sched/sched.h
3106
static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
kernel/sched/sched.h
3130
static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
kernel/sched/sched.h
3156
extern void double_rq_lock(struct rq *rq1, struct rq *rq2)
kernel/sched/sched.h
3169
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h
3187
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h
3219
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h
3228
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
kernel/sched/sched.h
3292
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
kernel/sched/sched.h
3302
extern void set_rq_online (struct rq *rq);
kernel/sched/sched.h
3303
extern void set_rq_offline(struct rq *rq);
kernel/sched/sched.h
3307
DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
kernel/sched/sched.h
3360
extern void nohz_balance_exit_idle(struct rq *rq);
kernel/sched/sched.h
3362
static inline void nohz_balance_exit_idle(struct rq *rq) { }
kernel/sched/sched.h
3375
extern void __sched_core_account_forceidle(struct rq *rq);
kernel/sched/sched.h
3377
static inline void sched_core_account_forceidle(struct rq *rq)
kernel/sched/sched.h
3380
__sched_core_account_forceidle(rq);
kernel/sched/sched.h
3383
extern void __sched_core_tick(struct rq *rq);
kernel/sched/sched.h
3385
static inline void sched_core_tick(struct rq *rq)
kernel/sched/sched.h
3387
if (sched_core_enabled(rq) && schedstat_enabled())
kernel/sched/sched.h
3388
__sched_core_tick(rq);
kernel/sched/sched.h
3393
static inline void sched_core_account_forceidle(struct rq *rq) { }
kernel/sched/sched.h
3395
static inline void sched_core_tick(struct rq *rq) { }
kernel/sched/sched.h
3470
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
kernel/sched/sched.h
3475
cpu_of(rq)));
kernel/sched/sched.h
3477
data->func(data, rq_clock(rq), flags);
kernel/sched/sched.h
3480
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { }
kernel/sched/sched.h
3515
static inline unsigned long cpu_bw_dl(struct rq *rq)
kernel/sched/sched.h
3517
return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
kernel/sched/sched.h
3520
static inline unsigned long cpu_util_dl(struct rq *rq)
kernel/sched/sched.h
3522
return READ_ONCE(rq->avg_dl.util_avg);
kernel/sched/sched.h
3529
static inline unsigned long cpu_util_rt(struct rq *rq)
kernel/sched/sched.h
3531
return READ_ONCE(rq->avg_rt.util_avg);
kernel/sched/sched.h
3563
static inline unsigned long uclamp_rq_get(struct rq *rq,
kernel/sched/sched.h
3566
return READ_ONCE(rq->uclamp[clamp_id].value);
kernel/sched/sched.h
3569
static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id,
kernel/sched/sched.h
3572
WRITE_ONCE(rq->uclamp[clamp_id].value, value);
kernel/sched/sched.h
3575
static inline bool uclamp_rq_is_idle(struct rq *rq)
kernel/sched/sched.h
3577
return rq->uclamp_flags & UCLAMP_FLAG_IDLE;
kernel/sched/sched.h
3581
static inline bool uclamp_rq_is_capped(struct rq *rq)
kernel/sched/sched.h
3589
rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);
kernel/sched/sched.h
3590
max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
kernel/sched/sched.h
3635
static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
kernel/sched/sched.h
364
extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec);
kernel/sched/sched.h
3645
uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
kernel/sched/sched.h
3654
uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value)
kernel/sched/sched.h
3658
static inline bool uclamp_rq_is_idle(struct rq *rq)
kernel/sched/sched.h
3667
static inline unsigned long cpu_util_irq(struct rq *rq)
kernel/sched/sched.h
3669
return READ_ONCE(rq->avg_irq.util_avg);
kernel/sched/sched.h
3684
static inline unsigned long cpu_util_irq(struct rq *rq)
kernel/sched/sched.h
3726
static inline void membarrier_switch_mm(struct rq *rq,
kernel/sched/sched.h
3736
if (READ_ONCE(rq->membarrier_state) == membarrier_state)
kernel/sched/sched.h
3739
WRITE_ONCE(rq->membarrier_state, membarrier_state);
kernel/sched/sched.h
3744
static inline void membarrier_switch_mm(struct rq *rq,
kernel/sched/sched.h
4016
void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task)
kernel/sched/sched.h
4028
bool task_is_pushable(struct rq *rq, struct task_struct *p, int cpu)
kernel/sched/sched.h
4030
if (!task_on_cpu(rq, p) &&
kernel/sched/sched.h
4067
extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
4068
extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags);
kernel/sched/sched.h
4070
extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
kernel/sched/sched.h
4072
extern void __balance_callbacks(struct rq *rq, struct rq_flags *rf);
kernel/sched/sched.h
4073
extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
kernel/sched/sched.h
415
extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
kernel/sched/sched.h
419
extern void fair_server_init(struct rq *rq);
kernel/sched/sched.h
420
extern void ext_server_init(struct rq *rq);
kernel/sched/sched.h
421
extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq);
kernel/sched/sched.h
674
void (*func)(struct rq *rq);
kernel/sched/sched.h
734
struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */
kernel/sched/sched.h
83
struct rq;
kernel/sched/sched.h
853
struct rq *rq; /* this is always top-level rq, cache? */
kernel/sched/stats.c
117
struct rq *rq;
kernel/sched/stats.c
12
wait_start = rq_clock(rq);
kernel/sched/stats.c
121
rq = cpu_rq(cpu);
kernel/sched/stats.c
126
cpu, rq->yld_count,
kernel/sched/stats.c
127
rq->sched_count, rq->sched_goidle,
kernel/sched/stats.c
128
rq->ttwu_count, rq->ttwu_local,
kernel/sched/stats.c
129
rq->rq_cpu_time,
kernel/sched/stats.c
130
rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
kernel/sched/stats.c
21
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
kernel/sched/stats.c
24
u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);
kernel/sched/stats.c
48
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
kernel/sched/stats.c
57
u64 delta = rq_clock(rq) - sleep_start;
kernel/sched/stats.c
7
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
kernel/sched/stats.c
75
u64 delta = rq_clock(rq) - block_start;
kernel/sched/stats.h
114
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
kernel/sched/stats.h
116
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
kernel/sched/stats.h
13
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
kernel/sched/stats.h
15
if (rq) {
kernel/sched/stats.h
16
rq->rq_sched_info.run_delay += delta;
kernel/sched/stats.h
17
rq->rq_sched_info.pcount++;
kernel/sched/stats.h
210
struct rq *rq;
kernel/sched/stats.h
212
rq = __task_rq_lock(p, &rf);
kernel/sched/stats.h
214
__task_rq_unlock(rq, p, &rf);
kernel/sched/stats.h
235
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
kernel/sched/stats.h
246
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
kernel/sched/stats.h
25
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
kernel/sched/stats.h
253
delta = rq_clock(rq) - t->sched_info.last_queued;
kernel/sched/stats.h
262
rq_sched_info_dequeue(rq, delta);
kernel/sched/stats.h
27
if (rq)
kernel/sched/stats.h
270
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
kernel/sched/stats.h
277
now = rq_clock(rq);
kernel/sched/stats.h
28
rq->rq_cpu_time += delta;
kernel/sched/stats.h
290
rq_sched_info_arrive(rq, delta);
kernel/sched/stats.h
298
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
kernel/sched/stats.h
301
t->sched_info.last_queued = rq_clock(rq);
kernel/sched/stats.h
312
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
kernel/sched/stats.h
314
unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;
kernel/sched/stats.h
316
rq_sched_info_depart(rq, delta);
kernel/sched/stats.h
319
sched_info_enqueue(rq, t);
kernel/sched/stats.h
32
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
kernel/sched/stats.h
328
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
kernel/sched/stats.h
335
if (prev != rq->idle)
kernel/sched/stats.h
336
sched_info_depart(rq, prev);
kernel/sched/stats.h
338
if (next != rq->idle)
kernel/sched/stats.h
339
sched_info_arrive(rq, next);
kernel/sched/stats.h
34
if (rq)
kernel/sched/stats.h
343
# define sched_info_enqueue(rq, t) do { } while (0)
kernel/sched/stats.h
344
# define sched_info_dequeue(rq, t) do { } while (0)
kernel/sched/stats.h
345
# define sched_info_switch(rq, t, next) do { } while (0)
kernel/sched/stats.h
35
rq->rq_sched_info.run_delay += delta;
kernel/sched/stats.h
47
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
kernel/sched/stats.h
50
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
kernel/sched/stats.h
52
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
kernel/sched/stats.h
72
static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
kernel/sched/stats.h
73
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
kernel/sched/stats.h
74
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
kernel/sched/stats.h
85
# define __update_stats_wait_start(rq, p, stats) do { } while (0)
kernel/sched/stats.h
86
# define __update_stats_wait_end(rq, p, stats) do { } while (0)
kernel/sched/stats.h
87
# define __update_stats_enqueue_sleeper(rq, p, stats) do { } while (0)
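
Taken together, the kernel/sched/stats.h entries above show the schedstat timeline: sched_info_enqueue() stamps last_queued when a task becomes runnable, sched_info_arrive()/rq_sched_info_arrive() turn that stamp into accumulated run_delay (plus a pcount increment) when the task gets the CPU, sched_info_depart()/rq_sched_info_depart() add the on-CPU interval to rq_cpu_time, and sched_info_switch() skips the idle task on both sides. A small user-space sketch of that stamp-and-accumulate accounting, with invented toy_* names and plain integer timestamps:

#include <stdint.h>
#include <stdio.h>

struct toy_task {
    uint64_t last_queued;    /* clock when the task became runnable */
    uint64_t last_arrival;   /* clock when the task last got the CPU */
};

struct toy_rq_stats {
    uint64_t run_delay;      /* total time spent waiting to run */
    uint64_t cpu_time;       /* total time spent on the CPU */
    unsigned long pcount;    /* number of arrivals on the CPU */
};

static void toy_info_enqueue(struct toy_task *t, uint64_t now)
{
    t->last_queued = now;                    /* record when waiting started */
}

static void toy_info_arrive(struct toy_rq_stats *st, struct toy_task *t,
                            uint64_t now)
{
    st->run_delay += now - t->last_queued;   /* waiting time becomes run_delay */
    st->pcount++;
    t->last_arrival = now;
}

static void toy_info_depart(struct toy_rq_stats *st, struct toy_task *t,
                            uint64_t now)
{
    st->cpu_time += now - t->last_arrival;   /* on-CPU interval is accumulated */
}

int main(void)
{
    struct toy_rq_stats st = { 0 };
    struct toy_task t = { 0 };

    toy_info_enqueue(&t, 100);      /* runnable at t=100 */
    toy_info_arrive(&st, &t, 130);  /* running at t=130: 30 units of delay */
    toy_info_depart(&st, &t, 180);  /* off CPU at t=180: 50 units of CPU time */
    printf("run_delay=%llu cpu_time=%llu pcount=%lu\n",
           (unsigned long long)st.run_delay,
           (unsigned long long)st.cpu_time, st.pcount);
    return 0;
}
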
kernel/sched/stop_task.c
19
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
kernel/sched/stop_task.c
21
return sched_stop_runnable(rq);
kernel/sched/stop_task.c
25
wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/stop_task.c
30
static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
kernel/sched/stop_task.c
32
stop->se.exec_start = rq_clock_task(rq);
kernel/sched/stop_task.c
35
static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf)
kernel/sched/stop_task.c
37
if (!sched_stop_runnable(rq))
kernel/sched/stop_task.c
40
return rq->stop;
kernel/sched/stop_task.c
44
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/stop_task.c
46
add_nr_running(rq, 1);
kernel/sched/stop_task.c
50
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
kernel/sched/stop_task.c
52
sub_nr_running(rq, 1);
kernel/sched/stop_task.c
56
static void yield_task_stop(struct rq *rq)
kernel/sched/stop_task.c
61
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)
kernel/sched/stop_task.c
63
update_curr_common(rq);
kernel/sched/stop_task.c
74
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
kernel/sched/stop_task.c
78
static void switching_to_stop(struct rq *rq, struct task_struct *p)
kernel/sched/stop_task.c
84
prio_changed_stop(struct rq *rq, struct task_struct *p, u64 oldprio)
kernel/sched/stop_task.c
92
static void update_curr_stop(struct rq *rq)
kernel/sched/syscalls.c
1332
struct rq *rq;
kernel/sched/syscalls.c
1334
rq = this_rq_lock_irq(&rf);
kernel/sched/syscalls.c
1336
schedstat_inc(rq->yld_count);
kernel/sched/syscalls.c
1337
rq->donor->sched_class->yield_task(rq);
kernel/sched/syscalls.c
1340
rq_unlock_irq(rq, &rf);
kernel/sched/syscalls.c
1407
struct rq *rq, *p_rq;
kernel/sched/syscalls.c
1411
rq = this_rq();
kernel/sched/syscalls.c
1412
curr = rq->donor;
kernel/sched/syscalls.c
1420
if (rq->nr_running == 1 && p_rq->nr_running == 1)
kernel/sched/syscalls.c
1423
guard(double_rq_lock)(rq, p_rq);
kernel/sched/syscalls.c
1436
yielded = curr->sched_class->yield_to_task(rq, p);
kernel/sched/syscalls.c
1438
schedstat_inc(rq->yld_count);
kernel/sched/syscalls.c
1443
if (preempt && rq != p_rq)
kernel/sched/syscalls.c
1528
struct rq *rq = scope.rq;
kernel/sched/syscalls.c
1530
time_slice = p->sched_class->get_rr_interval(rq, p);
kernel/sched/syscalls.c
200
struct rq *rq = cpu_rq(cpu);
kernel/sched/syscalls.c
202
if (sched_core_enabled(rq) && rq->curr == rq->idle)
kernel/sched/syscalls.c
504
struct rq *rq;
kernel/sched/syscalls.c
571
rq = task_rq_lock(p, &rf);
kernel/sched/syscalls.c
572
update_rq_clock(rq);
kernel/sched/syscalls.c
577
if (p == rq->stop) {
kernel/sched/syscalls.c
624
cpumask_t *span = rq->rd->span;
kernel/sched/syscalls.c
632
rq->rd->dl_bw.bw == 0) {
kernel/sched/syscalls.c
642
task_rq_unlock(rq, p, &rf);
kernel/sched/syscalls.c
703
head = splice_balance_callbacks(rq);
kernel/sched/syscalls.c
704
task_rq_unlock(rq, p, &rf);
kernel/sched/syscalls.c
713
balance_callbacks(rq, head);
kernel/sched/syscalls.c
719
task_rq_unlock(rq, p, &rf);
kernel/sched/topology.c
2561
struct rq *rq = NULL;
kernel/sched/topology.c
2680
rq = cpu_rq(i);
kernel/sched/topology.c
2696
if (rq && sched_debug_verbose)
kernel/sched/topology.c
472
void rq_attach_root(struct rq *rq, struct root_domain *rd)
kernel/sched/topology.c
477
rq_lock_irqsave(rq, &rf);
kernel/sched/topology.c
479
if (rq->rd) {
kernel/sched/topology.c
480
old_rd = rq->rd;
kernel/sched/topology.c
482
if (cpumask_test_cpu(rq->cpu, old_rd->online))
kernel/sched/topology.c
483
set_rq_offline(rq);
kernel/sched/topology.c
485
cpumask_clear_cpu(rq->cpu, old_rd->span);
kernel/sched/topology.c
497
rq->rd = rd;
kernel/sched/topology.c
499
cpumask_set_cpu(rq->cpu, rd->span);
kernel/sched/topology.c
500
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
kernel/sched/topology.c
501
set_rq_online(rq);
kernel/sched/topology.c
508
if (rq->fair_server.dl_server)
kernel/sched/topology.c
509
__dl_server_attach_root(&rq->fair_server, rq);
kernel/sched/topology.c
512
if (rq->ext_server.dl_server)
kernel/sched/topology.c
513
__dl_server_attach_root(&rq->ext_server, rq);
kernel/sched/topology.c
516
rq_unlock_irqrestore(rq, &rf);
kernel/sched/topology.c
723
struct rq *rq = cpu_rq(cpu);
kernel/sched/topology.c
774
rq_attach_root(rq, rd);
kernel/sched/topology.c
775
tmp = rq->sd;
kernel/sched/topology.c
776
rcu_assign_pointer(rq->sd, sd);
kernel/trace/blktrace.c
1002
return blk_trace_bio_get_cgid(rq->q, rq->bio);
kernel/trace/blktrace.c
1021
static void blk_add_trace_rq(struct request *rq, blk_status_t error,
kernel/trace/blktrace.c
1027
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1033
if (blk_rq_is_passthrough(rq))
kernel/trace/blktrace.c
1038
__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags,
kernel/trace/blktrace.c
1043
static void blk_add_trace_rq_insert(void *ignore, struct request *rq)
kernel/trace/blktrace.c
1045
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
kernel/trace/blktrace.c
1046
blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
1049
static void blk_add_trace_rq_issue(void *ignore, struct request *rq)
kernel/trace/blktrace.c
1051
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
kernel/trace/blktrace.c
1052
blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
1055
static void blk_add_trace_rq_merge(void *ignore, struct request *rq)
kernel/trace/blktrace.c
1057
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
kernel/trace/blktrace.c
1058
blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
1061
static void blk_add_trace_rq_requeue(void *ignore, struct request *rq)
kernel/trace/blktrace.c
1063
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
kernel/trace/blktrace.c
1064
blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
1067
static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
kernel/trace/blktrace.c
1070
blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
kernel/trace/blktrace.c
1071
blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
1074
static void blk_add_trace_zone_update_request(void *ignore, struct request *rq)
kernel/trace/blktrace.c
1079
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1086
blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ZONE_APPEND,
kernel/trace/blktrace.c
1087
blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
1276
static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
kernel/trace/blktrace.c
1283
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1290
r.device_to = cpu_to_be32(disk_devt(rq->q->disk));
kernel/trace/blktrace.c
1293
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
kernel/trace/blktrace.c
1294
rq->cmd_flags, BLK_TA_REMAP, 0,
kernel/trace/blktrace.c
1295
sizeof(r), &r, blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
1309
void blk_add_driver_data(struct request *rq, void *data, size_t len)
kernel/trace/blktrace.c
1314
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1320
__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0,
kernel/trace/blktrace.c
1322
blk_trace_request_get_cgid(rq));
kernel/trace/blktrace.c
997
blk_trace_request_get_cgid(struct request *rq)
kernel/trace/blktrace.c
999
if (!rq->bio)
net/9p/trans_fd.c
250
m = container_of(work, struct p9_conn, rq);
net/9p/trans_fd.c
366
schedule_work(&m->rq);
net/9p/trans_fd.c
563
INIT_WORK(&m->rq, p9_read_work);
net/9p/trans_fd.c
605
schedule_work(&m->rq);
net/9p/trans_fd.c
805
cancel_work_sync(&m->rq);
net/9p/trans_fd.c
94
struct work_struct rq;
net/atm/common.c
227
struct sk_buff_head queue, *rq;
net/atm/common.c
232
rq = &sk_atm(vcc)->sk_receive_queue;
net/atm/common.c
234
spin_lock_irqsave(&rq->lock, flags);
net/atm/common.c
235
skb_queue_splice_init(rq, &queue);
net/atm/common.c
236
spin_unlock_irqrestore(&rq->lock, flags);
net/bridge/br_ioctl.c
144
int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq,
net/bridge/br_private.h
959
int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq,
net/key/af_key.c
1960
struct sadb_x_ipsecrequest *rq)
net/key/af_key.c
1969
if (rq->sadb_x_ipsecrequest_mode == 0)
net/key/af_key.c
1971
if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
net/key/af_key.c
1974
t->id.proto = rq->sadb_x_ipsecrequest_proto;
net/key/af_key.c
1975
if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
net/key/af_key.c
1978
if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) {
net/key/af_key.c
1983
} else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
net/key/af_key.c
1984
t->reqid = rq->sadb_x_ipsecrequest_reqid;
net/key/af_key.c
1996
(struct sockaddr *)(rq + 1),
net/key/af_key.c
1997
rq->sadb_x_ipsecrequest_len - sizeof(*rq),
net/key/af_key.c
2015
struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
net/key/af_key.c
2020
while (len >= sizeof(*rq)) {
net/key/af_key.c
2021
if (len < rq->sadb_x_ipsecrequest_len ||
net/key/af_key.c
2022
rq->sadb_x_ipsecrequest_len < sizeof(*rq))
net/key/af_key.c
2025
if ((err = parse_ipsecrequest(xp, pol, rq)) < 0)
net/key/af_key.c
2027
len -= rq->sadb_x_ipsecrequest_len;
net/key/af_key.c
2028
rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
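
The net/key/af_key.c parse-loop entries above walk a buffer of variable-length sadb_x_ipsecrequest records: each record carries its own length, and the loop checks that the length is neither smaller than the header nor larger than what remains before advancing. A user-space sketch of that bounds-checked walk; the toy_req layout and the sample payload are invented, and the header is copied out with memcpy() to sidestep alignment concerns.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_req {         /* stands in for a length-prefixed record header */
    uint16_t len;        /* total record length, header included */
    uint16_t proto;
};

static int walk_requests(const uint8_t *buf, size_t len)
{
    while (len >= sizeof(struct toy_req)) {
        struct toy_req hdr;

        memcpy(&hdr, buf, sizeof(hdr));
        /* reject records that are truncated or shorter than their header */
        if (len < hdr.len || hdr.len < sizeof(hdr))
            return -1;

        printf("record: proto=%u len=%u\n", (unsigned)hdr.proto,
               (unsigned)hdr.len);

        buf += hdr.len;          /* advance by the record's own length */
        len -= hdr.len;
    }
    return 0;
}

int main(void)
{
    uint8_t buf[16] = { 0 };
    struct toy_req a = { .len = 8, .proto = 50 };   /* 4-byte header + 4 bytes */
    struct toy_req b = { .len = 8, .proto = 51 };

    memcpy(buf, &a, sizeof(a));
    memcpy(buf + 8, &b, sizeof(b));
    return walk_requests(buf, sizeof(buf)) ? 1 : 0;
}
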
net/key/af_key.c
2174
struct sadb_x_ipsecrequest *rq;
net/key/af_key.c
2185
rq = skb_put(skb, req_size);
net/key/af_key.c
2187
memset(rq, 0, sizeof(*rq));
net/key/af_key.c
2188
rq->sadb_x_ipsecrequest_len = req_size;
net/key/af_key.c
2189
rq->sadb_x_ipsecrequest_proto = t->id.proto;
net/key/af_key.c
2192
rq->sadb_x_ipsecrequest_mode = mode;
net/key/af_key.c
2193
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
net/key/af_key.c
2195
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
net/key/af_key.c
2197
rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
net/key/af_key.c
2198
rq->sadb_x_ipsecrequest_reqid = t->reqid;
net/key/af_key.c
2201
u8 *sa = (void *)(rq + 1);
net/key/af_key.c
2571
struct sadb_x_ipsecrequest *rq;
net/key/af_key.c
2625
rq = (struct sadb_x_ipsecrequest *)(pol + 1);
net/key/af_key.c
2632
ret = ipsecrequests_to_migrate(rq, len, &m[i]);
net/key/af_key.c
2637
rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
net/key/af_key.c
3540
struct sadb_x_ipsecrequest *rq;
net/key/af_key.c
3548
rq = skb_put_zero(skb, size_req);
net/key/af_key.c
3549
rq->sadb_x_ipsecrequest_len = size_req;
net/key/af_key.c
3550
rq->sadb_x_ipsecrequest_proto = proto;
net/key/af_key.c
3551
rq->sadb_x_ipsecrequest_mode = mode;
net/key/af_key.c
3552
rq->sadb_x_ipsecrequest_level = level;
net/key/af_key.c
3553
rq->sadb_x_ipsecrequest_reqid = reqid;
net/key/af_key.c
3555
sa = (u8 *) (rq + 1);
net/sunrpc/cache.c
1065
struct cache_request *rq = NULL;
net/sunrpc/cache.c
1082
rq = cr;
net/sunrpc/cache.c
1091
if (rq) {
net/sunrpc/cache.c
1092
cache_put(rq->item, cd);
net/sunrpc/cache.c
1093
kfree(rq->buf);
net/sunrpc/cache.c
1094
kfree(rq);
net/sunrpc/cache.c
840
struct cache_request *rq;
net/sunrpc/cache.c
864
rq = container_of(rp->q.list.next, struct cache_request, q.list);
net/sunrpc/cache.c
865
WARN_ON_ONCE(rq->q.reader);
net/sunrpc/cache.c
867
rq->readers++;
net/sunrpc/cache.c
870
if (rq->len == 0) {
net/sunrpc/cache.c
871
err = cache_request(cd, rq);
net/sunrpc/cache.c
874
rq->len = err;
net/sunrpc/cache.c
877
if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
net/sunrpc/cache.c
880
list_move(&rp->q.list, &rq->q.list);
net/sunrpc/cache.c
883
if (rp->offset + count > rq->len)
net/sunrpc/cache.c
884
count = rq->len - rp->offset;
net/sunrpc/cache.c
886
if (copy_to_user(buf, rq->buf + rp->offset, count))
net/sunrpc/cache.c
889
if (rp->offset >= rq->len) {
net/sunrpc/cache.c
892
list_move(&rp->q.list, &rq->q.list);
net/sunrpc/cache.c
901
rq->readers--;
net/sunrpc/cache.c
902
if (rq->readers == 0 &&
net/sunrpc/cache.c
903
!test_bit(CACHE_PENDING, &rq->item->flags)) {
net/sunrpc/cache.c
904
list_del(&rq->q.list);
net/sunrpc/cache.c
906
cache_put(rq->item, cd);
net/sunrpc/cache.c
907
kfree(rq->buf);
net/sunrpc/cache.c
908
kfree(rq);
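
The net/sunrpc/cache.c read-side entries above show a reader that keeps its own offset into a formatted reply: each read is clamped so offset plus count never passes rq->len, data is copied out of rq->buf at that offset, and the entry is only freed once no reader still needs it. A minimal user-space sketch of the offset-and-clamp bookkeeping, with invented names and memcpy() standing in for copy_to_user():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_reply {
    const char *buf;     /* formatted reply data */
    size_t len;          /* total reply length */
};

struct toy_reader {
    size_t offset;       /* how much this reader has consumed so far */
};

/* returns the number of bytes copied into out */
static size_t toy_cache_read(struct toy_reader *rp, const struct toy_reply *rq,
                             char *out, size_t count)
{
    if (rp->offset >= rq->len)
        return 0;                            /* nothing left for this reader */
    if (rp->offset + count > rq->len)
        count = rq->len - rp->offset;        /* clamp to what remains */

    memcpy(out, rq->buf + rp->offset, count);
    rp->offset += count;
    return count;
}

int main(void)
{
    struct toy_reply rq = { .buf = "hello, cache\n", .len = 13 };
    struct toy_reader rp = { 0 };
    char chunk[8];
    size_t n;

    while ((n = toy_cache_read(&rp, &rq, chunk, sizeof(chunk))) > 0)
        fwrite(chunk, 1, n, stdout);

    if (rp.offset >= rq.len)                 /* mirrors the offset >= len check */
        puts("reply fully delivered");
    return 0;
}
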
rust/helpers/blk.c
6
__rust_helper void *rust_helper_blk_mq_rq_to_pdu(struct request *rq)
rust/helpers/blk.c
8
return blk_mq_rq_to_pdu(rq);
tools/perf/util/bpf_skel/lock_contention.bpf.c
848
extern struct rq runqueues __ksym;
tools/perf/util/bpf_skel/lock_contention.bpf.c
928
struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);
tools/perf/util/bpf_skel/lock_contention.bpf.c
930
if (rq == NULL)
tools/perf/util/bpf_skel/lock_contention.bpf.c
933
lock_addr = (__u64)(void *)rq + lock_off;
tools/sched_ext/include/scx/common.bpf.h
100
struct rq *scx_bpf_locked_rq(void) __ksym;
tools/sched_ext/include/scx/common.bpf.h
99
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
tools/sched_ext/include/scx/compat.bpf.h
213
struct rq *rq;
tools/sched_ext/include/scx/compat.bpf.h
218
rq = scx_bpf_cpu_rq(cpu);
tools/sched_ext/include/scx/compat.bpf.h
220
return rq ? rq->curr : NULL;
tools/testing/selftests/bpf/progs/test_access_variable_array.c
11
int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq,
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
19
extern const struct rq runqueues __ksym; /* struct type global var. */
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
25
struct rq *rq;
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
35
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
36
if (rq)
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
37
out__rq_cpu = rq->cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
42
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
43
if (rq) /* should always be valid, but we can't spare the check. */
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
44
out__cpu_0_rq_cpu = rq->cpu;
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
47
rq = (struct rq *)bpf_this_cpu_ptr(&runqueues);
tools/testing/selftests/bpf/progs/test_ksyms_btf.c
48
out__this_rq_cpu = rq->cpu;
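
The test_ksyms_btf.c entries above demonstrate how a BPF program reaches the kernel's per-CPU runqueues: the variable is declared as a typed __ksym extern and then resolved per CPU with bpf_per_cpu_ptr() (or for the current CPU with bpf_this_cpu_ptr()), and the pointer must be NULL-checked before it is dereferenced. A condensed sketch in the same style; it assumes the usual libbpf build (a BTF-generated vmlinux.h plus bpf_helpers.h), and the section name and output variable are illustrative rather than taken from the selftest.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* per-CPU kernel variable, typed through BTF */
extern const struct rq runqueues __ksym;

int out_rq_cpu = -1;

SEC("raw_tp/sys_enter")
int read_this_cpu_rq(const void *ctx)
{
    struct rq *rq;
    u32 cpu = bpf_get_smp_processor_id();

    rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
    if (rq)                 /* bpf_per_cpu_ptr() may return NULL; check it */
        out_rq_cpu = rq->cpu;
    return 0;
}

char _license[] SEC("license") = "GPL";
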
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
14
struct rq *rq;
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
19
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
25
*(volatile int *)(&rq->cpu);
tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c
8
extern const struct rq runqueues __ksym; /* struct type global var. */
tools/testing/selftests/bpf/progs/test_ksyms_weak.c
21
extern const struct rq runqueues __ksym __weak; /* typed */
tools/testing/selftests/bpf/progs/test_ksyms_weak.c
39
struct rq *rq;
tools/testing/selftests/bpf/progs/test_ksyms_weak.c
42
rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
tools/testing/selftests/bpf/progs/test_ksyms_weak.c
43
if (rq && bpf_ksym_exists(&runqueues))
tools/testing/selftests/bpf/progs/test_ksyms_weak.c
44
out__existing_typed = rq->cpu;