Symbol: q
Cross-references for the identifier q across the Linux kernel tree; each entry gives the file, the line number within that file, and the source line itself.
arch/alpha/include/asm/atomic.h
153
ATOMIC64_OP(op, op##q) \
arch/alpha/include/asm/atomic.h
154
ATOMIC64_OP_RETURN(op, op##q) \
arch/alpha/include/asm/atomic.h
155
ATOMIC64_FETCH_OP(op, op##q)
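A note on the ##q pasting above: the Alpha port stamps out its 64-bit atomics by gluing the operation name to the quadword instruction suffix, so one wrapper emits both widths. A minimal sketch of that wrapper, assuming the usual ATOMIC_OPS shape around these three lines:

        #define ATOMIC_OPS(op)                                          \
                ATOMIC_OP(op, op##l)     /* 32-bit: addl, subl, ... */  \
                ATOMIC64_OP(op, op##q)   /* 64-bit: addq, subq, ... */  \
                ATOMIC64_OP_RETURN(op, op##q)                           \
                ATOMIC64_FETCH_OP(op, op##q)

        ATOMIC_OPS(add)         /* instantiates the 64-bit add around "addq" */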
arch/alpha/include/asm/core_t2.h
597
IOPORT(q, 64)
arch/alpha/include/asm/core_wildfire.h
227
#define WILDFIRE_QBB(q) ((~((long)(q)) & WILDFIRE_QBB_MASK) << 36)
arch/alpha/include/asm/core_wildfire.h
230
#define WILDFIRE_QBB_IO(q) (WILDFIRE_BASE | WILDFIRE_QBB(q))
arch/alpha/include/asm/core_wildfire.h
231
#define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h))
arch/alpha/include/asm/core_wildfire.h
233
#define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL)
arch/alpha/include/asm/core_wildfire.h
234
#define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL)
arch/alpha/include/asm/core_wildfire.h
235
#define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL)
arch/alpha/include/asm/core_wildfire.h
237
#define WILDFIRE_qsd(q) \
arch/alpha/include/asm/core_wildfire.h
238
((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23)))
arch/alpha/include/asm/core_wildfire.h
243
#define WILDFIRE_qsa(q) \
arch/alpha/include/asm/core_wildfire.h
244
((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23)))
arch/alpha/include/asm/core_wildfire.h
246
#define WILDFIRE_iop(q) \
arch/alpha/include/asm/core_wildfire.h
247
((wildfire_iop *)(WILDFIRE_QBB_IO(q)|WILDFIRE_IOP_ENTITY|(((1UL<<13)-1)<<23)))
arch/alpha/include/asm/core_wildfire.h
249
#define WILDFIRE_gp(q) \
arch/alpha/include/asm/core_wildfire.h
250
((wildfire_gp *)(WILDFIRE_QBB_IO(q)|WILDFIRE_GP_ENTITY|(((1UL<<13)-1)<<23)))
arch/alpha/include/asm/core_wildfire.h
252
#define WILDFIRE_pca(q,pca) \
arch/alpha/include/asm/core_wildfire.h
253
((wildfire_pca *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)))
arch/alpha/include/asm/core_wildfire.h
255
#define WILDFIRE_ne(q,pca) \
arch/alpha/include/asm/core_wildfire.h
256
((wildfire_ne *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(1UL<<16)))
arch/alpha/include/asm/core_wildfire.h
258
#define WILDFIRE_fe(q,pca) \
arch/alpha/include/asm/core_wildfire.h
259
((wildfire_fe *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(3UL<<15)))
arch/alpha/include/asm/core_wildfire.h
261
#define WILDFIRE_pci(q,h) \
arch/alpha/include/asm/core_wildfire.h
262
((wildfire_pci *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(((h)&6)>>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23)))
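The Wildfire macros above compose addresses by OR-ing fixed fields: a base, the (complemented) QBB number shifted to bits 36 and up, a hose field, and a per-region offset. A hypothetical helper restating the composition, grounded only in the macros shown:

        /* PCI config space for QBB q, hose h; equals WILDFIRE_CONF(q, h).
         * Note WILDFIRE_QBB() inverts q before shifting it into place. */
        static unsigned long wildfire_conf_addr(long q, long h)
        {
                return WILDFIRE_QBB_HOSE(q, h) | 0x1FE000000UL;
        }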
arch/alpha/kernel/core_marvel.c
59
unsigned long q;
arch/alpha/kernel/core_marvel.c
62
q = ev7csr->csr;
arch/alpha/kernel/core_marvel.c
65
return q;
arch/alpha/kernel/core_marvel.c
69
write_ev7_csr(int pe, unsigned long offset, unsigned long q)
arch/alpha/kernel/core_marvel.c
74
ev7csr->csr = q;
arch/alpha/math-emu/sfp-util.h
21
#define udiv_qrnnd(q, r, n1, n0, d) \
arch/alpha/math-emu/sfp-util.h
23
(q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
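udiv_qrnnd() is the classic GMP/sfp-util primitive: divide the two-word numerator (n1:n0) by the one-word divisor d, yielding a one-word quotient and remainder. A portable, self-contained model, assuming a 32-bit word and the macro's usual precondition n1 < d (which makes the quotient fit):

        #include <stdint.h>

        static void udiv_qrnnd_model(uint32_t *q, uint32_t *r,
                                     uint32_t n1, uint32_t n0, uint32_t d)
        {
                uint64_t n = ((uint64_t)n1 << 32) | n0; /* two-word numerator */

                *q = n / d;                             /* one-word quotient */
                *r = n % d;                             /* one-word remainder */
        }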
arch/arm/net/bpf_jit_32.c
264
u64 q;
arch/arm/net/bpf_jit_32.c
266
q = div64_s64(dividend, divisor);
arch/arm/net/bpf_jit_32.c
268
return dividend - q * divisor;
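The three bpf_jit_32.c lines above derive a 64-bit signed remainder from the quotient, since 32-bit ARM has no native 64-by-64 divide and the kernel routes such divisions through div64_s64(). A hedged sketch of the helper they belong to (name hypothetical):

        static s64 smod64_model(s64 dividend, s64 divisor)
        {
                s64 q = div64_s64(dividend, divisor);   /* kernel div64 helper */

                return dividend - q * divisor;          /* remainder by back-multiply */
        }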
arch/arm/nwfpe/softfloat.c
1352
bits32 q;
arch/arm/nwfpe/softfloat.c
1395
q = ( bSig <= aSig );
arch/arm/nwfpe/softfloat.c
1396
if ( q ) aSig -= bSig;
arch/arm/nwfpe/softfloat.c
1400
q = tmp;
arch/arm/nwfpe/softfloat.c
1401
q >>= 32 - expDiff;
arch/arm/nwfpe/softfloat.c
1403
aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
arch/arm/nwfpe/softfloat.c
1424
q = q64>>( 64 - expDiff );
arch/arm/nwfpe/softfloat.c
1426
aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q;
arch/arm/nwfpe/softfloat.c
1430
++q;
arch/arm/nwfpe/softfloat.c
1434
if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
arch/arm/nwfpe/softfloat.c
2250
bits64 q, alternateASig;
arch/arm/nwfpe/softfloat.c
2288
q = ( bSig <= aSig );
arch/arm/nwfpe/softfloat.c
2289
if ( q ) aSig -= bSig;
arch/arm/nwfpe/softfloat.c
2292
q = estimateDiv128To64( aSig, 0, bSig );
arch/arm/nwfpe/softfloat.c
2293
q = ( 2 < q ) ? q - 2 : 0;
arch/arm/nwfpe/softfloat.c
2294
aSig = - ( ( bSig>>2 ) * q );
arch/arm/nwfpe/softfloat.c
2299
q = estimateDiv128To64( aSig, 0, bSig );
arch/arm/nwfpe/softfloat.c
2300
q = ( 2 < q ) ? q - 2 : 0;
arch/arm/nwfpe/softfloat.c
2301
q >>= 64 - expDiff;
arch/arm/nwfpe/softfloat.c
2303
aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
arch/arm/nwfpe/softfloat.c
2311
++q;
arch/arm/nwfpe/softfloat.c
2315
if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
arch/arm/nwfpe/softfloat.c
3074
bits64 q, term0, term1, alternateASig0, alternateASig1;
arch/arm/nwfpe/softfloat.c
3118
q = ( bSig <= aSig0 );
arch/arm/nwfpe/softfloat.c
3119
if ( q ) aSig0 -= bSig;
arch/arm/nwfpe/softfloat.c
3122
q = estimateDiv128To64( aSig0, aSig1, bSig );
arch/arm/nwfpe/softfloat.c
3123
q = ( 2 < q ) ? q - 2 : 0;
arch/arm/nwfpe/softfloat.c
3124
mul64To128( bSig, q, &term0, &term1 );
arch/arm/nwfpe/softfloat.c
3131
q = estimateDiv128To64( aSig0, aSig1, bSig );
arch/arm/nwfpe/softfloat.c
3132
q = ( 2 < q ) ? q - 2 : 0;
arch/arm/nwfpe/softfloat.c
3133
q >>= 64 - expDiff;
arch/arm/nwfpe/softfloat.c
3134
mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
arch/arm/nwfpe/softfloat.c
3138
++q;
arch/arm/nwfpe/softfloat.c
3149
&& ( q & 1 ) )
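In all three softfloat remainder routines above, q is the integer quotient estimate, and the final (q & 1) test implements ties-to-even on that quotient, which is what separates the IEEE remainder from fmod(). A self-contained model of the rule, assuming the default round-to-nearest mode (the real code does the same in exact significand arithmetic):

        #include <math.h>

        static double ieee_rem_model(double a, double b)
        {
                double q = nearbyint(a / b);    /* quotient rounded to nearest, ties to even */

                return a - q * b;               /* may be negative; |result| <= |b| / 2 */
        }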
arch/arm64/kernel/module-plts.c
44
u64 p, q;
arch/arm64/kernel/module-plts.c
56
q = ALIGN_DOWN((u64)b, SZ_4K);
arch/arm64/kernel/module-plts.c
62
if (a->adrp == b->adrp && p == q)
arch/arm64/kernel/module-plts.c
66
(q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
arch/arm64/lib/xor-neon.c
184
static inline uint64x2_t eor3(uint64x2_t p, uint64x2_t q, uint64x2_t r)
arch/arm64/lib/xor-neon.c
190
: "=w"(res) : "w"(p), "w"(q), "w"(r));
arch/loongarch/include/asm/asmmacro.h
571
xvpermi.q \xd, \tmp1, 0x2
arch/m68k/mac/iop.c
454
struct iop_msg *msg, *q;
arch/m68k/mac/iop.c
471
if (!(q = iop_send_queue[iop_num][chan])) {
arch/m68k/mac/iop.c
475
while (q->next) q = q->next;
arch/m68k/mac/iop.c
476
q->next = msg;
arch/mips/bmips/setup.c
184
const struct bmips_quirk *q;
arch/mips/bmips/setup.c
204
for (q = bmips_quirk_list; q->quirk_fn; q++) {
arch/mips/bmips/setup.c
206
q->compatible)) {
arch/mips/bmips/setup.c
207
q->quirk_fn();
arch/mips/include/asm/io.h
318
BUILDIO_MEM(q, u64)
arch/mips/include/asm/io.h
320
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
arch/mips/include/asm/io.h
321
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
arch/mips/include/asm/io.h
335
BUILDIO_IOPORT(q, u64)
arch/mips/include/asm/io.h
342
__BUILDIO(q, u64)
arch/mips/include/asm/io.h
433
BUILDSTRING(q, u64)
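In the MIPS io.h lines above, q is not a variable but the accessor-name suffix: the BUILDIO*/BUILDSTRING macros stamp out the full 64-bit MMIO and port-I/O family. A minimal usage sketch of the standard accessors they generate:

        static u64 mmio_roundtrip(void __iomem *addr)
        {
                u64 v = readq(addr);    /* ordered 64-bit MMIO load */

                writeq(v, addr);        /* ordered 64-bit MMIO store */
                return v;
        }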
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
224
int q = (queue_id >> 4) & 0xf;
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
226
return unit * 256 + core * 16 + q;
arch/mips/include/asm/sgi/heart.h
86
u64 q[HEART_MEMORY_BANKS]; /* readq() */
arch/mips/include/asm/sibyte/bcm1480_regs.h
636
#define A_BCM1480_PMI_INT(q) (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
arch/mips/include/asm/sibyte/bcm1480_regs.h
639
#define A_BCM1480_PMO_INT(q) (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
arch/mips/math-emu/sp_sqrt.c
14
int ix, s, q, m, t, i;
arch/mips/math-emu/sp_sqrt.c
74
q = 0; /* q = sqrt(x) */
arch/mips/math-emu/sp_sqrt.c
82
q += r;
arch/mips/math-emu/sp_sqrt.c
92
q += 2;
arch/mips/math-emu/sp_sqrt.c
95
q += (q & 1);
arch/mips/math-emu/sp_sqrt.c
99
ix = (q >> 1) + 0x3f000000;
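sp_sqrt.c computes the square root of the significand one result bit per step, then rebuilds the float's exponent with the (q >> 1) + 0x3f000000 bias trick. A self-contained model of the digit-by-digit core on plain integers:

        /* floor(sqrt(x)) built one bit at a time, high bit first. */
        static unsigned int isqrt32(unsigned int x)
        {
                unsigned int q = 0, bit = 1u << 30;

                while (bit > x)
                        bit >>= 2;
                while (bit) {
                        if (x >= q + bit) {
                                x -= q + bit;
                                q = (q >> 1) + bit;     /* set this result bit */
                        } else {
                                q >>= 1;
                        }
                        bit >>= 2;
                }
                return q;
        }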
arch/parisc/include/asm/psw.h
90
unsigned int q:1;
arch/powerpc/boot/xz_config.h
16
uint32_t *q = p;
arch/powerpc/boot/xz_config.h
18
return swab32(*q);
arch/powerpc/include/asm/sfp-machine.h
280
#define udiv_qrnnd(q, r, n1, n0, d) \
arch/powerpc/include/asm/sfp-machine.h
313
(q) = (UWtype) __q1 * __ll_B | __q0; \
arch/powerpc/include/asm/xive.h
117
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
arch/powerpc/include/asm/xive.h
119
void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
arch/powerpc/kernel/prom_init.c
443
const char *p, *q;
arch/powerpc/kernel/prom_init.c
448
for (p = msg; *p != 0; p = q) {
arch/powerpc/kernel/prom_init.c
449
for (q = p; *q != 0 && *q != '\n'; ++q)
arch/powerpc/kernel/prom_init.c
451
if (q > p)
arch/powerpc/kernel/prom_init.c
452
call_prom("write", 3, 1, prom.stdout, p, q - p);
arch/powerpc/kernel/prom_init.c
453
if (*q == 0)
arch/powerpc/kernel/prom_init.c
455
++q;
arch/powerpc/kernel/prom_init.c
501
const char *p, *q, *s;
arch/powerpc/kernel/prom_init.c
508
for (p = format; *p != 0; p = q) {
arch/powerpc/kernel/prom_init.c
509
for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
arch/powerpc/kernel/prom_init.c
511
if (q > p)
arch/powerpc/kernel/prom_init.c
512
call_prom("write", 3, 1, prom.stdout, p, q - p);
arch/powerpc/kernel/prom_init.c
513
if (*q == 0)
arch/powerpc/kernel/prom_init.c
515
if (*q == '\n') {
arch/powerpc/kernel/prom_init.c
516
++q;
arch/powerpc/kernel/prom_init.c
521
++q;
arch/powerpc/kernel/prom_init.c
522
if (*q == 0)
arch/powerpc/kernel/prom_init.c
524
while (*q == 'l') {
arch/powerpc/kernel/prom_init.c
525
++q;
arch/powerpc/kernel/prom_init.c
528
switch (*q) {
arch/powerpc/kernel/prom_init.c
530
++q;
arch/powerpc/kernel/prom_init.c
535
++q;
arch/powerpc/kernel/prom_init.c
551
++q;
arch/powerpc/kernel/prom_init.c
567
++q;
arch/powerpc/kernel/prom_init.c
671
static void __init add_string(char **str, const char *q)
arch/powerpc/kernel/prom_init.c
675
while (*q)
arch/powerpc/kernel/prom_init.c
676
*p++ = *q++;
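The prom_init.c lines above all belong to one two-pointer scan: p marks the start of a run, q races ahead to the next newline or format escape, and the half-open run [p, q) is written in a single call. A condensed sketch, with emit() standing in for the original's call_prom("write", ...):

        static void emit(const char *s, long n);       /* hypothetical sink */

        static void print_lines(const char *msg)
        {
                const char *p, *q;

                for (p = msg; *p != 0; p = q) {
                        for (q = p; *q != 0 && *q != '\n'; ++q)
                                ;
                        if (q > p)
                                emit(p, q - p);         /* whole run at once */
                        if (*q == 0)
                                break;
                        ++q;                            /* step past the newline */
                }
        }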
arch/powerpc/kvm/book3s_xive.c
1008
struct xive_q *q;
arch/powerpc/kvm/book3s_xive.c
1020
q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive.c
1021
atomic_inc(&q->pending_count);
arch/powerpc/kvm/book3s_xive.c
1027
struct xive_q *q;
arch/powerpc/kvm/book3s_xive.c
1035
q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive.c
1036
if (WARN_ON(!q->qpage))
arch/powerpc/kvm/book3s_xive.c
1040
max = (q->msk + 1) - XIVE_Q_GAP;
arch/powerpc/kvm/book3s_xive.c
1041
return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
arch/powerpc/kvm/book3s_xive.c
141
struct xive_q *q;
arch/powerpc/kvm/book3s_xive.c
161
q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive.c
162
idx = q->idx;
arch/powerpc/kvm/book3s_xive.c
163
toggle = q->toggle;
arch/powerpc/kvm/book3s_xive.c
171
qpage = READ_ONCE(q->qpage);
arch/powerpc/kvm/book3s_xive.c
178
hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
arch/powerpc/kvm/book3s_xive.c
1842
struct xive_q *q = &xc->queues[i];
arch/powerpc/kvm/book3s_xive.c
1844
xive_native_disable_queue(xc->vp_id, q, i);
arch/powerpc/kvm/book3s_xive.c
1845
if (q->qpage) {
arch/powerpc/kvm/book3s_xive.c
1846
free_pages((unsigned long)q->qpage,
arch/powerpc/kvm/book3s_xive.c
1848
q->qpage = NULL;
arch/powerpc/kvm/book3s_xive.c
198
q->idx = idx;
arch/powerpc/kvm/book3s_xive.c
1986
struct xive_q *q = &xc->queues[i];
arch/powerpc/kvm/book3s_xive.c
199
q->toggle = toggle;
arch/powerpc/kvm/book3s_xive.c
2002
q, i, NULL, 0, true);
arch/powerpc/kvm/book3s_xive.c
2109
static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
arch/powerpc/kvm/book3s_xive.c
2111
u32 idx = q->idx;
arch/powerpc/kvm/book3s_xive.c
2112
u32 toggle = q->toggle;
arch/powerpc/kvm/book3s_xive.c
2116
irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
arch/powerpc/kvm/book3s_xive.c
219
if (atomic_read(&q->pending_count)) {
arch/powerpc/kvm/book3s_xive.c
220
int p = atomic_xchg(&q->pending_count, 0);
arch/powerpc/kvm/book3s_xive.c
223
WARN_ON(p > atomic_read(&q->count));
arch/powerpc/kvm/book3s_xive.c
224
atomic_sub(p, &q->count);
arch/powerpc/kvm/book3s_xive.c
242
q->idx = idx;
arch/powerpc/kvm/book3s_xive.c
243
q->toggle = toggle;
arch/powerpc/kvm/book3s_xive.c
2812
struct xive_q *q = &xc->queues[i];
arch/powerpc/kvm/book3s_xive.c
2815
if (!q->qpage && !xc->esc_virq[i])
arch/powerpc/kvm/book3s_xive.c
2818
if (q->qpage) {
arch/powerpc/kvm/book3s_xive.c
2820
idx = q->idx;
arch/powerpc/kvm/book3s_xive.c
2821
i0 = be32_to_cpup(q->qpage + idx);
arch/powerpc/kvm/book3s_xive.c
2822
idx = (idx + 1) & q->msk;
arch/powerpc/kvm/book3s_xive.c
2823
i1 = be32_to_cpup(q->qpage + idx);
arch/powerpc/kvm/book3s_xive.c
2824
seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
arch/powerpc/kvm/book3s_xive.c
397
struct xive_q *q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive.c
405
idx = q->idx;
arch/powerpc/kvm/book3s_xive.c
406
toggle = q->toggle;
arch/powerpc/kvm/book3s_xive.c
407
qpage = READ_ONCE(q->qpage);
arch/powerpc/kvm/book3s_xive.c
449
idx = (idx + 1) & q->msk;
arch/powerpc/kvm/book3s_xive.c
870
struct xive_q *q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive.c
879
xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
arch/powerpc/kvm/book3s_xive.c
939
struct xive_q *q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive.c
943
if (WARN_ON(q->qpage))
arch/powerpc/kvm/book3s_xive.c
962
rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
arch/powerpc/kvm/book3s_xive_native.c
47
struct xive_q *q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive_native.c
49
xive_native_disable_queue(xc->vp_id, q, prio);
arch/powerpc/kvm/book3s_xive_native.c
50
if (q->qpage) {
arch/powerpc/kvm/book3s_xive_native.c
51
put_page(virt_to_page(q->qpage));
arch/powerpc/kvm/book3s_xive_native.c
52
q->qpage = NULL;
arch/powerpc/kvm/book3s_xive_native.c
56
static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
arch/powerpc/kvm/book3s_xive_native.c
572
struct xive_q *q;
arch/powerpc/kvm/book3s_xive_native.c
600
q = &xc->queues[priority];
arch/powerpc/kvm/book3s_xive_native.c
608
q->guest_qaddr = 0;
arch/powerpc/kvm/book3s_xive_native.c
609
q->guest_qshift = 0;
arch/powerpc/kvm/book3s_xive_native.c
61
__be32 *qpage_prev = q->qpage;
arch/powerpc/kvm/book3s_xive_native.c
611
rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
arch/powerpc/kvm/book3s_xive_native.c
63
rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
arch/powerpc/kvm/book3s_xive_native.c
670
q->guest_qaddr = kvm_eq.qaddr;
arch/powerpc/kvm/book3s_xive_native.c
671
q->guest_qshift = kvm_eq.qshift;
arch/powerpc/kvm/book3s_xive_native.c
678
rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
arch/powerpc/kvm/book3s_xive_native.c
713
struct xive_q *q;
arch/powerpc/kvm/book3s_xive_native.c
745
q = &xc->queues[priority];
arch/powerpc/kvm/book3s_xive_native.c
749
if (!q->qpage)
arch/powerpc/kvm/book3s_xive_native.c
761
kvm_eq.qshift = q->guest_qshift;
arch/powerpc/kvm/book3s_xive_native.c
762
kvm_eq.qaddr = q->guest_qaddr;
arch/powerpc/kvm/book3s_xive_native.c
901
struct xive_q *q = &xc->queues[prio];
arch/powerpc/kvm/book3s_xive_native.c
903
if (!q->qpage)
arch/powerpc/kvm/book3s_xive_native.c
908
mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
arch/powerpc/kvm/mpic.c
281
static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
arch/powerpc/kvm/mpic.c
283
set_bit(n_IRQ, q->queue);
arch/powerpc/kvm/mpic.c
286
static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
arch/powerpc/kvm/mpic.c
288
clear_bit(n_IRQ, q->queue);
arch/powerpc/kvm/mpic.c
291
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
arch/powerpc/kvm/mpic.c
298
irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
arch/powerpc/kvm/mpic.c
311
q->next = next;
arch/powerpc/kvm/mpic.c
312
q->priority = priority;
arch/powerpc/kvm/mpic.c
315
static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
arch/powerpc/kvm/mpic.c
318
IRQ_check(opp, q);
arch/powerpc/kvm/mpic.c
320
return q->next;
arch/powerpc/lib/test-code-patching.c
206
void *p, *q;
arch/powerpc/lib/test-code-patching.c
221
q = p + 4;
arch/powerpc/lib/test-code-patching.c
222
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
223
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
224
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
231
q = buf + 0x2000000;
arch/powerpc/lib/test-code-patching.c
232
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
233
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
235
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
236
check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000)));
arch/powerpc/lib/test-code-patching.c
243
q = buf + 4;
arch/powerpc/lib/test-code-patching.c
244
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
245
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
247
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
248
check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc)));
arch/powerpc/lib/test-code-patching.c
255
q = buf + 0x1400000;
arch/powerpc/lib/test-code-patching.c
256
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
257
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
259
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
266
q = buf + 4;
arch/powerpc/lib/test-code-patching.c
267
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
268
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
270
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
281
q = buf + 4;
arch/powerpc/lib/test-code-patching.c
282
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
283
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
284
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
291
q = buf + 0x8000;
arch/powerpc/lib/test-code-patching.c
292
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
293
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
295
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
296
check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000)));
arch/powerpc/lib/test-code-patching.c
303
q = buf + 4;
arch/powerpc/lib/test-code-patching.c
304
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
305
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
307
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
308
check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc)));
arch/powerpc/lib/test-code-patching.c
315
q = buf + 0x5000;
arch/powerpc/lib/test-code-patching.c
316
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
317
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
319
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/lib/test-code-patching.c
326
q = buf + 4;
arch/powerpc/lib/test-code-patching.c
327
translate_branch(&instr, q, p);
arch/powerpc/lib/test-code-patching.c
328
ppc_inst_write(q, instr);
arch/powerpc/lib/test-code-patching.c
330
check(instr_is_branch_to_addr(q, addr));
arch/powerpc/math-emu/udivmodti4.c
10
_fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2],
arch/powerpc/math-emu/udivmodti4.c
190
q[0] = q0; q[1] = q1;
arch/powerpc/platforms/powermac/bootx_init.c
47
const char *p, *q, *s;
arch/powerpc/platforms/powermac/bootx_init.c
52
for (p = format; *p != 0; p = q) {
arch/powerpc/platforms/powermac/bootx_init.c
53
for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
arch/powerpc/platforms/powermac/bootx_init.c
55
if (q > p)
arch/powerpc/platforms/powermac/bootx_init.c
56
btext_drawtext(p, q - p);
arch/powerpc/platforms/powermac/bootx_init.c
57
if (*q == 0)
arch/powerpc/platforms/powermac/bootx_init.c
59
if (*q == '\n') {
arch/powerpc/platforms/powermac/bootx_init.c
60
++q;
arch/powerpc/platforms/powermac/bootx_init.c
66
++q;
arch/powerpc/platforms/powermac/bootx_init.c
67
if (*q == 0)
arch/powerpc/platforms/powermac/bootx_init.c
69
switch (*q) {
arch/powerpc/platforms/powermac/bootx_init.c
71
++q;
arch/powerpc/platforms/powermac/bootx_init.c
78
++q;
arch/powerpc/platforms/ps3/os-area.c
693
static DECLARE_WORK(q, os_area_queue_work_handler);
arch/powerpc/platforms/ps3/os-area.c
696
schedule_work(&q);
arch/powerpc/sysdev/xive/common.c
103
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
arch/powerpc/sysdev/xive/common.c
107
if (!q->qpage)
arch/powerpc/sysdev/xive/common.c
109
cur = be32_to_cpup(q->qpage + q->idx);
arch/powerpc/sysdev/xive/common.c
112
if ((cur >> 31) == q->toggle)
arch/powerpc/sysdev/xive/common.c
118
q->idx = (q->idx + 1) & q->msk;
arch/powerpc/sysdev/xive/common.c
121
if (q->idx == 0)
arch/powerpc/sysdev/xive/common.c
122
q->toggle ^= 1;
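The xive_read_eq() lines above show the event-queue convention that the KVM XIVE code in this list shares: each slot is a big-endian word whose top bit is a generation ("toggle") flag. A minimal model of the pop, condensed from those lines:

        static u32 eq_pop_model(struct xive_q *q)
        {
                u32 cur = be32_to_cpup(q->qpage + q->idx);

                if ((cur >> 31) == q->toggle)   /* stale generation: queue empty */
                        return 0;
                q->idx = (q->idx + 1) & q->msk; /* consume the slot */
                if (q->idx == 0)
                        q->toggle ^= 1;         /* wrapped: expect the other phase */
                return cur & 0x7fffffff;        /* payload without the flag bit */
        }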
arch/powerpc/sysdev/xive/common.c
158
struct xive_q *q;
arch/powerpc/sysdev/xive/common.c
1800
static void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio)
arch/powerpc/sysdev/xive/common.c
1804
seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle);
arch/powerpc/sysdev/xive/common.c
1805
if (q->qpage) {
arch/powerpc/sysdev/xive/common.c
1806
for (i = 0; i < q->msk + 1; i++) {
arch/powerpc/sysdev/xive/common.c
1809
seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i),
arch/powerpc/sysdev/xive/common.c
189
q = &xc->queue[prio];
arch/powerpc/sysdev/xive/common.c
190
if (atomic_read(&q->pending_count)) {
arch/powerpc/sysdev/xive/common.c
191
int p = atomic_xchg(&q->pending_count, 0);
arch/powerpc/sysdev/xive/common.c
193
WARN_ON(p > atomic_read(&q->count));
arch/powerpc/sysdev/xive/common.c
194
atomic_sub(p, &q->count);
arch/powerpc/sysdev/xive/common.c
256
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
arch/powerpc/sysdev/xive/common.c
260
if (!q->qpage)
arch/powerpc/sysdev/xive/common.c
262
idx = q->idx;
arch/powerpc/sysdev/xive/common.c
263
i0 = be32_to_cpup(q->qpage + idx);
arch/powerpc/sysdev/xive/common.c
264
idx = (idx + 1) & q->msk;
arch/powerpc/sysdev/xive/common.c
265
i1 = be32_to_cpup(q->qpage + idx);
arch/powerpc/sysdev/xive/common.c
267
q->idx, q->toggle, i0, i1);
arch/powerpc/sysdev/xive/common.c
506
struct xive_q *q = &xc->queue[xive_irq_priority];
arch/powerpc/sysdev/xive/common.c
514
max = (q->msk + 1) - 1;
arch/powerpc/sysdev/xive/common.c
515
return !!atomic_add_unless(&q->count, 1, max);
arch/powerpc/sysdev/xive/common.c
530
struct xive_q *q = &xc->queue[xive_irq_priority];
arch/powerpc/sysdev/xive/common.c
544
atomic_inc(&q->pending_count);
arch/powerpc/sysdev/xive/native.c
131
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
arch/powerpc/sysdev/xive/native.c
148
q->msk = order ? ((1u << (order - 2)) - 1) : 0;
arch/powerpc/sysdev/xive/native.c
149
q->idx = 0;
arch/powerpc/sysdev/xive/native.c
150
q->toggle = 0;
arch/powerpc/sysdev/xive/native.c
161
q->eoi_phys = be64_to_cpu(qeoi_page_be);
arch/powerpc/sysdev/xive/native.c
168
q->esc_irq = be32_to_cpu(esc_irq_be);
arch/powerpc/sysdev/xive/native.c
188
q->qpage = qpage;
arch/powerpc/sysdev/xive/native.c
195
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
arch/powerpc/sysdev/xive/native.c
210
void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
arch/powerpc/sysdev/xive/native.c
212
__xive_native_disable_queue(vp_id, q, prio);
arch/powerpc/sysdev/xive/native.c
218
struct xive_q *q = &xc->queue[prio];
arch/powerpc/sysdev/xive/native.c
226
q, prio, qpage, xive_queue_shift, false);
arch/powerpc/sysdev/xive/native.c
231
struct xive_q *q = &xc->queue[prio];
arch/powerpc/sysdev/xive/native.c
238
__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
arch/powerpc/sysdev/xive/native.c
240
free_pages((unsigned long)q->qpage, alloc_order);
arch/powerpc/sysdev/xive/native.c
241
q->qpage = NULL;
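A worked note on the msk assignment at native.c line 148 above: order is log2 of the queue size in bytes and each slot is a 4-byte __be32, so the slot count is 1 << (order - 2). A 64 KiB queue (order = 16) therefore has 16384 slots and msk = 0x3fff, letting idx wrap with a plain AND:

        static u32 eq_mask(u8 order)    /* order = log2(queue bytes) */
        {
                return order ? ((1u << (order - 2)) - 1) : 0;   /* slots - 1 */
        }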
arch/powerpc/sysdev/xive/spapr.c
479
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
arch/powerpc/sysdev/xive/spapr.c
497
q->msk = order ? ((1u << (order - 2)) - 1) : 0;
arch/powerpc/sysdev/xive/spapr.c
498
q->idx = 0;
arch/powerpc/sysdev/xive/spapr.c
499
q->toggle = 0;
arch/powerpc/sysdev/xive/spapr.c
510
q->eoi_phys = esn_page;
arch/powerpc/sysdev/xive/spapr.c
522
q->qpage = qpage;
arch/powerpc/sysdev/xive/spapr.c
534
struct xive_q *q = &xc->queue[prio];
arch/powerpc/sysdev/xive/spapr.c
542
q, prio, qpage, xive_queue_shift);
arch/powerpc/sysdev/xive/spapr.c
548
struct xive_q *q = &xc->queue[prio];
arch/powerpc/sysdev/xive/spapr.c
560
uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
arch/powerpc/sysdev/xive/spapr.c
561
free_pages((unsigned long)q->qpage, alloc_order);
arch/powerpc/sysdev/xive/spapr.c
562
q->qpage = NULL;
arch/powerpc/xmon/nonstdio.c
41
const char *p = ptr, *q;
arch/powerpc/xmon/nonstdio.c
51
while (paginating && (q = strchr(p, '\n'))) {
arch/powerpc/xmon/nonstdio.c
52
rv += udbg_write(p, q - p + 1);
arch/powerpc/xmon/nonstdio.c
53
p = q + 1;
arch/powerpc/xmon/xmon.c
2206
char *p, *q;
arch/powerpc/xmon/xmon.c
2213
q = (char *)buf;
arch/powerpc/xmon/xmon.c
2216
*(u16 *)q = *(u16 *)p;
arch/powerpc/xmon/xmon.c
2219
*(u32 *)q = *(u32 *)p;
arch/powerpc/xmon/xmon.c
2222
*(u64 *)q = *(u64 *)p;
arch/powerpc/xmon/xmon.c
2226
*q++ = *p++;
arch/powerpc/xmon/xmon.c
2243
char *p, *q;
arch/powerpc/xmon/xmon.c
2256
q = (char *) buf;
arch/powerpc/xmon/xmon.c
2259
*(u16 *)p = *(u16 *)q;
arch/powerpc/xmon/xmon.c
2262
*(u32 *)p = *(u32 *)q;
arch/powerpc/xmon/xmon.c
2265
*(u64 *)p = *(u64 *)q;
arch/powerpc/xmon/xmon.c
2269
*p++ = *q++;
arch/riscv/include/asm/io.h
127
__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
arch/riscv/include/asm/io.h
130
__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
arch/riscv/include/asm/io.h
133
__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
arch/riscv/include/asm/io.h
136
__io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
arch/riscv/include/uapi/asm/ptrace.h
98
struct __riscv_q_ext_state q;
arch/riscv/kernel/compat_signal.c
63
for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
arch/riscv/kernel/compat_signal.c
66
err = __get_user(value, &sc_fpregs->q.reserved[i]);
arch/riscv/kernel/compat_signal.c
89
for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
arch/riscv/kernel/compat_signal.c
90
err = __put_user(0, &sc_fpregs->q.reserved[i]);
arch/riscv/kernel/cpufeature.c
498
__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
arch/s390/include/asm/scsw.h
89
u32 q:1;
arch/s390/kernel/perf_cpum_cf.c
150
struct cpu_cf_ptr *q = per_cpu_ptr(p, cpu);
arch/s390/kernel/perf_cpum_cf.c
152
return q->cpucf;
arch/s390/pci/pci_event.c
29
u32 q : 1; /* event qualifier */
arch/sh/include/asm/io.h
119
__BUILD_MEMORY_STRING(__raw_, q, u64)
arch/sh/include/asm/io.h
191
BUILDIO_IOPORT(q, u64)
arch/sh/include/asm/io.h
220
__BUILD_IOPORT_STRING(q, u64)
arch/sh/include/asm/io.h
86
__BUILD_UNCACHED_IO(q, u64)
arch/sh/lib/div64-generic.c
14
uint64_t q = __xdiv64_32(*xp, y);
arch/sh/lib/div64-generic.c
16
rem = *xp - q * y;
arch/sh/lib/div64-generic.c
17
*xp = q;
arch/sh/math-emu/sfp-util.h
32
#define udiv_qrnnd(q, r, n1, n0, d) \
arch/sh/math-emu/sfp-util.h
65
(q) = (UWtype) __q1 * __ll_B | __q0; \
arch/sparc/include/asm/io_64.h
188
static inline void writeq(u64 q, volatile void __iomem *addr)
arch/sparc/include/asm/io_64.h
192
: "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
arch/sparc/include/asm/io_64.h
339
static inline void sbus_writeq(u64 q, volatile void __iomem *addr)
arch/sparc/include/asm/io_64.h
341
__raw_writeq(q, addr);
arch/sparc/include/asm/io_64.h
94
static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
arch/sparc/include/asm/io_64.h
98
: "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
arch/sparc/include/asm/upa.h
93
static inline void _upa_writeq(unsigned long q, unsigned long addr)
arch/sparc/include/asm/upa.h
97
: "r" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
arch/sparc/kernel/ldc.c
1003
if (!q)
arch/sparc/kernel/ldc.c
1009
free_pages((unsigned long)q, order);
arch/sparc/kernel/ldc.c
979
void *q;
arch/sparc/kernel/ldc.c
984
q = (void *) __get_free_pages(GFP_KERNEL, order);
arch/sparc/kernel/ldc.c
985
if (!q) {
arch/sparc/kernel/ldc.c
991
memset(q, 0, PAGE_SIZE << order);
arch/sparc/kernel/ldc.c
993
*base = q;
arch/sparc/kernel/ldc.c
994
*ra = __pa(q);
arch/sparc/kernel/ldc.c
999
static void free_queue(unsigned long num_entries, struct ldc_packet *q)
arch/sparc/math-emu/math_32.c
272
u64 q[2];
arch/sparc/math-emu/math_64.c
164
u64 q[2];
arch/sparc/math-emu/math_64.c
457
case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
arch/sparc/math-emu/math_64.c
458
case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
arch/sparc/math-emu/math_64.c
459
case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
arch/sparc/math-emu/sfp-util_32.h
102
: "=&r" (q), \
arch/sparc/math-emu/sfp-util_32.h
77
#define udiv_qrnnd(q, r, n1, n0, d) \
arch/sparc/math-emu/sfp-util_64.h
108
(q) = (UWtype) (__q1 << 32) | __q0; \
arch/sparc/math-emu/sfp-util_64.h
76
#define udiv_qrnnd(q, r, n1, n0, d) \
arch/um/drivers/ubd_kern.c
452
blk_queue_disable_discard(io_req->req->q);
arch/um/drivers/ubd_kern.c
454
blk_queue_disable_write_zeroes(io_req->req->q);
arch/x86/boot/compressed/sev.c
403
sev_status = m.q;
arch/x86/boot/compressed/sev.c
452
return m.q;
arch/x86/boot/compressed/sev.c
502
boot_svsm_caa_pa = m.q;
arch/x86/boot/compressed/sev.h
25
return m.q;
arch/x86/boot/compressed/sev.h
32
m.q = val;
arch/x86/coco/sev/vc-shared.c
557
ghcb_set_xss(ghcb, m.q);
arch/x86/include/asm/asm.h
33
inst##q##__VA_ARGS__)
arch/x86/include/asm/msr.h
261
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
arch/x86/include/asm/msr.h
262
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
arch/x86/include/asm/msr.h
267
int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
arch/x86/include/asm/msr.h
268
int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
arch/x86/include/asm/msr.h
282
static inline int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/include/asm/msr.h
284
rdmsrq(msr_no, *q);
arch/x86/include/asm/msr.h
287
static inline int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/include/asm/msr.h
289
wrmsrq(msr_no, q);
arch/x86/include/asm/msr.h
311
static inline int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/include/asm/msr.h
313
return rdmsrq_safe(msr_no, q);
arch/x86/include/asm/msr.h
315
static inline int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/include/asm/msr.h
317
return wrmsrq_safe(msr_no, q);
arch/x86/include/asm/msr.h
332
#define rdmsrl_on_cpu(cpu, msr, q) rdmsrq_on_cpu(cpu, msr, q)
arch/x86/include/asm/shared/msr.h
11
u64 q;
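The lone q above is the whole-register view in the kernel's MSR value type: a union overlaying the 64-bit quadword with the low/high halves that RDMSR/WRMSR move through EAX:EDX. A sketch mirroring the shared/msr.h layout:

        struct msr_model {
                union {
                        struct {
                                u32 l;  /* low  half (EAX) */
                                u32 h;  /* high half (EDX) */
                        };
                        u64 q;          /* full 64-bit view */
                };
        };

This is why the msr-smp.c entries below can both assign rv.reg.q = q and reassemble *q = (u64)high << 32 | low.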
arch/x86/kernel/cpu/common.c
827
char *p, *q, *s;
arch/x86/kernel/cpu/common.c
839
p = q = s = &c->x86_model_id[0];
arch/x86/kernel/cpu/common.c
847
s = q;
arch/x86/kernel/cpu/common.c
849
*q++ = *p++;
arch/x86/kernel/ptrace.c
862
#define R32(l,q) \
arch/x86/kernel/ptrace.c
864
regs->q = value; break
arch/x86/kernel/ptrace.c
961
#define R32(l,q) \
arch/x86/kernel/ptrace.c
963
*val = regs->q; break
arch/x86/kvm/emulate.c
322
ON64(case 8: __EM_ASM_1(op##q, rax); break;) \
arch/x86/kvm/emulate.c
331
ON64(case 8: __EM_ASM_1(op##q, rcx); break;) \
arch/x86/kvm/emulate.c
340
ON64(case 8: __EM_ASM_1_EX(op##q, rcx); break;) \
arch/x86/kvm/emulate.c
349
ON64(case 8: __EM_ASM_2(op##q, rax, rdx); break;) \
arch/x86/kvm/emulate.c
358
ON64(case 8: __EM_ASM_2(op##q, rdx, rax); break;) \
arch/x86/kvm/emulate.c
367
ON64(case 8: __EM_ASM_2(op##q, rax, rdx); break;) \
arch/x86/kvm/emulate.c
376
ON64(case 8: __EM_ASM_2(op##q, rax, cl); break;) \
arch/x86/kvm/emulate.c
385
ON64(case 8: __EM_ASM_3(op##q, rax, rdx, cl); break;) \
arch/x86/kvm/svm/sev.c
2905
struct list_head *pos, *q;
arch/x86/kvm/svm/sev.c
2935
list_for_each_safe(pos, q, head) {
arch/x86/kvm/vmx/nested.c
1669
int i, q;
arch/x86/kvm/vmx/nested.c
1676
for (q = 0; q < ARRAY_SIZE(fields); q++) {
arch/x86/kvm/vmx/nested.c
1677
for (i = 0; i < max_fields[q]; i++) {
arch/x86/kvm/vmx/nested.c
1678
field = fields[q][i];
arch/x86/lib/msr-smp.c
207
int wrmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/lib/msr-smp.c
215
rv.reg.q = q;
arch/x86/lib/msr-smp.c
223
int rdmsrq_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/lib/msr-smp.c
229
*q = (u64)high << 32 | low;
arch/x86/lib/msr-smp.c
50
int rdmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
arch/x86/lib/msr-smp.c
59
*q = rv.reg.q;
arch/x86/lib/msr-smp.c
81
int wrmsrq_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
arch/x86/lib/msr-smp.c
89
rv.reg.q = q;
arch/x86/lib/msr.c
47
m->q = val;
arch/x86/lib/msr.c
62
return wrmsrq_safe(msr, m->q);
arch/x86/lib/msr.c
79
m1.q |= BIT_64(bit);
arch/x86/lib/msr.c
81
m1.q &= ~BIT_64(bit);
arch/x86/lib/msr.c
83
if (m1.q == m.q)
arch/x86/math-emu/fpu_trig.c
109
if (((q > 0)
arch/x86/math-emu/fpu_trig.c
111
|| (q > 1)) {
arch/x86/math-emu/fpu_trig.c
115
significand(&tmp) = q;
arch/x86/math-emu/fpu_trig.c
138
q++;
arch/x86/math-emu/fpu_trig.c
148
return (q & 3) | even;
arch/x86/math-emu/fpu_trig.c
22
unsigned long long st1, unsigned long long q, int n);
arch/x86/math-emu/fpu_trig.c
279
int q;
arch/x86/math-emu/fpu_trig.c
300
if ((q = trig_arg(st0_ptr, 0)) == -1) {
arch/x86/math-emu/fpu_trig.c
306
setsign(st0_ptr, (q & 1) ^ (arg_sign != 0));
arch/x86/math-emu/fpu_trig.c
38
unsigned long long q;
arch/x86/math-emu/fpu_trig.c
555
int q;
arch/x86/math-emu/fpu_trig.c
558
if ((q = trig_arg(st0_ptr, 0)) == -1) {
arch/x86/math-emu/fpu_trig.c
56
q = significand(&tmp);
arch/x86/math-emu/fpu_trig.c
565
if (q & 2)
arch/x86/math-emu/fpu_trig.c
57
if (q) {
arch/x86/math-emu/fpu_trig.c
61
q, exponent(st0_ptr) - exponent(&CONST_PI2));
arch/x86/math-emu/fpu_trig.c
623
int q;
arch/x86/math-emu/fpu_trig.c
636
} else if ((q = trig_arg(st0_ptr, FCOS)) != -1) {
arch/x86/math-emu/fpu_trig.c
639
if ((q + 1) & 2)
arch/x86/math-emu/fpu_trig.c
67
if ((even && !(q & 1)) || (!even && (q & 1))) {
arch/x86/math-emu/fpu_trig.c
752
unsigned long long st1, unsigned long long q, int n)
arch/x86/math-emu/fpu_trig.c
765
:"2"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[0])
arch/x86/math-emu/fpu_trig.c
770
:"1"(((unsigned *)&st1)[1]), "m"(((unsigned *)&q)[0])
arch/x86/math-emu/fpu_trig.c
775
:"1"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[1])
arch/x86/math-emu/fpu_trig.c
78
|| (q > 1)) {
arch/x86/math-emu/fpu_trig.c
796
long long q;
arch/x86/math-emu/fpu_trig.c
82
significand(&tmp) = q + 1;
arch/x86/math-emu/fpu_trig.c
828
q = significand(&tmp);
arch/x86/math-emu/fpu_trig.c
833
q, expdif);
arch/x86/math-emu/fpu_trig.c
838
q = 0;
arch/x86/math-emu/fpu_trig.c
859
&& (q & 1))) {
arch/x86/math-emu/fpu_trig.c
862
q++;
arch/x86/math-emu/fpu_trig.c
867
if (q & 4)
arch/x86/math-emu/fpu_trig.c
869
if (q & 2)
arch/x86/math-emu/fpu_trig.c
871
if (q & 1)
arch/x86/math-emu/fpu_trig.c
98
q++;
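Across fpu_trig.c, q counts how many multiples of pi/2 were removed during argument reduction, so its low bits select the quadrant: the (q & 1), (q & 2), (q & 4) tests pick sign and co-function. A hypothetical dispatch sketch (sin_poly()/cos_poly() are stand-ins for the emulator's polynomial kernels):

        static double sin_reduced(double x, int q)
        {
                switch (q & 3) {        /* quadrant from the step count */
                case 0:  return  sin_poly(x);
                case 1:  return  cos_poly(x);
                case 2:  return -sin_poly(x);
                default: return -cos_poly(x);
                }
        }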
arch/x86/power/cpu.c
419
msr_array[i].info.reg.q = 0;
arch/x86/power/cpu.c
48
rdmsrq(msr->info.msr_no, msr->info.reg.q);
arch/x86/power/cpu.c
60
wrmsrq(msr->info.msr_no, msr->info.reg.q);
arch/x86/xen/platform-pci-unplug.c
181
char *p, *q;
arch/x86/xen/platform-pci-unplug.c
184
for (p = arg; p; p = q) {
arch/x86/xen/platform-pci-unplug.c
185
q = strchr(p, ',');
arch/x86/xen/platform-pci-unplug.c
186
if (q) {
arch/x86/xen/platform-pci-unplug.c
187
l = q - p;
arch/x86/xen/platform-pci-unplug.c
188
q++;
block/bfq-cgroup.c
1142
lockdep_assert_held(&blkg->q->queue_lock);
block/bfq-cgroup.c
343
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
block/bfq-cgroup.c
408
lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
block/bfq-cgroup.c
533
struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
block/bfq-iosched.c
2363
struct request_queue *q)
block/bfq-iosched.c
2382
static void bfq_remove_request(struct request_queue *q,
block/bfq-iosched.c
2404
elv_rqhash_del(q, rq);
block/bfq-iosched.c
2405
if (q->last_merge == rq)
block/bfq-iosched.c
2406
q->last_merge = NULL;
block/bfq-iosched.c
2447
static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
block/bfq-iosched.c
2450
struct bfq_data *bfqd = q->elevator->elevator_data;
block/bfq-iosched.c
2451
struct bfq_io_cq *bic = bfq_bic_lookup(q);
block/bfq-iosched.c
2471
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
block/bfq-iosched.c
2480
static int bfq_request_merge(struct request_queue *q, struct request **req,
block/bfq-iosched.c
2483
struct bfq_data *bfqd = q->elevator->elevator_data;
block/bfq-iosched.c
2486
__rq = bfq_find_rq_fmerge(bfqd, bio, q);
block/bfq-iosched.c
2498
static void bfq_request_merged(struct request_queue *q, struct request *req,
block/bfq-iosched.c
2555
static void bfq_requests_merged(struct request_queue *q, struct request *rq,
block/bfq-iosched.c
2588
bfq_remove_request(next->q, next);
block/bfq-iosched.c
3230
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
block/bfq-iosched.c
3233
struct bfq_data *bfqd = q->elevator->elevator_data;
block/bfq-iosched.c
3650
static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
block/bfq-iosched.c
3667
bfq_update_peak_rate(q->elevator->elevator_data, rq);
block/bfq-iosched.c
3669
bfq_remove_request(q, rq);
block/bfq-iosched.c
438
return bic->icq.q->elevator->elevator_data;
block/bfq-iosched.c
455
static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
block/bfq-iosched.c
460
return icq_to_bic(ioc_lookup_icq(q));
block/bfq-iosched.c
5246
static void bfq_update_dispatch_stats(struct request_queue *q,
block/bfq-iosched.c
5269
spin_lock_irq(&q->queue_lock);
block/bfq-iosched.c
5288
spin_unlock_irq(&q->queue_lock);
block/bfq-iosched.c
5291
static inline void bfq_update_dispatch_stats(struct request_queue *q,
block/bfq-iosched.c
6203
static void bfq_update_insert_stats(struct request_queue *q,
block/bfq-iosched.c
6221
spin_lock_irq(&q->queue_lock);
block/bfq-iosched.c
6225
spin_unlock_irq(&q->queue_lock);
block/bfq-iosched.c
6228
static inline void bfq_update_insert_stats(struct request_queue *q,
block/bfq-iosched.c
6239
struct request_queue *q = hctx->queue;
block/bfq-iosched.c
6240
struct bfq_data *bfqd = q->elevator->elevator_data;
block/bfq-iosched.c
6248
bfqg_stats_update_legacy_io(q, rq);
block/bfq-iosched.c
6252
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
block/bfq-iosched.c
6274
elv_rqhash_add(q, rq);
block/bfq-iosched.c
6275
if (!q->last_merge)
block/bfq-iosched.c
6276
q->last_merge = rq;
block/bfq-iosched.c
6288
bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
block/bfq-iosched.c
6808
rq->elv.icq = ioc_find_get_icq(rq->q);
block/bfq-iosched.c
6929
struct request_queue *q = rq->q;
block/bfq-iosched.c
6931
struct bfq_data *bfqd = q->elevator->elevator_data;
block/bfq-iosched.c
695
struct bfq_data *bfqd = data->q->elevator->elevator_data;
block/bfq-iosched.c
696
struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
block/bfq-iosched.c
701
limit = data->q->nr_requests;
block/bfq-iosched.c
7112
static void bfq_depth_updated(struct request_queue *q)
block/bfq-iosched.c
7114
struct bfq_data *bfqd = q->elevator->elevator_data;
block/bfq-iosched.c
7115
unsigned int async_depth = q->async_depth;
block/bfq-iosched.c
7138
blk_mq_set_min_shallow_depth(q, 1);
block/bfq-iosched.c
7195
static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
block/bfq-iosched.c
7199
struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;
block/bfq-iosched.c
7201
bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
block/bfq-iosched.c
7207
spin_lock_irq(&q->queue_lock);
block/bfq-iosched.c
7208
q->elevator = eq;
block/bfq-iosched.c
7209
spin_unlock_irq(&q->queue_lock);
block/bfq-iosched.c
7235
bfqd->queue = q;
block/bfq-iosched.c
7242
spin_lock_irq(&q->queue_lock);
block/bfq-iosched.c
725
if (limit < data->q->nr_requests)
block/bfq-iosched.c
7266
bfqd->nr_sectors[0] = get_capacity(q->disk);
block/bfq-iosched.c
7268
spin_unlock_irq(&q->queue_lock);
block/bfq-iosched.c
7345
bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
block/bfq-iosched.c
7350
bfq_depth_updated(q);
block/bfq-iosched.c
7353
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
block/bfq-iosched.c
7355
blk_queue_flag_set(QUEUE_FLAG_DISABLE_WBT_DEF, q);
block/bfq-iosched.c
7356
wbt_disable_default(q->disk);
block/bfq-iosched.c
7357
blk_stat_enable_accounting(q);
block/bfq-iosched.c
7358
q->async_depth = (q->nr_requests * 3) >> 2;
block/bfq-iosched.h
1066
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
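A worked reading of the async_depth default set at bfq-iosched.c line 7358 above (arithmetic only, not a tuning claim): async requests are capped at three quarters of the queue depth, reserving the rest for sync I/O.

        /* e.g. nr_requests = 128: (128 * 3) >> 2 == 96 async tags */
        q->async_depth = (q->nr_requests * 3) >> 2;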
block/bio-integrity.c
164
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/bio-integrity.c
173
if (bvec_try_merge_hw_page(q, bv, page, len, offset)) {
block/bio-integrity.c
179
min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
block/bio-integrity.c
186
if (bvec_gap_to_prev(&q->limits, bv, offset))
block/bio-integrity.c
307
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/bio-integrity.c
318
if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
block/bio-integrity.c
332
blk_lim_dma_alignment_and_pad(&q->limits);
block/bio-integrity.c
334
if (blk_queue_pci_p2pdma(q))
block/bio-integrity.c
346
if (nr_bvecs > queue_max_integrity_segments(q))
block/bio.c
1000
unsigned long mask = queue_segment_boundary(q);
block/bio.c
1006
if (len > queue_max_segment_size(q) - bv->bv_len)
block/bio.c
997
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
block/blk-cgroup-rwstat.c
104
lockdep_assert_held(&blkg->q->queue_lock);
block/blk-cgroup.c
1001
struct request_queue *q = ctx->bdev->bd_queue;
block/blk-cgroup.c
1004
blk_mq_unfreeze_queue(q, memflags);
block/blk-cgroup.c
117
struct request_queue *q = blkg->q;
block/blk-cgroup.c
1242
spin_lock_irq(&blkg->q->queue_lock);
block/blk-cgroup.c
1244
spin_unlock_irq(&blkg->q->queue_lock);
block/blk-cgroup.c
127
mutex_lock(&q->blkcg_mutex);
block/blk-cgroup.c
1314
struct request_queue *q = blkg->q;
block/blk-cgroup.c
1316
if (need_resched() || !spin_trylock(&q->queue_lock)) {
block/blk-cgroup.c
1329
spin_unlock(&q->queue_lock);
block/blk-cgroup.c
133
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
135
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
136
mutex_unlock(&q->blkcg_mutex);
block/blk-cgroup.c
138
blk_put_queue(q);
block/blk-cgroup.c
1489
void blkg_init_queue(struct request_queue *q)
block/blk-cgroup.c
1491
INIT_LIST_HEAD(&q->blkg_list);
block/blk-cgroup.c
1492
mutex_init(&q->blkcg_mutex);
block/blk-cgroup.c
1497
struct request_queue *q = disk->queue;
block/blk-cgroup.c
1509
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
1513
q->root_blkg = blkg;
block/blk-cgroup.c
1514
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
1522
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
1580
struct request_queue *q = disk->queue;
block/blk-cgroup.c
1586
if (blkcg_policy_enabled(q, pol))
block/blk-cgroup.c
1597
if (queue_is_mq(q))
block/blk-cgroup.c
1598
memflags = blk_mq_freeze_queue(q);
block/blk-cgroup.c
1600
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
1603
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
block/blk-cgroup.c
1628
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
1656
__set_bit(pol->plid, q->blkcg_pols);
block/blk-cgroup.c
1659
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
1661
if (queue_is_mq(q))
block/blk-cgroup.c
1662
blk_mq_unfreeze_queue(q, memflags);
block/blk-cgroup.c
1671
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
1672
list_for_each_entry(blkg, &q->blkg_list, q_node) {
block/blk-cgroup.c
1687
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
1704
struct request_queue *q = disk->queue;
block/blk-cgroup.c
1708
if (!blkcg_policy_enabled(q, pol))
block/blk-cgroup.c
1711
if (queue_is_mq(q))
block/blk-cgroup.c
1712
memflags = blk_mq_freeze_queue(q);
block/blk-cgroup.c
1714
mutex_lock(&q->blkcg_mutex);
block/blk-cgroup.c
1715
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
1717
__clear_bit(pol->plid, q->blkcg_pols);
block/blk-cgroup.c
1719
list_for_each_entry(blkg, &q->blkg_list, q_node) {
block/blk-cgroup.c
1732
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
1733
mutex_unlock(&q->blkcg_mutex);
block/blk-cgroup.c
1735
if (queue_is_mq(q))
block/blk-cgroup.c
1736
blk_mq_unfreeze_queue(q, memflags);
block/blk-cgroup.c
316
blkg->q = disk->queue;
block/blk-cgroup.c
468
struct request_queue *q = disk->queue;
block/blk-cgroup.c
474
blkg = blkg_lookup(blkcg, q);
block/blk-cgroup.c
478
spin_lock_irqsave(&q->queue_lock, flags);
block/blk-cgroup.c
479
blkg = blkg_lookup(blkcg, q);
block/blk-cgroup.c
495
struct blkcg_gq *ret_blkg = q->root_blkg;
block/blk-cgroup.c
498
blkg = blkg_lookup(parent, q);
block/blk-cgroup.c
518
spin_unlock_irqrestore(&q->queue_lock, flags);
block/blk-cgroup.c
527
lockdep_assert_held(&blkg->q->queue_lock);
block/blk-cgroup.c
551
radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
block/blk-cgroup.c
571
struct request_queue *q = disk->queue;
block/blk-cgroup.c
577
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
578
list_for_each_entry(blkg, &q->blkg_list, q_node) {
block/blk-cgroup.c
594
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
609
__clear_bit(pol->plid, q->blkcg_pols);
block/blk-cgroup.c
612
q->root_blkg = NULL;
block/blk-cgroup.c
613
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
682
if (!blkg->q->disk)
block/blk-cgroup.c
684
return bdi_dev_name(blkg->q->disk->bdi);
block/blk-cgroup.c
716
spin_lock_irq(&blkg->q->queue_lock);
block/blk-cgroup.c
717
if (blkcg_policy_enabled(blkg->q, pol))
block/blk-cgroup.c
719
spin_unlock_irq(&blkg->q->queue_lock);
block/blk-cgroup.c
866
struct request_queue *q;
block/blk-cgroup.c
875
q = disk->queue;
block/blk-cgroup.c
878
mutex_lock(&q->blkcg_mutex);
block/blk-cgroup.c
879
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
881
if (!blkcg_policy_enabled(q, pol)) {
block/blk-cgroup.c
886
blkg = blkg_lookup(blkcg, q);
block/blk-cgroup.c
900
while (parent && !blkg_lookup(parent, q)) {
block/blk-cgroup.c
906
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
920
spin_lock_irq(&q->queue_lock);
block/blk-cgroup.c
922
if (!blkcg_policy_enabled(q, pol)) {
block/blk-cgroup.c
928
blkg = blkg_lookup(pos, q);
block/blk-cgroup.c
945
mutex_unlock(&q->blkcg_mutex);
block/blk-cgroup.c
952
spin_unlock_irq(&q->queue_lock);
block/blk-cgroup.c
954
mutex_unlock(&q->blkcg_mutex);
block/blk-cgroup.h
194
void blkg_init_queue(struct request_queue *q);
block/blk-cgroup.h
256
struct request_queue *q)
block/blk-cgroup.h
261
return q->root_blkg;
block/blk-cgroup.h
264
lockdep_is_held(&q->queue_lock));
block/blk-cgroup.h
265
if (blkg && blkg->q == q)
block/blk-cgroup.h
268
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
block/blk-cgroup.h
269
if (blkg && blkg->q != q)
block/blk-cgroup.h
356
(p_blkg)->q)))
block/blk-cgroup.h
371
(p_blkg)->q)))
block/blk-cgroup.h
457
static inline bool blkcg_policy_enabled(struct request_queue *q,
block/blk-cgroup.h
460
return pol && test_bit(pol->plid, q->blkcg_pols);
block/blk-cgroup.h
480
static inline void blkg_init_queue(struct request_queue *q) { }
block/blk-cgroup.h
498
#define blk_queue_for_each_rl(rl, q) \
block/blk-cgroup.h
499
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
block/blk-cgroup.h
58
struct request_queue *q;
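The blk-cgroup.h fragments above (lines 256-269) come from the blkg lookup: the root cgroup short-circuits to q->root_blkg, others go through a per-cgroup radix tree keyed by q->id, discarding stale hits whose blkg->q no longer matches. A condensed model (the real function also tries an RCU hint cache first):

        static struct blkcg_gq *blkg_lookup_model(struct blkcg *blkcg,
                                                  struct request_queue *q)
        {
                struct blkcg_gq *blkg;

                if (blkcg == &blkcg_root)
                        return q->root_blkg;            /* root is pre-linked */

                blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
                if (blkg && blkg->q != q)               /* stale id reuse */
                        blkg = NULL;
                return blkg;
        }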
block/blk-core.c
1107
int blk_lld_busy(struct request_queue *q)
block/blk-core.c
1109
if (queue_is_mq(q) && q->mq_ops->busy)
block/blk-core.c
1110
return q->mq_ops->busy(q);
block/blk-core.c
220
void blk_sync_queue(struct request_queue *q)
block/blk-core.c
222
timer_delete_sync(&q->timeout);
block/blk-core.c
223
cancel_work_sync(&q->timeout_work);
block/blk-core.c
231
void blk_set_pm_only(struct request_queue *q)
block/blk-core.c
233
atomic_inc(&q->pm_only);
block/blk-core.c
237
void blk_clear_pm_only(struct request_queue *q)
block/blk-core.c
241
pm_only = atomic_dec_return(&q->pm_only);
block/blk-core.c
244
wake_up_all(&q->mq_freeze_wq);
block/blk-core.c
250
struct request_queue *q = container_of(rcu_head,
block/blk-core.c
253
percpu_ref_exit(&q->q_usage_counter);
block/blk-core.c
254
kmem_cache_free(blk_requestq_cachep, q);
block/blk-core.c
257
static void blk_free_queue(struct request_queue *q)
block/blk-core.c
259
blk_free_queue_stats(q->stats);
block/blk-core.c
260
if (queue_is_mq(q))
block/blk-core.c
261
blk_mq_release(q);
block/blk-core.c
263
ida_free(&blk_queue_ida, q->id);
block/blk-core.c
264
lockdep_unregister_key(&q->io_lock_cls_key);
block/blk-core.c
265
lockdep_unregister_key(&q->q_lock_cls_key);
block/blk-core.c
266
call_rcu(&q->rcu_head, blk_free_queue_rcu);
block/blk-core.c
276
void blk_put_queue(struct request_queue *q)
block/blk-core.c
278
if (refcount_dec_and_test(&q->refs))
block/blk-core.c
279
blk_free_queue(q);
block/blk-core.c
283
bool blk_queue_start_drain(struct request_queue *q)
block/blk-core.c
290
bool freeze = __blk_freeze_queue_start(q, current);
block/blk-core.c
291
if (queue_is_mq(q))
block/blk-core.c
292
blk_mq_wake_waiters(q);
block/blk-core.c
294
wake_up_all(&q->mq_freeze_wq);
block/blk-core.c
304
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
block/blk-core.c
308
while (!blk_try_enter_queue(q, pm)) {
block/blk-core.c
320
wait_event(q->mq_freeze_wq,
block/blk-core.c
321
(!q->mq_freeze_depth &&
block/blk-core.c
322
blk_pm_resume_queue(pm, q)) ||
block/blk-core.c
323
blk_queue_dying(q));
block/blk-core.c
324
if (blk_queue_dying(q))
block/blk-core.c
328
rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_);
block/blk-core.c
329
rwsem_release(&q->q_lockdep_map, _RET_IP_);
block/blk-core.c
333
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
block/blk-core.c
335
while (!blk_try_enter_queue(q, false)) {
block/blk-core.c
353
wait_event(q->mq_freeze_wq,
block/blk-core.c
354
(!q->mq_freeze_depth &&
block/blk-core.c
355
blk_pm_resume_queue(false, q)) ||
block/blk-core.c
361
rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
block/blk-core.c
362
rwsem_release(&q->io_lockdep_map, _RET_IP_);
block/blk-core.c
369
void blk_queue_exit(struct request_queue *q)
block/blk-core.c
371
percpu_ref_put(&q->q_usage_counter);
block/blk-core.c
376
struct request_queue *q =
block/blk-core.c
379
wake_up_all(&q->mq_freeze_wq);
block/blk-core.c
384
struct request_queue *q = timer_container_of(q, t, timeout);
block/blk-core.c
386
kblockd_schedule_work(&q->timeout_work);
block/blk-core.c
395
struct request_queue *q;
block/blk-core.c
398
q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
block/blk-core.c
400
if (!q)
block/blk-core.c
403
q->last_merge = NULL;
block/blk-core.c
405
q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
block/blk-core.c
406
if (q->id < 0) {
block/blk-core.c
407
error = q->id;
block/blk-core.c
411
q->stats = blk_alloc_queue_stats();
block/blk-core.c
412
if (!q->stats) {
block/blk-core.c
420
q->limits = *lim;
block/blk-core.c
422
q->node = node_id;
block/blk-core.c
424
atomic_set(&q->nr_active_requests_shared_tags, 0);
block/blk-core.c
426
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
block/blk-core.c
427
INIT_WORK(&q->timeout_work, blk_timeout_work);
block/blk-core.c
428
INIT_LIST_HEAD(&q->icq_list);
block/blk-core.c
430
refcount_set(&q->refs, 1);
block/blk-core.c
431
mutex_init(&q->debugfs_mutex);
block/blk-core.c
432
mutex_init(&q->elevator_lock);
block/blk-core.c
433
mutex_init(&q->sysfs_lock);
block/blk-core.c
434
mutex_init(&q->limits_lock);
block/blk-core.c
435
mutex_init(&q->rq_qos_mutex);
block/blk-core.c
436
spin_lock_init(&q->queue_lock);
block/blk-core.c
438
init_waitqueue_head(&q->mq_freeze_wq);
block/blk-core.c
439
mutex_init(&q->mq_freeze_lock);
block/blk-core.c
441
blkg_init_queue(q);
block/blk-core.c
447
error = percpu_ref_init(&q->q_usage_counter,
block/blk-core.c
452
lockdep_register_key(&q->io_lock_cls_key);
block/blk-core.c
453
lockdep_register_key(&q->q_lock_cls_key);
block/blk-core.c
454
lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)",
block/blk-core.c
455
&q->io_lock_cls_key, 0);
block/blk-core.c
456
lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
block/blk-core.c
457
&q->q_lock_cls_key, 0);
block/blk-core.c
461
rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
block/blk-core.c
462
rwsem_release(&q->io_lockdep_map, _RET_IP_);
block/blk-core.c
465
q->nr_requests = BLKDEV_DEFAULT_RQ;
block/blk-core.c
466
q->async_depth = BLKDEV_DEFAULT_RQ;
block/blk-core.c
468
return q;
block/blk-core.c
471
blk_free_queue_stats(q->stats);
block/blk-core.c
473
ida_free(&blk_queue_ida, q->id);
block/blk-core.c
475
kmem_cache_free(blk_requestq_cachep, q);
block/blk-core.c
487
bool blk_get_queue(struct request_queue *q)
block/blk-core.c
489
if (unlikely(blk_queue_dying(q)))
block/blk-core.c
491
refcount_inc(&q->refs);
block/blk-core.c
597
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
block/blk-core.c
615
if (nr_sectors > q->limits.chunk_sectors)
block/blk-core.c
619
if (nr_sectors > q->limits.max_zone_append_sectors)
block/blk-core.c
681
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk-core.c
699
if (q == bdev_get_queue(bio->bi_bdev))
block/blk-core.c
759
static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
block/blk-core.c
762
if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
block/blk-core.c
765
if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
block/blk-core.c
783
struct request_queue *q = bdev_get_queue(bdev);
block/blk-core.c
80
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
block/blk-core.c
82
set_bit(flag, &q->queue_flags);
block/blk-core.c
835
status = blk_validate_atomic_write_op_size(q, bio);
block/blk-core.c
855
status = blk_check_zone_append(q, bio);
block/blk-core.c
860
if (!q->limits.max_write_zeroes_sectors)
block/blk-core.c
91
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
block/blk-core.c
93
clear_bit(flag, &q->queue_flags);
block/blk-core.c
946
struct request_queue *q;
block/blk-core.c
953
q = bdev_get_queue(bdev);
block/blk-core.c
968
if (!percpu_ref_tryget(&q->q_usage_counter))
block/blk-core.c
970
if (queue_is_mq(q)) {
block/blk-core.c
971
ret = blk_mq_poll(q, cookie, iob, flags);
block/blk-core.c
973
struct gendisk *disk = q->disk;
block/blk-core.c
975
if ((q->limits.features & BLK_FEAT_POLL) && disk &&
block/blk-core.c
979
blk_queue_exit(q);
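The blk-core.c lines above implement the queue usage-counter protocol: each submitter holds a percpu reference (q->q_usage_counter) for the duration of an I/O, and a freeze kills the ref and waits on q->mq_freeze_wq until every holder has exited. A usage sketch of the enter/exit pair:

        static void do_one_io(struct request_queue *q)
        {
                if (blk_queue_enter(q, 0))      /* fails once the queue is dying */
                        return;
                submit_one(q);                  /* hypothetical I/O step */
                blk_queue_exit(q);              /* drops q->q_usage_counter */
        }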
block/blk-crypto-profile.c
454
struct request_queue *q)
block/blk-crypto-profile.c
456
if (blk_integrity_queue_supports_integrity(q)) {
block/blk-crypto-profile.c
460
q->crypto_profile = profile;
block/blk-crypto-sysfs.c
166
struct request_queue *q = disk->queue;
block/blk-crypto-sysfs.c
170
if (!q->crypto_profile)
block/blk-crypto-sysfs.c
176
obj->profile = q->crypto_profile;
block/blk-crypto-sysfs.c
184
q->crypto_kobject = &obj->kobj;
block/blk-crypto.c
224
return blk_crypto_get_keyslot(rq->q->crypto_profile,
block/blk-crypto.c
421
struct request_queue *q = bdev_get_queue(bdev);
block/blk-crypto.c
425
err = __blk_crypto_evict_key(q->crypto_profile, key);
block/blk-flush.c
126
struct block_device *part = rq->q->disk->part0;
block/blk-flush.c
152
struct request_queue *q = rq->q;
block/blk-flush.c
176
spin_lock(&q->requeue_lock);
block/blk-flush.c
177
list_move(&rq->queuelist, &q->requeue_list);
block/blk-flush.c
178
spin_unlock(&q->requeue_lock);
block/blk-flush.c
179
blk_mq_kick_requeue_list(q);
block/blk-flush.c
198
blk_kick_flush(q, fq, cmd_flags);
block/blk-flush.c
205
struct request_queue *q = flush_rq->q;
block/blk-flush.c
232
if (!q->elevator) {
block/blk-flush.c
276
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
block/blk-flush.c
300
blk_rq_init(q, flush_rq);
block/blk-flush.c
313
if (!q->elevator)
block/blk-flush.c
331
spin_lock(&q->requeue_lock);
block/blk-flush.c
332
list_add_tail(&flush_rq->queuelist, &q->flush_list);
block/blk-flush.c
333
spin_unlock(&q->requeue_lock);
block/blk-flush.c
335
blk_mq_kick_requeue_list(q);
block/blk-flush.c
342
struct request_queue *q = rq->q;
block/blk-flush.c
348
if (q->elevator) {
block/blk-flush.c
386
struct request_queue *q = rq->q;
block/blk-flush.c
388
bool supports_fua = q->limits.features & BLK_FEAT_FUA;
block/blk-flush.c
400
if (blk_queue_write_cache(q)) {
block/blk-flush.c
94
static void blk_kick_flush(struct request_queue *q,
block/blk-ia-ranges.c
111
struct request_queue *q = disk->queue;
block/blk-ia-ranges.c
114
lockdep_assert_held(&q->sysfs_lock);
block/blk-ia-ranges.c
153
struct request_queue *q = disk->queue;
block/blk-ia-ranges.c
157
lockdep_assert_held(&q->sysfs_lock);
block/blk-ia-ranges.c
288
struct request_queue *q = disk->queue;
block/blk-ia-ranges.c
290
mutex_lock(&q->sysfs_lock);
block/blk-ia-ranges.c
309
if (blk_queue_registered(q))
block/blk-ia-ranges.c
312
mutex_unlock(&q->sysfs_lock);
block/blk-integrity.c
134
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
block/blk-integrity.c
140
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
block/blk-integrity.c
161
q->limits.max_integrity_segments)
block/blk-integrity.c
170
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
block/blk-integrity.c
190
nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
block/blk-integrity.c
192
q->limits.max_integrity_segments)
block/blk-integrity.c
229
struct request_queue *q = dev_to_disk(dev)->queue;
block/blk-integrity.c
239
lim = queue_limits_start_update(q);
block/blk-integrity.c
245
err = queue_limits_commit_update_frozen(q, &lim);
block/blk-integrity.c
28
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
block/blk-integrity.c
39
if (!biovec_phys_mergeable(q, &ivprv, &iv))
block/blk-integrity.c
41
if (seg_size + iv.bv_len > queue_max_segment_size(q))
block/blk-ioc.c
114
struct request_queue *q = icq->q;
block/blk-ioc.c
116
if (spin_trylock(&q->queue_lock)) {
block/blk-ioc.c
118
spin_unlock(&q->queue_lock);
block/blk-ioc.c
125
spin_lock(&q->queue_lock);
block/blk-ioc.c
130
spin_unlock(&q->queue_lock);
block/blk-ioc.c
164
void ioc_clear_queue(struct request_queue *q)
block/blk-ioc.c
166
spin_lock_irq(&q->queue_lock);
block/blk-ioc.c
167
while (!list_empty(&q->icq_list)) {
block/blk-ioc.c
169
list_first_entry(&q->icq_list, struct io_cq, q_node);
block/blk-ioc.c
179
spin_unlock_irq(&q->queue_lock);
block/blk-ioc.c
318
struct io_cq *ioc_lookup_icq(struct request_queue *q)
block/blk-ioc.c
331
if (icq && icq->q == q)
block/blk-ioc.c
334
icq = radix_tree_lookup(&ioc->icq_tree, q->id);
block/blk-ioc.c
335
if (icq && icq->q == q)
block/blk-ioc.c
355
static struct io_cq *ioc_create_icq(struct request_queue *q)
block/blk-ioc.c
358
struct elevator_type *et = q->elevator->type;
block/blk-ioc.c
363
q->node);
block/blk-ioc.c
373
icq->q = q;
block/blk-ioc.c
378
spin_lock_irq(&q->queue_lock);
block/blk-ioc.c
381
if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
block/blk-ioc.c
383
list_add(&icq->q_node, &q->icq_list);
block/blk-ioc.c
388
icq = ioc_lookup_icq(q);
block/blk-ioc.c
394
spin_unlock_irq(&q->queue_lock);
block/blk-ioc.c
399
struct io_cq *ioc_find_get_icq(struct request_queue *q)
block/blk-ioc.c
405
ioc = alloc_io_context(GFP_ATOMIC, q->node);
block/blk-ioc.c
41
struct elevator_type *et = icq->q->elevator->type;
block/blk-ioc.c
421
icq = ioc_lookup_icq(q);
block/blk-ioc.c
425
icq = ioc_create_icq(q);
block/blk-ioc.c
69
struct request_queue *q = icq->q;
block/blk-ioc.c
70
struct elevator_type *et = q->elevator->type;
block/blk-ioc.c
73
lockdep_assert_held(&q->queue_lock);
block/blk-ioc.c
78
radix_tree_delete(&ioc->icq_tree, icq->q->id);
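
The blk-ioc.c entries around ioc_create_icq() (lines 355-394) show how an io_cq is double-indexed: into the io_context's radix tree keyed by q->id and onto the queue's icq_list, both under q->queue_lock. A sketch of the insertion race, assuming the surrounding kernel context; my_link_icq() is hypothetical and cleanup of the losing icq is elided:

    static struct io_cq *my_link_icq(struct io_context *ioc,
                                     struct request_queue *q,
                                     struct io_cq *icq)
    {
            icq->q = q;
            spin_lock_irq(&q->queue_lock);
            if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq)))
                    list_add(&icq->q_node, &q->icq_list);
            else
                    icq = ioc_lookup_icq(q);  /* lost the race; take the winner */
            spin_unlock_irq(&q->queue_lock);
            return icq;
    }
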
block/blk-iocost.c
2986
struct ioc *ioc = q_to_ioc(blkg->q);
block/blk-iocost.c
3410
struct request_queue *q;
block/blk-iocost.c
3425
q = bdev_get_queue(ctx.bdev);
block/blk-iocost.c
3426
if (!queue_is_mq(q)) {
block/blk-iocost.c
3431
ioc = q_to_ioc(q);
block/blk-iocost.c
3436
ioc = q_to_ioc(q);
block/blk-iocost.c
3439
memflags = blk_mq_freeze_queue(q);
block/blk-iocost.c
3440
blk_mq_quiesce_queue(q);
block/blk-iocost.c
3490
blk_mq_unquiesce_queue(q);
block/blk-iocost.c
3491
blk_mq_unfreeze_queue(q, memflags);
block/blk-iocost.c
3499
blk_mq_unquiesce_queue(q);
block/blk-iocost.c
3500
blk_mq_unfreeze_queue(q, memflags);
block/blk-iocost.c
665
static struct ioc *q_to_ioc(struct request_queue *q)
block/blk-iocost.c
667
return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
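
The blk-iocost.c entries at lines 3439-3500 bracket a configuration update with both freeze and quiesce: freezing drains requests already in flight, quiescing stops new dispatch, and both are undone in reverse order. A minimal sketch of that bracket, assuming a hypothetical update step in the middle:

    static void my_update_under_freeze(struct request_queue *q)
    {
            unsigned int memflags;

            memflags = blk_mq_freeze_queue(q);  /* drains; saves NOIO state */
            blk_mq_quiesce_queue(q);

            /* ... apply parameter changes; no requests are in flight ... */

            blk_mq_unquiesce_queue(q);
            blk_mq_unfreeze_queue(q, memflags);
    }
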
block/blk-iolatency.c
745
struct request_queue *q = blkiolat->rqos.disk->queue;
block/blk-iolatency.c
751
blk_queue_flag_set(QUEUE_FLAG_BIO_ISSUE_TIME, q);
block/blk-iolatency.c
753
blk_queue_flag_clear(QUEUE_FLAG_BIO_ISSUE_TIME, q);
block/blk-iolatency.c
986
struct rq_qos *rqos = iolat_rq_qos(blkg->q);
block/blk-iolatency.c
991
iolat->ssd = !blk_queue_rot(blkg->q);
block/blk-map.c
428
const struct queue_limits *lim = &rq->q->limits;
block/blk-map.c
445
rq->phys_gap_bit = bio_seg_gap(rq->q, rq->biotail, bio,
block/blk-map.c
465
unsigned int max_bytes = rq->q->limits.max_hw_sectors << SECTOR_SHIFT;
block/blk-map.c
48
struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
block/blk-map.c
499
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
block/blk-map.c
504
unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
block/blk-map.c
517
else if (queue_virt_boundary(q))
block/blk-map.c
518
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
block/blk-map.c
555
int blk_rq_map_user(struct request_queue *q, struct request *rq,
block/blk-map.c
565
return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
block/blk-map.c
594
ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
block/blk-map.c
598
ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
block/blk-map.c
659
if (len > (queue_max_hw_sectors(rq->q) << SECTOR_SHIFT))
block/blk-map.c
664
if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
block/blk-merge.c
1013
static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
block/blk-merge.c
1018
if (segments >= queue_max_discard_segments(q))
block/blk-merge.c
1024
rq_qos_merge(q, req, bio);
block/blk-merge.c
1034
req_set_nomerge(q, req);
block/blk-merge.c
1038
static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
block/blk-merge.c
1049
if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1053
if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1057
return bio_attempt_discard_merge(q, rq, bio);
block/blk-merge.c
1085
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
block/blk-merge.c
1095
if (rq->q == q)
block/blk-merge.c
1096
return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
block/blk-merge.c
1102
if (rq->q != q)
block/blk-merge.c
1104
if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
block/blk-merge.c
1116
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
block/blk-merge.c
1126
switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
block/blk-merge.c
1141
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
block/blk-merge.c
1146
switch (elv_merge(q, &rq, bio)) {
block/blk-merge.c
1148
if (!blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1152
*merged_request = attempt_back_merge(q, rq);
block/blk-merge.c
1154
elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
block/blk-merge.c
1157
if (!blk_mq_sched_allow_merge(q, rq, bio))
block/blk-merge.c
1161
*merged_request = attempt_front_merge(q, rq);
block/blk-merge.c
1163
elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
block/blk-merge.c
1166
return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
block/blk-merge.c
502
if (queue_max_discard_segments(rq->q) > 1) {
block/blk-merge.c
51
static inline bool bio_will_gap(struct request_queue *q,
block/blk-merge.c
517
bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
block/blk-merge.c
525
struct request_queue *q = rq->q;
block/blk-merge.c
526
struct queue_limits *lim = &q->limits;
block/blk-merge.c
531
return q->limits.max_hw_sectors;
block/blk-merge.c
550
if (blk_integrity_merge_bio(req->q, req, bio) == false)
block/blk-merge.c
56
if (!bio_has_data(prev) || !queue_virt_boundary(q))
block/blk-merge.c
566
req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
block/blk-merge.c
571
req_set_nomerge(req->q, req);
block/blk-merge.c
586
req_set_nomerge(req->q, req);
block/blk-merge.c
605
req_set_nomerge(req->q, req);
block/blk-merge.c
612
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
block/blk-merge.c
617
if (segments >= queue_max_discard_segments(q))
block/blk-merge.c
626
req_set_nomerge(q, req);
block/blk-merge.c
630
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
block/blk-merge.c
652
if (blk_integrity_merge_rq(q, req, next) == false)
block/blk-merge.c
68
if (pb.bv_offset & queue_virt_boundary(q))
block/blk-merge.c
755
u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
block/blk-merge.c
768
if (!biovec_phys_mergeable(q, &pb, &nb))
block/blk-merge.c
777
static struct request *attempt_merge(struct request_queue *q,
block/blk-merge.c
805
if (!req_attempt_discard_merge(q, req, next))
block/blk-merge.c
809
if (!ll_merge_requests_fn(q, req, next))
block/blk-merge.c
82
if (biovec_phys_mergeable(q, &pb, &nb))
block/blk-merge.c
837
req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, next->bio,
block/blk-merge.c
84
return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
block/blk-merge.c
846
elv_merge_requests(q, req, next);
block/blk-merge.c
865
static struct request *attempt_back_merge(struct request_queue *q,
block/blk-merge.c
868
struct request *next = elv_latter_request(q, rq);
block/blk-merge.c
871
return attempt_merge(q, rq, next);
block/blk-merge.c
876
static struct request *attempt_front_merge(struct request_queue *q,
block/blk-merge.c
879
struct request *prev = elv_former_request(q, rq);
block/blk-merge.c
882
return attempt_merge(q, prev, rq);
block/blk-merge.c
89
return bio_will_gap(req->q, req, req->biotail, bio);
block/blk-merge.c
892
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
block/blk-merge.c
895
return attempt_merge(q, rq, next);
block/blk-merge.c
908
if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
block/blk-merge.c
94
return bio_will_gap(req->q, NULL, bio, req->bio);
block/blk-merge.c
953
rq_qos_merge(req->q, req, bio);
block/blk-merge.c
963
req->phys_gap_bit = bio_seg_gap(req->q, req->biotail, bio,
block/blk-merge.c
992
rq_qos_merge(req->q, req, bio);
block/blk-merge.c
999
req->phys_gap_bit = bio_seg_gap(req->q, bio, req->bio,
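
The attempt_back_merge()/attempt_front_merge() pair at blk-merge.c lines 865-882 shows the two merge directions: the elevator nominates the adjacent request and attempt_merge() does the actual work. Reconstructed shape of the back-merge half, per the lines above (all three helpers are static to blk-merge.c):

    static struct request *attempt_back_merge(struct request_queue *q,
                                              struct request *rq)
    {
            struct request *next = elv_latter_request(q, rq);

            if (next)
                    return attempt_merge(q, rq, next);  /* rq absorbs next */
            return NULL;
    }
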
block/blk-mq-debugfs.c
105
struct request_queue *q = data;
block/blk-mq-debugfs.c
108
blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
block/blk-mq-debugfs.c
117
struct request_queue *q = data;
block/blk-mq-debugfs.c
124
if (blk_queue_dying(q))
block/blk-mq-debugfs.c
136
blk_mq_run_hw_queues(q, true);
block/blk-mq-debugfs.c
138
blk_mq_start_stopped_hw_queues(q, true);
block/blk-mq-debugfs.c
140
blk_mq_kick_requeue_list(q);
block/blk-mq-debugfs.c
23
__acquires(&q->requeue_lock)
block/blk-mq-debugfs.c
25
struct request_queue *q = m->private;
block/blk-mq-debugfs.c
265
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
block/blk-mq-debugfs.c
27
spin_lock_irq(&q->requeue_lock);
block/blk-mq-debugfs.c
28
return seq_list_start(&q->requeue_list, *pos);
block/blk-mq-debugfs.c
33
struct request_queue *q = m->private;
block/blk-mq-debugfs.c
35
return seq_list_next(v, &q->requeue_list, pos);
block/blk-mq-debugfs.c
39
__releases(&q->requeue_lock)
block/blk-mq-debugfs.c
409
struct request_queue *q = hctx->queue;
block/blk-mq-debugfs.c
41
struct request_queue *q = m->private;
block/blk-mq-debugfs.c
412
res = mutex_lock_interruptible(&q->elevator_lock);
block/blk-mq-debugfs.c
417
mutex_unlock(&q->elevator_lock);
block/blk-mq-debugfs.c
425
struct request_queue *q = hctx->queue;
block/blk-mq-debugfs.c
428
res = mutex_lock_interruptible(&q->elevator_lock);
block/blk-mq-debugfs.c
43
spin_unlock_irq(&q->requeue_lock);
block/blk-mq-debugfs.c
433
mutex_unlock(&q->elevator_lock);
block/blk-mq-debugfs.c
441
struct request_queue *q = hctx->queue;
block/blk-mq-debugfs.c
444
res = mutex_lock_interruptible(&q->elevator_lock);
block/blk-mq-debugfs.c
449
mutex_unlock(&q->elevator_lock);
block/blk-mq-debugfs.c
457
struct request_queue *q = hctx->queue;
block/blk-mq-debugfs.c
460
res = mutex_lock_interruptible(&q->elevator_lock);
block/blk-mq-debugfs.c
465
mutex_unlock(&q->elevator_lock);
block/blk-mq-debugfs.c
611
static void debugfs_create_files(struct request_queue *q, struct dentry *parent,
block/blk-mq-debugfs.c
615
lockdep_assert_held(&q->debugfs_mutex);
block/blk-mq-debugfs.c
620
lockdep_assert_not_held(&q->elevator_lock);
block/blk-mq-debugfs.c
621
lockdep_assert_not_held(&q->rq_qos_mutex);
block/blk-mq-debugfs.c
631
void blk_mq_debugfs_register(struct request_queue *q)
block/blk-mq-debugfs.c
636
debugfs_create_files(q, q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
block/blk-mq-debugfs.c
638
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-debugfs.c
640
blk_mq_debugfs_register_hctx(q, hctx);
block/blk-mq-debugfs.c
643
blk_mq_debugfs_register_rq_qos(q);
block/blk-mq-debugfs.c
659
void blk_mq_debugfs_register_hctx(struct request_queue *q,
block/blk-mq-debugfs.c
666
if (!q->debugfs_dir)
block/blk-mq-debugfs.c
670
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
block/blk-mq-debugfs.c
672
debugfs_create_files(q, hctx->debugfs_dir, hctx,
block/blk-mq-debugfs.c
688
void blk_mq_debugfs_register_hctxs(struct request_queue *q)
block/blk-mq-debugfs.c
694
memflags = blk_debugfs_lock(q);
block/blk-mq-debugfs.c
695
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-debugfs.c
696
blk_mq_debugfs_register_hctx(q, hctx);
block/blk-mq-debugfs.c
697
blk_debugfs_unlock(q, memflags);
block/blk-mq-debugfs.c
700
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
block/blk-mq-debugfs.c
705
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-debugfs.c
709
void blk_mq_debugfs_register_sched(struct request_queue *q)
block/blk-mq-debugfs.c
711
struct elevator_type *e = q->elevator->type;
block/blk-mq-debugfs.c
713
lockdep_assert_held(&q->debugfs_mutex);
block/blk-mq-debugfs.c
719
if (!q->debugfs_dir)
block/blk-mq-debugfs.c
725
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
block/blk-mq-debugfs.c
727
debugfs_create_files(q, q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
block/blk-mq-debugfs.c
730
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
block/blk-mq-debugfs.c
732
lockdep_assert_held(&q->debugfs_mutex);
block/blk-mq-debugfs.c
734
debugfs_remove_recursive(q->sched_debugfs_dir);
block/blk-mq-debugfs.c
735
q->sched_debugfs_dir = NULL;
block/blk-mq-debugfs.c
75
struct request_queue *q = data;
block/blk-mq-debugfs.c
753
struct request_queue *q = rqos->disk->queue;
block/blk-mq-debugfs.c
756
lockdep_assert_held(&q->debugfs_mutex);
block/blk-mq-debugfs.c
761
if (!q->rqos_debugfs_dir)
block/blk-mq-debugfs.c
762
q->rqos_debugfs_dir = debugfs_create_dir("rqos",
block/blk-mq-debugfs.c
763
q->debugfs_dir);
block/blk-mq-debugfs.c
765
rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
block/blk-mq-debugfs.c
766
debugfs_create_files(q, rqos->debugfs_dir, rqos,
block/blk-mq-debugfs.c
77
seq_printf(m, "%d\n", atomic_read(&q->pm_only));
block/blk-mq-debugfs.c
770
void blk_mq_debugfs_register_rq_qos(struct request_queue *q)
block/blk-mq-debugfs.c
772
lockdep_assert_held(&q->debugfs_mutex);
block/blk-mq-debugfs.c
774
if (q->rq_qos) {
block/blk-mq-debugfs.c
775
struct rq_qos *rqos = q->rq_qos;
block/blk-mq-debugfs.c
784
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
block/blk-mq-debugfs.c
787
struct elevator_type *e = q->elevator->type;
block/blk-mq-debugfs.c
789
lockdep_assert_held(&q->debugfs_mutex);
block/blk-mq-debugfs.c
804
debugfs_create_files(q, hctx->sched_debugfs_dir, hctx,
block/blk-mq-debugfs.h
23
void blk_mq_debugfs_register(struct request_queue *q);
block/blk-mq-debugfs.h
24
void blk_mq_debugfs_register_hctx(struct request_queue *q,
block/blk-mq-debugfs.h
27
void blk_mq_debugfs_register_hctxs(struct request_queue *q);
block/blk-mq-debugfs.h
28
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
block/blk-mq-debugfs.h
30
void blk_mq_debugfs_register_sched(struct request_queue *q);
block/blk-mq-debugfs.h
31
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
block/blk-mq-debugfs.h
32
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
block/blk-mq-debugfs.h
36
void blk_mq_debugfs_register_rq_qos(struct request_queue *q);
block/blk-mq-debugfs.h
38
static inline void blk_mq_debugfs_register(struct request_queue *q)
block/blk-mq-debugfs.h
42
static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
block/blk-mq-debugfs.h
51
static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
block/blk-mq-debugfs.h
55
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
block/blk-mq-debugfs.h
59
static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
block/blk-mq-debugfs.h
63
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
block/blk-mq-debugfs.h
67
static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
block/blk-mq-debugfs.h
76
static inline void blk_mq_debugfs_register_rq_qos(struct request_queue *q)
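
The blk-mq-debugfs.h entries pair each prototype (lines 23-36) with an empty static inline (lines 38-76): the usual config-stub pattern, so callers never need #ifdefs of their own. Reconstructed shape, assuming the guard macro is CONFIG_BLK_DEBUG_FS as in mainline:

    #ifdef CONFIG_BLK_DEBUG_FS
    void blk_mq_debugfs_register(struct request_queue *q);
    #else
    static inline void blk_mq_debugfs_register(struct request_queue *q)
    {
    }
    #endif
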
block/blk-mq-dma.c
345
unsigned len = bio_integrity_bytes(&req->q->limits.integrity,
block/blk-mq-dma.c
38
max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
block/blk-mq-dma.c
404
struct request_queue *q = rq->q;
block/blk-mq-dma.c
434
BUG_ON(segments > queue_max_integrity_segments(q));
block/blk-mq-dma.c
55
!biovec_phys_mergeable(req->q, &bv, &next))
block/blk-mq-sched.c
112
budget_token = blk_mq_get_dispatch_budget(q);
block/blk-mq-sched.c
118
blk_mq_put_dispatch_budget(q, budget_token);
block/blk-mq-sched.c
154
blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
block/blk-mq-sched.c
215
struct request_queue *q = hctx->queue;
block/blk-mq-sched.c
232
budget_token = blk_mq_get_dispatch_budget(q);
block/blk-mq-sched.c
238
blk_mq_put_dispatch_budget(q, budget_token);
block/blk-mq-sched.c
246
blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
block/blk-mq-sched.c
319
struct request_queue *q = hctx->queue;
block/blk-mq-sched.c
322
if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
block/blk-mq-sched.c
335
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
block/blk-mq-sched.c
338
struct elevator_queue *e = q->elevator;
block/blk-mq-sched.c
345
ret = e->type->ops.bio_merge(q, bio, nr_segs);
block/blk-mq-sched.c
349
ctx = blk_mq_get_ctx(q);
block/blk-mq-sched.c
362
if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
block/blk-mq-sched.c
370
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
block/blk-mq-sched.c
373
return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
block/blk-mq-sched.c
378
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
block/blk-mq-sched.c
383
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-sched.c
387
q->sched_shared_tags = NULL;
block/blk-mq-sched.c
390
void blk_mq_sched_reg_debugfs(struct request_queue *q)
block/blk-mq-sched.c
396
memflags = blk_debugfs_lock(q);
block/blk-mq-sched.c
397
blk_mq_debugfs_register_sched(q);
block/blk-mq-sched.c
398
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-sched.c
399
blk_mq_debugfs_register_sched_hctx(q, hctx);
block/blk-mq-sched.c
400
blk_debugfs_unlock(q, memflags);
block/blk-mq-sched.c
403
void blk_mq_sched_unreg_debugfs(struct request_queue *q)
block/blk-mq-sched.c
408
blk_debugfs_lock_nomemsave(q);
block/blk-mq-sched.c
409
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-sched.c
411
blk_mq_debugfs_unregister_sched(q);
block/blk-mq-sched.c
412
blk_debugfs_unlock_nomemrestore(q);
block/blk-mq-sched.c
448
struct request_queue *q;
block/blk-mq-sched.c
453
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq-sched.c
461
if (q->elevator) {
block/blk-mq-sched.c
462
ctx = xa_load(elv_tbl, q->id);
block/blk-mq-sched.c
486
struct request_queue *q;
block/blk-mq-sched.c
491
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq-sched.c
496
if (xa_insert(elv_tbl, q->id, ctx, GFP_KERNEL)) {
block/blk-mq-sched.c
548
int blk_mq_alloc_sched_res(struct request_queue *q,
block/blk-mq-sched.c
553
struct blk_mq_tag_set *set = q->tag_set;
block/blk-mq-sched.c
560
res->data = blk_mq_alloc_sched_data(q, type);
block/blk-mq-sched.c
573
struct request_queue *q;
block/blk-mq-sched.c
578
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq-sched.c
586
if (q->elevator) {
block/blk-mq-sched.c
587
ctx = xa_load(elv_tbl, q->id);
block/blk-mq-sched.c
593
ret = blk_mq_alloc_sched_res(q, q->elevator->type,
block/blk-mq-sched.c
602
list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
block/blk-mq-sched.c
603
if (q->elevator) {
block/blk-mq-sched.c
604
ctx = xa_load(elv_tbl, q->id);
block/blk-mq-sched.c
614
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
block/blk-mq-sched.c
617
unsigned int flags = q->tag_set->flags;
block/blk-mq-sched.c
624
eq = elevator_alloc(q, e, res);
block/blk-mq-sched.c
628
q->nr_requests = et->nr_requests;
block/blk-mq-sched.c
632
q->sched_shared_tags = et->tags[0];
block/blk-mq-sched.c
633
blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
block/blk-mq-sched.c
636
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-sched.c
638
hctx->sched_tags = q->sched_shared_tags;
block/blk-mq-sched.c
643
ret = e->ops.init_sched(q, eq);
block/blk-mq-sched.c
647
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-sched.c
651
blk_mq_exit_sched(q, eq);
block/blk-mq-sched.c
660
blk_mq_sched_tags_teardown(q, flags);
block/blk-mq-sched.c
662
q->elevator = NULL;
block/blk-mq-sched.c
670
void blk_mq_sched_free_rqs(struct request_queue *q)
block/blk-mq-sched.c
675
if (blk_mq_is_shared_tags(q->tag_set->flags)) {
block/blk-mq-sched.c
676
blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
block/blk-mq-sched.c
679
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-sched.c
681
blk_mq_free_rqs(q->tag_set,
block/blk-mq-sched.c
687
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
block/blk-mq-sched.c
693
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-sched.c
703
blk_mq_sched_tags_teardown(q, flags);
block/blk-mq-sched.c
704
set_bit(ELEVATOR_FLAG_DYING, &q->elevator->flags);
block/blk-mq-sched.c
705
q->elevator = NULL;
block/blk-mq-sched.c
87
struct request_queue *q = hctx->queue;
block/blk-mq-sched.c
88
struct elevator_queue *e = q->elevator;
block/blk-mq-sched.h
10
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
block/blk-mq-sched.h
106
struct request_queue *q = rq->q;
block/blk-mq-sched.h
107
struct elevator_queue *e = q->elevator;
block/blk-mq-sched.h
12
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
block/blk-mq-sched.h
129
static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
block/blk-mq-sched.h
135
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-sched.h
14
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
block/blk-mq-sched.h
21
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
block/blk-mq-sched.h
23
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
block/blk-mq-sched.h
24
void blk_mq_sched_free_rqs(struct request_queue *q);
block/blk-mq-sched.h
28
int blk_mq_alloc_sched_res(struct request_queue *q,
block/blk-mq-sched.h
51
static inline void *blk_mq_alloc_sched_data(struct request_queue *q,
block/blk-mq-sched.h
59
sched_data = e->ops.alloc_sched_data(q);
block/blk-mq-sched.h
81
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
block/blk-mq-sched.h
85
struct elevator_queue *e = q->elevator;
block/blk-mq-sched.h
88
return e->type->ops.allow_merge(q, rq, bio);
block/blk-mq-sched.h
96
struct elevator_queue *e = rq->q->elevator;
block/blk-mq-sysfs.c
161
struct request_queue *q = hctx->queue;
block/blk-mq-sysfs.c
168
ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
block/blk-mq-sysfs.c
193
void blk_mq_sysfs_deinit(struct request_queue *q)
block/blk-mq-sysfs.c
199
ctx = per_cpu_ptr(q->queue_ctx, cpu);
block/blk-mq-sysfs.c
202
kobject_put(q->mq_kobj);
block/blk-mq-sysfs.c
205
void blk_mq_sysfs_init(struct request_queue *q)
block/blk-mq-sysfs.c
210
kobject_init(q->mq_kobj, &blk_mq_ktype);
block/blk-mq-sysfs.c
213
ctx = per_cpu_ptr(q->queue_ctx, cpu);
block/blk-mq-sysfs.c
215
kobject_get(q->mq_kobj);
block/blk-mq-sysfs.c
222
struct request_queue *q = disk->queue;
block/blk-mq-sysfs.c
227
ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
block/blk-mq-sysfs.c
231
kobject_uevent(q->mq_kobj, KOBJ_ADD);
block/blk-mq-sysfs.c
233
mutex_lock(&q->tag_set->tag_list_lock);
block/blk-mq-sysfs.c
234
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-sysfs.c
239
mutex_unlock(&q->tag_set->tag_list_lock);
block/blk-mq-sysfs.c
243
queue_for_each_hw_ctx(q, hctx, j) {
block/blk-mq-sysfs.c
247
mutex_unlock(&q->tag_set->tag_list_lock);
block/blk-mq-sysfs.c
249
kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
block/blk-mq-sysfs.c
250
kobject_del(q->mq_kobj);
block/blk-mq-sysfs.c
256
struct request_queue *q = disk->queue;
block/blk-mq-sysfs.c
260
mutex_lock(&q->tag_set->tag_list_lock);
block/blk-mq-sysfs.c
261
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-sysfs.c
263
mutex_unlock(&q->tag_set->tag_list_lock);
block/blk-mq-sysfs.c
265
kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
block/blk-mq-sysfs.c
266
kobject_del(q->mq_kobj);
block/blk-mq-sysfs.c
269
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
block/blk-mq-sysfs.c
274
if (!blk_queue_registered(q))
block/blk-mq-sysfs.c
277
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq-sysfs.c
281
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
block/blk-mq-sysfs.c
287
if (!blk_queue_registered(q))
block/blk-mq-sysfs.c
290
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-sysfs.c
53
struct request_queue *q;
block/blk-mq-sysfs.c
58
q = hctx->queue;
block/blk-mq-sysfs.c
63
mutex_lock(&q->elevator_lock);
block/blk-mq-sysfs.c
65
mutex_unlock(&q->elevator_lock);
block/blk-mq-tag.c
112
if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
block/blk-mq-tag.c
195
data->ctx = blk_mq_get_ctx(data->q);
block/blk-mq-tag.c
249
struct request_queue *q;
block/blk-mq-tag.c
270
struct request_queue *q = iter_data->q;
block/blk-mq-tag.c
271
struct blk_mq_tag_set *set = q->tag_set;
block/blk-mq-tag.c
291
if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
block/blk-mq-tag.c
312
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
block/blk-mq-tag.c
321
.q = q,
block/blk-mq-tag.c
496
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
block/blk-mq-tag.c
506
if (!percpu_ref_tryget(&q->q_usage_counter))
block/blk-mq-tag.c
509
srcu_idx = srcu_read_lock(&q->tag_set->tags_srcu);
block/blk-mq-tag.c
510
if (blk_mq_is_shared_tags(q->tag_set->flags)) {
block/blk-mq-tag.c
511
struct blk_mq_tags *tags = q->tag_set->shared_tags;
block/blk-mq-tag.c
516
bt_for_each(NULL, q, bresv, fn, priv, true);
block/blk-mq-tag.c
517
bt_for_each(NULL, q, btags, fn, priv, false);
block/blk-mq-tag.c
52
struct request_queue *q = hctx->queue;
block/blk-mq-tag.c
522
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq-tag.c
535
bt_for_each(hctx, q, bresv, fn, priv, true);
block/blk-mq-tag.c
536
bt_for_each(hctx, q, btags, fn, priv, false);
block/blk-mq-tag.c
539
srcu_read_unlock(&q->tag_set->tags_srcu, srcu_idx);
block/blk-mq-tag.c
54
if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
block/blk-mq-tag.c
540
blk_queue_exit(q);
block/blk-mq-tag.c
55
test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
block/blk-mq-tag.c
625
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
block/blk-mq-tag.c
628
sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
block/blk-mq-tag.c
629
nr - q->tag_set->reserved_tags);
block/blk-mq-tag.c
90
struct request_queue *q = hctx->queue;
block/blk-mq-tag.c
93
&q->queue_flags))
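
blk_mq_queue_tag_busy_iter() (blk-mq-tag.c lines 496-540 above) wraps the whole walk in two guards: a q_usage_counter reference so the queue stays alive, and the tag set's SRCU read lock so tag maps cannot be freed mid-iteration. Skeleton of just the guards, with the per-tag walk elided (my_busy_iter() is hypothetical):

    static void my_busy_iter(struct request_queue *q,
                             busy_tag_iter_fn *fn, void *priv)
    {
            int srcu_idx;

            if (!percpu_ref_tryget(&q->q_usage_counter))
                    return;

            srcu_idx = srcu_read_lock(&q->tag_set->tags_srcu);
            /* ... bt_for_each() over shared or per-hctx tag maps ... */
            srcu_read_unlock(&q->tag_set->tags_srcu, srcu_idx);
            blk_queue_exit(q);
    }
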
block/blk-mq.c
1095
if (!blk_queue_passthrough_stat(req->q))
block/blk-mq.c
1125
if (!blk_queue_io_stat(req->q))
block/blk-mq.c
1142
req->part = req->q->disk->part0;
block/blk-mq.c
116
static bool blk_freeze_set_owner(struct request_queue *q,
block/blk-mq.c
1167
rq_qos_done(rq->q, rq);
block/blk-mq.c
1189
struct request_queue *q = hctx->queue;
block/blk-mq.c
1194
percpu_ref_put_many(&q->q_usage_counter, nr_tags);
block/blk-mq.c
1217
rq_qos_done(rq->q, rq);
block/blk-mq.c
122
if (!q->mq_freeze_depth) {
block/blk-mq.c
123
q->mq_freeze_owner = owner;
block/blk-mq.c
124
q->mq_freeze_owner_depth = 1;
block/blk-mq.c
125
q->mq_freeze_disk_dead = !q->disk ||
block/blk-mq.c
1253
rq->q->mq_ops->complete(rq);
block/blk-mq.c
126
test_bit(GD_DEAD, &q->disk->state) ||
block/blk-mq.c
127
!blk_queue_registered(q);
block/blk-mq.c
1277
!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
block/blk-mq.c
128
q->mq_freeze_queue_dying = blk_queue_dying(q);
block/blk-mq.c
1290
(!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
block/blk-mq.c
132
if (owner == q->mq_freeze_owner)
block/blk-mq.c
133
q->mq_freeze_owner_depth += 1;
block/blk-mq.c
1338
if (rq->q->nr_hw_queues == 1) {
block/blk-mq.c
1356
rq->q->mq_ops->complete(rq);
block/blk-mq.c
1370
struct request_queue *q = rq->q;
block/blk-mq.c
1374
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
block/blk-mq.c
1379
rq_qos_issue(q, rq);
block/blk-mq.c
138
static bool blk_unfreeze_check_owner(struct request_queue *q)
block/blk-mq.c
140
if (q->mq_freeze_owner != current)
block/blk-mq.c
1413
trace_block_plug(rq->q);
block/blk-mq.c
1415
(!blk_queue_nomerges(rq->q) &&
block/blk-mq.c
1419
trace_block_plug(rq->q);
block/blk-mq.c
142
if (--q->mq_freeze_owner_depth == 0) {
block/blk-mq.c
1422
if (!plug->multiple_queues && last && last->q != rq->q)
block/blk-mq.c
143
q->mq_freeze_owner = NULL;
block/blk-mq.c
1493
blk_hctx_poll(rq->q, rq->mq_hctx, NULL, BLK_POLL_ONESHOT);
block/blk-mq.c
151
static bool blk_freeze_set_owner(struct request_queue *q,
block/blk-mq.c
1536
struct request_queue *q = rq->q;
block/blk-mq.c
1541
rq_qos_requeue(q, rq);
block/blk-mq.c
1551
struct request_queue *q = rq->q;
block/blk-mq.c
1559
spin_lock_irqsave(&q->requeue_lock, flags);
block/blk-mq.c
1560
list_add_tail(&rq->queuelist, &q->requeue_list);
block/blk-mq.c
1561
spin_unlock_irqrestore(&q->requeue_lock, flags);
block/blk-mq.c
1564
blk_mq_kick_requeue_list(q);
block/blk-mq.c
157
static bool blk_unfreeze_check_owner(struct request_queue *q)
block/blk-mq.c
1570
struct request_queue *q =
block/blk-mq.c
1576
spin_lock_irq(&q->requeue_lock);
block/blk-mq.c
1577
list_splice_init(&q->requeue_list, &rq_list);
block/blk-mq.c
1578
list_splice_init(&q->flush_list, &flush_list);
block/blk-mq.c
1579
spin_unlock_irq(&q->requeue_lock);
block/blk-mq.c
1602
blk_mq_run_hw_queues(q, false);
block/blk-mq.c
1605
void blk_mq_kick_requeue_list(struct request_queue *q)
block/blk-mq.c
1607
kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
block/blk-mq.c
1611
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
block/blk-mq.c
1614
kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
block/blk-mq.c
163
bool __blk_freeze_queue_start(struct request_queue *q,
block/blk-mq.c
1636
if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
block/blk-mq.c
1648
bool blk_mq_queue_inflight(struct request_queue *q)
block/blk-mq.c
1652
blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
block/blk-mq.c
1660
if (req->q->mq_ops->timeout) {
block/blk-mq.c
1663
ret = req->q->mq_ops->timeout(req);
block/blk-mq.c
168
mutex_lock(&q->mq_freeze_lock);
block/blk-mq.c
169
freeze = blk_freeze_set_owner(q, owner);
block/blk-mq.c
170
if (++q->mq_freeze_depth == 1) {
block/blk-mq.c
171
percpu_ref_kill(&q->q_usage_counter);
block/blk-mq.c
172
mutex_unlock(&q->mq_freeze_lock);
block/blk-mq.c
173
if (queue_is_mq(q))
block/blk-mq.c
1737
struct request_queue *q =
block/blk-mq.c
174
blk_mq_run_hw_queues(q, false);
block/blk-mq.c
1758
if (!percpu_ref_tryget(&q->q_usage_counter))
block/blk-mq.c
176
mutex_unlock(&q->mq_freeze_lock);
block/blk-mq.c
1762
blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
block/blk-mq.c
1770
blk_mq_wait_quiesce_done(q->tag_set);
block/blk-mq.c
1773
blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
block/blk-mq.c
1777
mod_timer(&q->timeout, expired.next);
block/blk-mq.c
1785
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
1791
blk_queue_exit(q);
block/blk-mq.c
182
void blk_freeze_queue_start(struct request_queue *q)
block/blk-mq.c
184
if (__blk_freeze_queue_start(q, current))
block/blk-mq.c
185
blk_freeze_acquire_lock(q);
block/blk-mq.c
189
void blk_mq_freeze_queue_wait(struct request_queue *q)
block/blk-mq.c
191
wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
block/blk-mq.c
195
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
block/blk-mq.c
198
return wait_event_timeout(q->mq_freeze_wq,
block/blk-mq.c
199
percpu_ref_is_zero(&q->q_usage_counter),
block/blk-mq.c
204
void blk_mq_freeze_queue_nomemsave(struct request_queue *q)
block/blk-mq.c
2052
budget_token = blk_mq_get_dispatch_budget(rq->q);
block/blk-mq.c
206
blk_freeze_queue_start(q);
block/blk-mq.c
207
blk_mq_freeze_queue_wait(q);
block/blk-mq.c
2074
blk_mq_put_dispatch_budget(rq->q, budget_token);
block/blk-mq.c
2083
static void blk_mq_release_budgets(struct request_queue *q,
block/blk-mq.c
2092
blk_mq_put_dispatch_budget(q, budget_token);
block/blk-mq.c
211
bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
block/blk-mq.c
2120
struct request_queue *q = hctx->queue;
block/blk-mq.c
2148
ret = q->mq_ops->queue_rq(hctx, &bd);
block/blk-mq.c
215
mutex_lock(&q->mq_freeze_lock);
block/blk-mq.c
217
q->q_usage_counter.data->force_atomic = true;
block/blk-mq.c
218
q->mq_freeze_depth--;
block/blk-mq.c
2186
blk_mq_release_budgets(q, list);
block/blk-mq.c
219
WARN_ON_ONCE(q->mq_freeze_depth < 0);
block/blk-mq.c
220
if (!q->mq_freeze_depth) {
block/blk-mq.c
221
percpu_ref_resurrect(&q->q_usage_counter);
block/blk-mq.c
222
wake_up_all(&q->mq_freeze_wq);
block/blk-mq.c
224
unfreeze = blk_unfreeze_check_owner(q);
block/blk-mq.c
225
mutex_unlock(&q->mq_freeze_lock);
block/blk-mq.c
230
void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q)
block/blk-mq.c
232
if (__blk_mq_unfreeze_queue(q, false))
block/blk-mq.c
233
blk_unfreeze_release_lock(q);
block/blk-mq.c
2395
static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
block/blk-mq.c
2397
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
block/blk-mq.c
2417
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
block/blk-mq.c
2423
if (blk_queue_sq_sched(q))
block/blk-mq.c
2424
sq_hctx = blk_mq_get_sq_hctx(q);
block/blk-mq.c
2425
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
244
void blk_freeze_queue_start_non_owner(struct request_queue *q)
block/blk-mq.c
2445
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
block/blk-mq.c
2451
if (blk_queue_sq_sched(q))
block/blk-mq.c
2452
sq_hctx = blk_mq_get_sq_hctx(q);
block/blk-mq.c
2453
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
246
__blk_freeze_queue_start(q, NULL);
block/blk-mq.c
2502
void blk_mq_stop_hw_queues(struct request_queue *q)
block/blk-mq.c
2507
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
251
void blk_mq_unfreeze_queue_non_owner(struct request_queue *q)
block/blk-mq.c
2520
void blk_mq_start_hw_queues(struct request_queue *q)
block/blk-mq.c
2525
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
253
__blk_mq_unfreeze_queue(q, false);
block/blk-mq.c
2546
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
block/blk-mq.c
2551
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
261
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
block/blk-mq.c
2625
struct request_queue *q = rq->q;
block/blk-mq.c
265
spin_lock_irqsave(&q->queue_lock, flags);
block/blk-mq.c
266
if (!q->quiesce_depth++)
block/blk-mq.c
2664
} else if (q->elevator) {
block/blk-mq.c
267
blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
block/blk-mq.c
2670
q->elevator->type->ops.insert_requests(hctx, &list, flags);
block/blk-mq.c
268
spin_unlock_irqrestore(&q->queue_lock, flags);
block/blk-mq.c
2700
rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
block/blk-mq.c
2713
struct request_queue *q = rq->q;
block/blk-mq.c
2725
ret = q->mq_ops->queue_rq(hctx, &bd);
block/blk-mq.c
2747
budget_token = blk_mq_get_dispatch_budget(rq->q);
block/blk-mq.c
2752
blk_mq_put_dispatch_budget(rq->q, budget_token);
block/blk-mq.c
2773
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
block/blk-mq.c
2804
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
block/blk-mq.c
2854
static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs)
block/blk-mq.c
2856
if (blk_queue_quiesced(q))
block/blk-mq.c
2858
q->mq_ops->queue_rqs(rqs);
block/blk-mq.c
2865
struct request_queue *this_q = rq->q;
block/blk-mq.c
2873
if (rq->q == this_q) {
block/blk-mq.c
2892
struct request_queue *q = rq_list_peek(rqs)->q;
block/blk-mq.c
2894
trace_block_unplug(q, depth, true);
block/blk-mq.c
2902
if (q->mq_ops->queue_rqs) {
block/blk-mq.c
2903
blk_mq_run_dispatch_ops(q, __blk_mq_flush_list(q, rqs));
block/blk-mq.c
2908
blk_mq_run_dispatch_ops(q, blk_mq_issue_direct(rqs));
block/blk-mq.c
299
void blk_mq_quiesce_queue(struct request_queue *q)
block/blk-mq.c
301
blk_mq_quiesce_queue_nowait(q);
block/blk-mq.c
303
if (queue_is_mq(q))
block/blk-mq.c
3034
static bool blk_mq_attempt_bio_merge(struct request_queue *q,
block/blk-mq.c
3037
if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
block/blk-mq.c
3038
if (blk_attempt_plug_merge(q, bio, nr_segs))
block/blk-mq.c
304
blk_mq_wait_quiesce_done(q->tag_set);
block/blk-mq.c
3040
if (blk_mq_sched_bio_merge(q, bio, nr_segs))
block/blk-mq.c
3046
static struct request *blk_mq_get_new_requests(struct request_queue *q,
block/blk-mq.c
3051
.q = q,
block/blk-mq.c
3063
rq_qos_throttle(q, bio);
block/blk-mq.c
3073
rq_qos_cleanup(q, bio);
block/blk-mq.c
3081
struct request_queue *q, blk_opf_t opf)
block/blk-mq.c
3089
if (!rq || rq->q != q)
block/blk-mq.c
3110
rq_qos_throttle(rq->q, bio);
block/blk-mq.c
3117
static bool bio_unaligned(const struct bio *bio, struct request_queue *q)
block/blk-mq.c
3119
unsigned int bs_mask = queue_logical_block_size(q) - 1;
block/blk-mq.c
3143
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk-mq.c
315
void blk_mq_unquiesce_queue(struct request_queue *q)
block/blk-mq.c
3154
rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
block/blk-mq.c
3165
blk_queue_exit(q);
block/blk-mq.c
3183
if (unlikely(bio_unaligned(bio, q))) {
block/blk-mq.c
3188
if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) {
block/blk-mq.c
3194
bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
block/blk-mq.c
320
spin_lock_irqsave(&q->queue_lock, flags);
block/blk-mq.c
3201
blk_mq_bio_issue_init(q, bio);
block/blk-mq.c
3202
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
block/blk-mq.c
321
if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
block/blk-mq.c
3214
rq = blk_mq_get_new_requests(q, plug, bio);
block/blk-mq.c
3224
rq_qos_track(q, rq, bio);
block/blk-mq.c
323
} else if (!--q->quiesce_depth) {
block/blk-mq.c
324
blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
block/blk-mq.c
3249
(hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
block/blk-mq.c
3253
blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
block/blk-mq.c
3263
blk_queue_exit(q);
block/blk-mq.c
327
spin_unlock_irqrestore(&q->queue_lock, flags);
block/blk-mq.c
3273
struct request_queue *q = rq->q;
block/blk-mq.c
3308
if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
block/blk-mq.c
331
blk_mq_run_hw_queues(q, true);
block/blk-mq.c
3322
blk_mq_run_dispatch_ops(q,
block/blk-mq.c
337
struct request_queue *q;
block/blk-mq.c
3377
struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src,
block/blk-mq.c
340
list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
block/blk-mq.c
341
if (!blk_queue_skip_tagset_quiesce(q))
block/blk-mq.c
342
blk_mq_quiesce_queue_nowait(q);
block/blk-mq.c
352
struct request_queue *q;
block/blk-mq.c
355
list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
block/blk-mq.c
356
if (!blk_queue_skip_tagset_quiesce(q))
block/blk-mq.c
357
blk_mq_unquiesce_queue(q);
block/blk-mq.c
363
void blk_mq_wake_waiters(struct request_queue *q)
block/blk-mq.c
368
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
373
void blk_rq_init(struct request_queue *q, struct request *rq)
block/blk-mq.c
378
rq->q = q;
block/blk-mq.c
3890
static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
block/blk-mq.c
3894
spin_lock(&q->unused_hctx_lock);
block/blk-mq.c
3895
list_splice_init(&q->unused_hctx_list, &hctx_list);
block/blk-mq.c
3896
spin_unlock(&q->unused_hctx_lock);
block/blk-mq.c
3902
spin_lock(&q->unused_hctx_lock);
block/blk-mq.c
3903
list_splice(&hctx_list, &q->unused_hctx_list);
block/blk-mq.c
3904
spin_unlock(&q->unused_hctx_lock);
block/blk-mq.c
3912
static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
block/blk-mq.c
3918
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
394
if (blk_queue_rq_alloc_time(rq->q))
block/blk-mq.c
3951
static void blk_mq_exit_hctx(struct request_queue *q,
block/blk-mq.c
3960
if (blk_queue_init_done(q))
block/blk-mq.c
3973
spin_lock(&q->unused_hctx_lock);
block/blk-mq.c
3974
list_add(&hctx->hctx_list, &q->unused_hctx_list);
block/blk-mq.c
3975
spin_unlock(&q->unused_hctx_lock);
block/blk-mq.c
3978
static void blk_mq_exit_hw_queues(struct request_queue *q,
block/blk-mq.c
3984
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
3988
blk_mq_exit_hctx(q, set, hctx, i);
block/blk-mq.c
3992
static int blk_mq_init_hctx(struct request_queue *q,
block/blk-mq.c
401
static inline void blk_mq_bio_issue_init(struct request_queue *q,
block/blk-mq.c
4027
blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
block/blk-mq.c
405
if (test_bit(QUEUE_FLAG_BIO_ISSUE_TIME, &q->queue_flags))
block/blk-mq.c
4050
hctx->queue = q;
block/blk-mq.c
4087
static void blk_mq_init_cpu_queues(struct request_queue *q,
block/blk-mq.c
4090
struct blk_mq_tag_set *set = q->tag_set;
block/blk-mq.c
4094
struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
block/blk-mq.c
4103
__ctx->queue = q;
block/blk-mq.c
4110
hctx = blk_mq_map_queue_type(q, j, i);
block/blk-mq.c
415
struct request_queue *q = data->q;
block/blk-mq.c
4171
static void blk_mq_map_swqueue(struct request_queue *q)
block/blk-mq.c
4177
struct blk_mq_tag_set *set = q->tag_set;
block/blk-mq.c
4179
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
418
rq->q = q;
block/blk-mq.c
4192
ctx = per_cpu_ptr(q->queue_ctx, i);
block/blk-mq.c
4195
ctx->hctxs[j] = blk_mq_map_queue_type(q,
block/blk-mq.c
4212
hctx = blk_mq_map_queue_type(q, j, i);
block/blk-mq.c
4235
ctx->hctxs[j] = blk_mq_map_queue_type(q,
block/blk-mq.c
4239
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
4293
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
block/blk-mq.c
4298
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
4311
struct request_queue *q;
block/blk-mq.c
4316
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq.c
4317
memflags = blk_mq_freeze_queue(q);
block/blk-mq.c
4318
queue_set_hctx_shared(q, shared);
block/blk-mq.c
4319
blk_mq_unfreeze_queue(q, memflags);
block/blk-mq.c
4323
static void blk_mq_del_queue_tag_set(struct request_queue *q)
block/blk-mq.c
4325
struct blk_mq_tag_set *set = q->tag_set;
block/blk-mq.c
4328
list_del_rcu(&q->tag_set_list);
block/blk-mq.c
4339
struct request_queue *q)
block/blk-mq.c
4353
queue_set_hctx_shared(q, true);
block/blk-mq.c
4354
list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
block/blk-mq.c
4360
static int blk_mq_alloc_ctxs(struct request_queue *q)
block/blk-mq.c
4378
q->mq_kobj = &ctxs->kobj;
block/blk-mq.c
4379
q->queue_ctx = ctxs->queue_ctx;
block/blk-mq.c
4393
void blk_mq_release(struct request_queue *q)
block/blk-mq.c
4398
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
4402
list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
block/blk-mq.c
4407
kfree(q->queue_hw_ctx);
block/blk-mq.c
4413
blk_mq_sysfs_deinit(q);
block/blk-mq.c
4420
struct request_queue *q;
block/blk-mq.c
4429
q = blk_alloc_queue(lim, set->numa_node);
block/blk-mq.c
4430
if (IS_ERR(q))
block/blk-mq.c
4431
return q;
block/blk-mq.c
4432
q->queuedata = queuedata;
block/blk-mq.c
4433
ret = blk_mq_init_allocated_queue(set, q);
block/blk-mq.c
4435
blk_put_queue(q);
block/blk-mq.c
4438
return q;
block/blk-mq.c
4452
void blk_mq_destroy_queue(struct request_queue *q)
block/blk-mq.c
4454
WARN_ON_ONCE(!queue_is_mq(q));
block/blk-mq.c
4455
WARN_ON_ONCE(blk_queue_registered(q));
block/blk-mq.c
4459
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
block/blk-mq.c
4460
blk_queue_start_drain(q);
block/blk-mq.c
4461
blk_mq_freeze_queue_wait(q);
block/blk-mq.c
4463
blk_sync_queue(q);
block/blk-mq.c
4464
blk_mq_cancel_work_sync(q);
block/blk-mq.c
4465
blk_mq_exit_queue(q);
block/blk-mq.c
4473
struct request_queue *q;
block/blk-mq.c
4476
q = blk_mq_alloc_queue(set, lim, queuedata);
block/blk-mq.c
4477
if (IS_ERR(q))
block/blk-mq.c
4478
return ERR_CAST(q);
block/blk-mq.c
4480
disk = __alloc_disk_node(q, set->numa_node, lkclass);
block/blk-mq.c
4482
blk_mq_destroy_queue(q);
block/blk-mq.c
4483
blk_put_queue(q);
block/blk-mq.c
4491
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
block/blk-mq.c
4496
if (!blk_get_queue(q))
block/blk-mq.c
4498
disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
block/blk-mq.c
4500
blk_put_queue(q);
block/blk-mq.c
451
struct elevator_queue *e = data->q->elevator;
block/blk-mq.c
4515
struct blk_mq_tag_set *set, struct request_queue *q,
block/blk-mq.c
4521
spin_lock(&q->unused_hctx_lock);
block/blk-mq.c
4522
list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
block/blk-mq.c
4530
spin_unlock(&q->unused_hctx_lock);
block/blk-mq.c
4533
hctx = blk_mq_alloc_hctx(q, set, node);
block/blk-mq.c
4537
if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
block/blk-mq.c
4549
struct request_queue *q)
block/blk-mq.c
4552
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
block/blk-mq.c
4554
if (q->nr_hw_queues < set->nr_hw_queues) {
block/blk-mq.c
4563
memcpy(new_hctxs, hctxs, q->nr_hw_queues *
block/blk-mq.c
4565
rcu_assign_pointer(q->queue_hw_ctx, new_hctxs);
block/blk-mq.c
4581
blk_mq_exit_hctx(q, set, old_hctx, i);
block/blk-mq.c
4584
hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i, node);
block/blk-mq.c
4590
hctxs[i] = blk_mq_alloc_and_init_hctx(set, q, i,
block/blk-mq.c
4600
j = q->nr_hw_queues;
block/blk-mq.c
4604
end = q->nr_hw_queues;
block/blk-mq.c
4605
q->nr_hw_queues = set->nr_hw_queues;
block/blk-mq.c
4612
blk_mq_exit_hctx(q, set, hctx, j);
block/blk-mq.c
4619
struct request_queue *q)
block/blk-mq.c
4621
__blk_mq_realloc_hw_ctxs(set, q);
block/blk-mq.c
4624
blk_mq_remove_hw_queues_cpuhp(q);
block/blk-mq.c
4627
blk_mq_add_hw_queues_cpuhp(q);
block/blk-mq.c
4631
struct request_queue *q)
block/blk-mq.c
4634
q->mq_ops = set->ops;
block/blk-mq.c
4640
q->tag_set = set;
block/blk-mq.c
4642
if (blk_mq_alloc_ctxs(q))
block/blk-mq.c
4646
blk_mq_sysfs_init(q);
block/blk-mq.c
4648
INIT_LIST_HEAD(&q->unused_hctx_list);
block/blk-mq.c
4649
spin_lock_init(&q->unused_hctx_lock);
block/blk-mq.c
4651
blk_mq_realloc_hw_ctxs(set, q);
block/blk-mq.c
4652
if (!q->nr_hw_queues)
block/blk-mq.c
4655
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
block/blk-mq.c
4656
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
block/blk-mq.c
4658
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
block/blk-mq.c
4660
INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
block/blk-mq.c
4661
INIT_LIST_HEAD(&q->flush_list);
block/blk-mq.c
4662
INIT_LIST_HEAD(&q->requeue_list);
block/blk-mq.c
4663
spin_lock_init(&q->requeue_lock);
block/blk-mq.c
4665
q->nr_requests = set->queue_depth;
block/blk-mq.c
4666
q->async_depth = set->queue_depth;
block/blk-mq.c
4668
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
block/blk-mq.c
4669
blk_mq_map_swqueue(q);
block/blk-mq.c
4670
blk_mq_add_queue_tag_set(set, q);
block/blk-mq.c
4674
blk_mq_release(q);
block/blk-mq.c
4676
q->mq_ops = NULL;
block/blk-mq.c
4682
void blk_mq_exit_queue(struct request_queue *q)
block/blk-mq.c
4684
struct blk_mq_tag_set *set = q->tag_set;
block/blk-mq.c
4687
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
block/blk-mq.c
4689
blk_mq_del_queue_tag_set(q);
block/blk-mq.c
495
percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
block/blk-mq.c
4993
struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
block/blk-mq.c
4997
struct blk_mq_tag_set *set = q->tag_set;
block/blk-mq.c
5002
blk_mq_quiesce_queue(q);
block/blk-mq.c
5009
if (q->elevator)
block/blk-mq.c
5010
blk_mq_tag_update_sched_shared_tags(q, nr);
block/blk-mq.c
5013
} else if (!q->elevator) {
block/blk-mq.c
5018
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
5024
} else if (nr <= q->elevator->et->nr_requests) {
block/blk-mq.c
5026
queue_for_each_hw_ctx(q, hctx, i) {
block/blk-mq.c
5034
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
5036
old_et = q->elevator->et;
block/blk-mq.c
5037
q->elevator->et = et;
block/blk-mq.c
5044
q->async_depth = max(q->async_depth * nr / q->nr_requests, 1);
block/blk-mq.c
5045
q->nr_requests = nr;
block/blk-mq.c
5046
if (q->elevator && q->elevator->type->ops.depth_updated)
block/blk-mq.c
5047
q->elevator->type->ops.depth_updated(q);
block/blk-mq.c
5049
blk_mq_unquiesce_queue(q);
block/blk-mq.c
5056
static void blk_mq_elv_switch_back(struct request_queue *q,
block/blk-mq.c
5059
struct elv_change_ctx *ctx = xa_load(elv_tbl, q->id);
block/blk-mq.c
506
if (!data->q->elevator) {
block/blk-mq.c
5065
elv_update_nr_hw_queues(q, ctx);
block/blk-mq.c
5075
static int blk_mq_elv_switch_none(struct request_queue *q,
block/blk-mq.c
5080
lockdep_assert_held_write(&q->tag_set->update_nr_hwq_lock);
block/blk-mq.c
5089
if (q->elevator) {
block/blk-mq.c
5090
ctx = xa_load(elv_tbl, q->id);
block/blk-mq.c
5094
ctx->name = q->elevator->type->elevator_name;
block/blk-mq.c
5103
__elevator_get(q->elevator->type);
block/blk-mq.c
5109
ctx->type = q->elevator->type;
block/blk-mq.c
5110
elevator_set_none(q);
block/blk-mq.c
5118
struct request_queue *q;
block/blk-mq.c
5144
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq.c
5145
blk_mq_debugfs_unregister_hctxs(q);
block/blk-mq.c
5146
blk_mq_sysfs_unregister_hctxs(q);
block/blk-mq.c
5154
list_for_each_entry(q, &set->tag_list, tag_set_list)
block/blk-mq.c
5155
if (blk_mq_elv_switch_none(q, &elv_tbl))
block/blk-mq.c
5162
list_for_each_entry(q, &set->tag_list, tag_set_list)
block/blk-mq.c
5163
blk_mq_freeze_queue_nomemsave(q);
block/blk-mq.c
5173
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq.c
5174
__blk_mq_realloc_hw_ctxs(set, q);
block/blk-mq.c
5176
if (q->nr_hw_queues != set->nr_hw_queues) {
block/blk-mq.c
5187
blk_mq_map_swqueue(q);
block/blk-mq.c
5191
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq.c
5194
blk_mq_freeze_queue_nomemsave(q);
block/blk-mq.c
5195
blk_mq_elv_switch_back(q, &elv_tbl);
block/blk-mq.c
5198
list_for_each_entry(q, &set->tag_list, tag_set_list) {
block/blk-mq.c
5199
blk_mq_sysfs_register_hctxs(q);
block/blk-mq.c
5200
blk_mq_debugfs_register_hctxs(q);
block/blk-mq.c
5202
blk_mq_remove_hw_queues_cpuhp(q);
block/blk-mq.c
5203
blk_mq_add_hw_queues_cpuhp(q);
block/blk-mq.c
5226
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
block/blk-mq.c
5232
ret = q->mq_ops->poll(hctx, iob);
block/blk-mq.c
5245
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
block/blk-mq.c
5248
if (!blk_mq_can_poll(q))
block/blk-mq.c
5250
return blk_hctx_poll(q, q->queue_hw_ctx[cookie], iob, flags);
block/blk-mq.c
5256
struct request_queue *q = rq->q;
block/blk-mq.c
5261
if (!percpu_ref_tryget(&q->q_usage_counter))
block/blk-mq.c
5264
ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
block/blk-mq.c
5265
blk_queue_exit(q);
block/blk-mq.c
5277
void blk_mq_cancel_work_sync(struct request_queue *q)
block/blk-mq.c
5282
cancel_delayed_work_sync(&q->requeue_work);
block/blk-mq.c
5284
queue_for_each_hw_ctx(q, hctx, i)
block/blk-mq.c
532
ops = &data->q->elevator->type->ops;
block/blk-mq.c
539
struct request_queue *q = data->q;
block/blk-mq.c
54
static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
block/blk-mq.c
545
if (blk_queue_rq_alloc_time(q))
block/blk-mq.c
552
data->ctx = blk_mq_get_ctx(q);
block/blk-mq.c
597
static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
block/blk-mq.c
603
.q = q,
block/blk-mq.c
615
if (blk_queue_enter(q, flags))
block/blk-mq.c
622
blk_queue_exit(q);
block/blk-mq.c
626
static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
block/blk-mq.c
639
rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
block/blk-mq.c
644
if (!rq || rq->q != q)
block/blk-mq.c
661
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
block/blk-mq.c
666
rq = blk_mq_alloc_cached_request(q, opf, flags);
block/blk-mq.c
669
.q = q,
block/blk-mq.c
681
ret = blk_queue_enter(q, flags);
block/blk-mq.c
695
blk_queue_exit(q);
block/blk-mq.c
700
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
block/blk-mq.c
704
.q = q,
block/blk-mq.c
721
if (blk_queue_rq_alloc_time(q))
block/blk-mq.c
734
if (hctx_idx >= q->nr_hw_queues)
block/blk-mq.c
737
ret = blk_queue_enter(q, flags);
block/blk-mq.c
746
data.hctx = q->queue_hw_ctx[hctx_idx];
block/blk-mq.c
752
data.ctx = __blk_mq_get_ctx(q, cpu);
block/blk-mq.c
754
if (q->elevator)
block/blk-mq.c
777
blk_queue_exit(q);
block/blk-mq.c
784
struct request_queue *q = rq->q;
block/blk-mq.c
789
q->elevator->type->ops.finish_request(rq);
block/blk-mq.c
801
struct request_queue *q = rq->q;
block/blk-mq.c
817
blk_queue_exit(q);
block/blk-mq.c
822
struct request_queue *q = rq->q;
block/blk-mq.c
826
rq_qos_done(q, rq);
block/blk-mq.c
845
rq->q->disk ? rq->q->disk->disk_name : "?",
block/blk-mq.c
873
req->q->disk ? req->q->disk->disk_name : "?",
block/blk-mq.c
978
!test_bit(GD_DEAD, &req->q->disk->state)) {
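
Among the blk-mq.c entries, lines 261-331 show how quiescing nests: q->quiesce_depth counts quiesce/unquiesce pairs under q->queue_lock, QUEUE_FLAG_QUIESCED toggles only on the 0<->1 transitions, and the final unquiesce restarts dispatch. A sketch of both halves, reconstructed from those lines:

    static void my_quiesce_nowait(struct request_queue *q)
    {
            unsigned long flags;

            spin_lock_irqsave(&q->queue_lock, flags);
            if (!q->quiesce_depth++)
                    blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
            spin_unlock_irqrestore(&q->queue_lock, flags);
    }

    static void my_unquiesce(struct request_queue *q)
    {
            unsigned long flags;
            bool run = false;

            spin_lock_irqsave(&q->queue_lock, flags);
            if (!WARN_ON_ONCE(q->quiesce_depth <= 0) &&
                !--q->quiesce_depth) {
                    blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
                    run = true;
            }
            spin_unlock_irqrestore(&q->queue_lock, flags);
            if (run)
                    blk_mq_run_hw_queues(q, true);  /* restart dispatch */
    }
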
block/blk-mq.h
129
extern void blk_mq_sysfs_init(struct request_queue *q);
block/blk-mq.h
130
extern void blk_mq_sysfs_deinit(struct request_queue *q);
block/blk-mq.h
133
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
block/blk-mq.h
134
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
block/blk-mq.h
139
void blk_mq_cancel_work_sync(struct request_queue *q);
block/blk-mq.h
141
void blk_mq_release(struct request_queue *q);
block/blk-mq.h
143
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
block/blk-mq.h
146
return per_cpu_ptr(q->queue_ctx, cpu);
block/blk-mq.h
155
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
block/blk-mq.h
157
return __blk_mq_get_ctx(q, raw_smp_processor_id());
block/blk-mq.h
162
struct request_queue *q;
block/blk-mq.h
189
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
block/blk-mq.h
193
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
block/blk-mq.h
264
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
block/blk-mq.h
267
if (q->mq_ops->put_budget)
block/blk-mq.h
268
q->mq_ops->put_budget(q, budget_token);
block/blk-mq.h
271
static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
block/blk-mq.h
273
if (q->mq_ops->get_budget)
block/blk-mq.h
274
return q->mq_ops->get_budget(q);
block/blk-mq.h
283
if (rq->q->mq_ops->set_rq_budget_token)
block/blk-mq.h
284
rq->q->mq_ops->set_rq_budget_token(rq, token);
block/blk-mq.h
289
if (rq->q->mq_ops->get_rq_budget_token)
block/blk-mq.h
290
return rq->q->mq_ops->get_rq_budget_token(rq);
block/blk-mq.h
418
struct request_queue *q = hctx->queue;
block/blk-mq.h
420
if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
block/blk-mq.h
439
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
block/blk-mq.h
441
if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
block/blk-mq.h
442
struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
block/blk-mq.h
456
#define blk_mq_run_dispatch_ops(q, dispatch_ops) \
block/blk-mq.h
457
__blk_mq_run_dispatch_ops(q, true, dispatch_ops) \
block/blk-mq.h
459
static inline bool blk_mq_can_poll(struct request_queue *q)
block/blk-mq.h
46
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
block/blk-mq.h
461
return (q->limits.features & BLK_FEAT_POLL) &&
block/blk-mq.h
462
q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
block/blk-mq.h
48
void blk_mq_exit_queue(struct request_queue *q);
block/blk-mq.h
49
struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
block/blk-mq.h
52
void blk_mq_wake_waiters(struct request_queue *q);
block/blk-mq.h
83
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
block/blk-mq.h
87
return queue_hctx((q), (q->tag_set->map[type].mq_map[cpu]));
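
The budget helpers at blk-mq.h lines 264-274 make the driver hooks optional: with no ->get_budget callback a token of 0 is granted implicitly, and a negative token means the driver refused. Usage sketch matching the blk-mq-sched.c call sites at lines 112-118 (my_try_dispatch() and my_issue() are hypothetical):

    static bool my_issue(struct request *rq);  /* hypothetical driver issue */

    static bool my_try_dispatch(struct request_queue *q, struct request *rq)
    {
            int budget_token = blk_mq_get_dispatch_budget(q);

            if (budget_token < 0)
                    return false;           /* driver is out of budget */

            if (!my_issue(rq)) {
                    /* Not dispatched: the granted budget must be returned. */
                    blk_mq_put_dispatch_budget(q, budget_token);
                    return false;
            }
            return true;
    }
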
block/blk-pm.c
100
blk_clear_pm_only(q);
block/blk-pm.c
120
void blk_post_runtime_suspend(struct request_queue *q, int err)
block/blk-pm.c
122
if (!q->dev)
block/blk-pm.c
125
spin_lock_irq(&q->queue_lock);
block/blk-pm.c
127
q->rpm_status = RPM_SUSPENDED;
block/blk-pm.c
129
q->rpm_status = RPM_ACTIVE;
block/blk-pm.c
130
pm_runtime_mark_last_busy(q->dev);
block/blk-pm.c
132
spin_unlock_irq(&q->queue_lock);
block/blk-pm.c
135
blk_clear_pm_only(q);
block/blk-pm.c
150
void blk_pre_runtime_resume(struct request_queue *q)
block/blk-pm.c
152
if (!q->dev)
block/blk-pm.c
155
spin_lock_irq(&q->queue_lock);
block/blk-pm.c
156
q->rpm_status = RPM_RESUMING;
block/blk-pm.c
157
spin_unlock_irq(&q->queue_lock);
block/blk-pm.c
174
void blk_post_runtime_resume(struct request_queue *q)
block/blk-pm.c
178
if (!q->dev)
block/blk-pm.c
181
spin_lock_irq(&q->queue_lock);
block/blk-pm.c
182
old_status = q->rpm_status;
block/blk-pm.c
183
q->rpm_status = RPM_ACTIVE;
block/blk-pm.c
184
pm_runtime_mark_last_busy(q->dev);
block/blk-pm.c
185
pm_request_autosuspend(q->dev);
block/blk-pm.c
186
spin_unlock_irq(&q->queue_lock);
block/blk-pm.c
189
blk_clear_pm_only(q);
block/blk-pm.c
29
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
block/blk-pm.c
31
q->dev = dev;
block/blk-pm.c
32
q->rpm_status = RPM_ACTIVE;
block/blk-pm.c
33
pm_runtime_set_autosuspend_delay(q->dev, -1);
block/blk-pm.c
34
pm_runtime_use_autosuspend(q->dev);
block/blk-pm.c
59
int blk_pre_runtime_suspend(struct request_queue *q)
block/blk-pm.c
63
if (!q->dev)
block/blk-pm.c
66
WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
block/blk-pm.c
68
spin_lock_irq(&q->queue_lock);
block/blk-pm.c
69
q->rpm_status = RPM_SUSPENDING;
block/blk-pm.c
70
spin_unlock_irq(&q->queue_lock);
block/blk-pm.c
78
blk_set_pm_only(q);
block/blk-pm.c
81
blk_freeze_queue_start(q);
block/blk-pm.c
88
percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
block/blk-pm.c
89
if (percpu_ref_is_zero(&q->q_usage_counter))
block/blk-pm.c
92
blk_mq_unfreeze_queue_nomemrestore(q);
block/blk-pm.c
95
spin_lock_irq(&q->queue_lock);
block/blk-pm.c
96
q->rpm_status = RPM_ACTIVE;
block/blk-pm.c
97
pm_runtime_mark_last_busy(q->dev);
block/blk-pm.c
98
spin_unlock_irq(&q->queue_lock);
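Read together, the blk-pm.c lines trace q->rpm_status through the runtime-PM handshake: RPM_ACTIVE becomes RPM_SUSPENDING in blk_pre_runtime_suspend(), then either RPM_SUSPENDED or back to RPM_ACTIVE in blk_post_runtime_suspend() depending on err, always under q->queue_lock. A small sketch of that state walk, with a pthread mutex standing in for the spinlock and q_model as a hypothetical type:

#include <pthread.h>
#include <stdio.h>

enum rpm { RPM_ACTIVE, RPM_SUSPENDING, RPM_SUSPENDED, RPM_RESUMING };

struct q_model {
	pthread_mutex_t lock; /* stands in for q->queue_lock */
	enum rpm rpm_status;
};

static void pre_runtime_suspend(struct q_model *q)
{
	pthread_mutex_lock(&q->lock);
	q->rpm_status = RPM_SUSPENDING;
	pthread_mutex_unlock(&q->lock);
}

/* Mirrors blk_post_runtime_suspend(): commit to SUSPENDED on success,
 * roll back to ACTIVE on error. */
static void post_runtime_suspend(struct q_model *q, int err)
{
	pthread_mutex_lock(&q->lock);
	if (!err)
		q->rpm_status = RPM_SUSPENDED;
	else
		q->rpm_status = RPM_ACTIVE;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct q_model q = { PTHREAD_MUTEX_INITIALIZER, RPM_ACTIVE };

	pre_runtime_suspend(&q);
	post_runtime_suspend(&q, 0);
	printf("status: %d (2 == suspended)\n", q.rpm_status);
	return 0;
}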
block/blk-pm.h
11
if (!q->dev || !blk_queue_pm_only(q))
block/blk-pm.h
13
if (pm && q->rpm_status != RPM_SUSPENDED)
block/blk-pm.h
15
pm_request_resume(q->dev);
block/blk-pm.h
21
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
block/blk-pm.h
22
pm_runtime_mark_last_busy(rq->q->dev);
block/blk-pm.h
25
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
block/blk-pm.h
9
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
block/blk-rq-qos.c
313
void rq_qos_exit(struct request_queue *q)
block/blk-rq-qos.c
315
mutex_lock(&q->rq_qos_mutex);
block/blk-rq-qos.c
316
while (q->rq_qos) {
block/blk-rq-qos.c
317
struct rq_qos *rqos = q->rq_qos;
block/blk-rq-qos.c
318
q->rq_qos = rqos->next;
block/blk-rq-qos.c
321
blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
block/blk-rq-qos.c
322
mutex_unlock(&q->rq_qos_mutex);
block/blk-rq-qos.c
328
struct request_queue *q = disk->queue;
block/blk-rq-qos.c
331
lockdep_assert_held(&q->rq_qos_mutex);
block/blk-rq-qos.c
341
memflags = blk_mq_freeze_queue(q);
block/blk-rq-qos.c
343
if (rq_qos_id(q, rqos->id))
block/blk-rq-qos.c
345
rqos->next = q->rq_qos;
block/blk-rq-qos.c
346
q->rq_qos = rqos;
block/blk-rq-qos.c
347
blk_queue_flag_set(QUEUE_FLAG_QOS_ENABLED, q);
block/blk-rq-qos.c
349
blk_mq_unfreeze_queue(q, memflags);
block/blk-rq-qos.c
352
blk_mq_unfreeze_queue(q, memflags);
block/blk-rq-qos.c
358
struct request_queue *q = rqos->disk->queue;
block/blk-rq-qos.c
362
lockdep_assert_held(&q->rq_qos_mutex);
block/blk-rq-qos.c
364
memflags = blk_mq_freeze_queue(q);
block/blk-rq-qos.c
365
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
block/blk-rq-qos.c
371
if (!q->rq_qos)
block/blk-rq-qos.c
372
blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
block/blk-rq-qos.c
373
blk_mq_unfreeze_queue(q, memflags);
block/blk-rq-qos.h
113
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
block/blk-rq-qos.h
115
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
block/blk-rq-qos.h
116
__rq_qos_cleanup(q->rq_qos, bio);
block/blk-rq-qos.h
119
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
block/blk-rq-qos.h
121
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) &&
block/blk-rq-qos.h
122
q->rq_qos && !blk_rq_is_passthrough(rq))
block/blk-rq-qos.h
123
__rq_qos_done(q->rq_qos, rq);
block/blk-rq-qos.h
126
static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
block/blk-rq-qos.h
128
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
block/blk-rq-qos.h
129
__rq_qos_issue(q->rq_qos, rq);
block/blk-rq-qos.h
132
static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
block/blk-rq-qos.h
134
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
block/blk-rq-qos.h
135
__rq_qos_requeue(q->rq_qos, rq);
block/blk-rq-qos.h
140
struct request_queue *q;
block/blk-rq-qos.h
146
q = bdev_get_queue(bio->bi_bdev);
block/blk-rq-qos.h
156
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
block/blk-rq-qos.h
157
__rq_qos_done_bio(q->rq_qos, bio);
block/blk-rq-qos.h
160
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
block/blk-rq-qos.h
162
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
block/blk-rq-qos.h
164
__rq_qos_throttle(q->rq_qos, bio);
block/blk-rq-qos.h
168
static inline void rq_qos_track(struct request_queue *q, struct request *rq,
block/blk-rq-qos.h
171
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
block/blk-rq-qos.h
172
__rq_qos_track(q->rq_qos, rq, bio);
block/blk-rq-qos.h
175
static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
block/blk-rq-qos.h
178
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos) {
block/blk-rq-qos.h
180
__rq_qos_merge(q->rq_qos, rq, bio);
block/blk-rq-qos.h
184
static inline void rq_qos_queue_depth_changed(struct request_queue *q)
block/blk-rq-qos.h
186
if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
block/blk-rq-qos.h
187
__rq_qos_queue_depth_changed(q->rq_qos);
block/blk-rq-qos.h
61
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
block/blk-rq-qos.h
65
for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
block/blk-rq-qos.h
72
static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
block/blk-rq-qos.h
74
return rq_qos_id(q, RQ_QOS_WBT);
block/blk-rq-qos.h
77
static inline struct rq_qos *iolat_rq_qos(struct request_queue *q)
block/blk-rq-qos.h
79
return rq_qos_id(q, RQ_QOS_LATENCY);
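The rq_qos excerpts all revolve around one data structure: q->rq_qos is the head of a singly linked policy list, pushed at the head in rq_qos_add() and unlinked with a pointer-to-pointer walk in rq_qos_del() (the `for (cur = &q->rq_qos; *cur; cur = &(*cur)->next)` line above). A self-contained model of that list discipline, with rqos_model as a hypothetical stand-in for struct rq_qos:

#include <stdio.h>

struct rqos_model {
	int id;
	struct rqos_model *next;
};

/* Head insertion, as in rq_qos_add(): the newest policy shadows older ones. */
static void rqos_add(struct rqos_model **head, struct rqos_model *rqos)
{
	rqos->next = *head;
	*head = rqos;
}

/* Pointer-to-pointer removal, as in rq_qos_del(): no special case is
 * needed for the head element. */
static void rqos_del(struct rqos_model **head, struct rqos_model *rqos)
{
	struct rqos_model **cur;

	for (cur = head; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
}

/* Linear id lookup, as in rq_qos_id(). */
static struct rqos_model *rqos_id(struct rqos_model *head, int id)
{
	for (; head; head = head->next)
		if (head->id == id)
			return head;
	return NULL;
}

int main(void)
{
	struct rqos_model a = { 1, NULL }, b = { 2, NULL };
	struct rqos_model *head = NULL;

	rqos_add(&head, &a);
	rqos_add(&head, &b);
	rqos_del(&head, &a);
	printf("id 2 %s, id 1 %s\n",
	       rqos_id(head, 2) ? "found" : "missing",
	       rqos_id(head, 1) ? "found" : "missing");
	return 0;
}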
block/blk-settings.c
1033
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
block/blk-settings.c
1035
q->queue_depth = depth;
block/blk-settings.c
1036
rq_qos_queue_depth_changed(q);
block/blk-settings.c
1042
struct request_queue *q = bdev_get_queue(bdev);
block/blk-settings.c
1044
if (q->limits.flags & BLK_FLAG_MISALIGNED)
block/blk-settings.c
1047
return queue_limit_alignment_offset(&q->limits,
block/blk-settings.c
1049
return q->limits.alignment_offset;
block/blk-settings.c
1055
struct request_queue *q = bdev_get_queue(bdev);
block/blk-settings.c
1058
return queue_limit_discard_alignment(&q->limits,
block/blk-settings.c
1060
return q->limits.discard_alignment;
block/blk-settings.c
24
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
block/blk-settings.c
26
WRITE_ONCE(q->rq_timeout, timeout);
block/blk-settings.c
558
int queue_limits_commit_update(struct request_queue *q,
block/blk-settings.c
563
lockdep_assert_held(&q->limits_lock);
block/blk-settings.c
570
if (q->crypto_profile && lim->integrity.tag_size) {
block/blk-settings.c
577
q->limits = *lim;
block/blk-settings.c
578
if (q->disk)
block/blk-settings.c
579
blk_apply_bdi_limits(q->disk->bdi, lim);
block/blk-settings.c
581
mutex_unlock(&q->limits_lock);
block/blk-settings.c
597
int queue_limits_commit_update_frozen(struct request_queue *q,
block/blk-settings.c
603
memflags = blk_mq_freeze_queue(q);
block/blk-settings.c
604
ret = queue_limits_commit_update(q, lim);
block/blk-settings.c
605
blk_mq_unfreeze_queue(q, memflags);
block/blk-settings.c
622
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
block/blk-settings.c
624
mutex_lock(&q->limits_lock);
block/blk-settings.c
625
return queue_limits_commit_update(q, lim);
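blk-settings.c shows a two-phase limits update: queue_limits_commit_update() expects q->limits_lock to be held (taken by the start_update side, released on commit), and the _frozen variant additionally brackets the commit with a queue freeze. A sketch of that acquire-mutate-commit shape, with limits_model and q_model as hypothetical simplified types:

#include <pthread.h>
#include <stdio.h>

struct limits_model { unsigned int max_sectors; };

struct q_model {
	pthread_mutex_t limits_lock;
	struct limits_model limits;
};

/* Take the lock and hand back a private copy to edit, in the spirit
 * of queue_limits_start_update(). */
static struct limits_model limits_start_update(struct q_model *q)
{
	pthread_mutex_lock(&q->limits_lock);
	return q->limits;
}

/* Publish the edited copy and drop the lock, in the spirit of
 * queue_limits_commit_update(). */
static int limits_commit_update(struct q_model *q, struct limits_model *lim)
{
	q->limits = *lim;
	pthread_mutex_unlock(&q->limits_lock);
	return 0;
}

int main(void)
{
	struct q_model q = { PTHREAD_MUTEX_INITIALIZER, { 128 } };
	struct limits_model lim = limits_start_update(&q);

	lim.max_sectors = 256;          /* edits stay private... */
	limits_commit_update(&q, &lim); /* ...until committed under the lock */
	printf("max_sectors: %u\n", q.limits.max_sectors);
	return 0;
}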
block/blk-stat.c
132
void blk_stat_add_callback(struct request_queue *q,
block/blk-stat.c
147
spin_lock_irqsave(&q->stats->lock, flags);
block/blk-stat.c
148
list_add_tail_rcu(&cb->list, &q->stats->callbacks);
block/blk-stat.c
149
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
block/blk-stat.c
150
spin_unlock_irqrestore(&q->stats->lock, flags);
block/blk-stat.c
153
void blk_stat_remove_callback(struct request_queue *q,
block/blk-stat.c
158
spin_lock_irqsave(&q->stats->lock, flags);
block/blk-stat.c
160
if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
block/blk-stat.c
161
blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
block/blk-stat.c
162
spin_unlock_irqrestore(&q->stats->lock, flags);
block/blk-stat.c
183
void blk_stat_disable_accounting(struct request_queue *q)
block/blk-stat.c
187
spin_lock_irqsave(&q->stats->lock, flags);
block/blk-stat.c
188
if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
block/blk-stat.c
189
blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
block/blk-stat.c
190
spin_unlock_irqrestore(&q->stats->lock, flags);
block/blk-stat.c
194
void blk_stat_enable_accounting(struct request_queue *q)
block/blk-stat.c
198
spin_lock_irqsave(&q->stats->lock, flags);
block/blk-stat.c
199
if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
block/blk-stat.c
200
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
block/blk-stat.c
201
spin_unlock_irqrestore(&q->stats->lock, flags);
block/blk-stat.c
52
struct request_queue *q = rq->q;
block/blk-stat.c
62
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
block/blk-stat.h
102
void blk_stat_add_callback(struct request_queue *q,
block/blk-stat.h
114
void blk_stat_remove_callback(struct request_queue *q,
block/blk-stat.h
74
void blk_stat_enable_accounting(struct request_queue *q);
block/blk-stat.h
75
void blk_stat_disable_accounting(struct request_queue *q);
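blk-stat.c keeps QUEUE_FLAG_STATS set whenever either a callback is registered or the accounting counter is non-zero; note the pre/post increment tests in the excerpts (`!q->stats->accounting++` and `!--q->stats->accounting`) that catch exactly the 0->1 and 1->0 edges. A compact model of that edge-triggered flag, with a plain bool standing in for the queue flag:

#include <stdbool.h>
#include <stdio.h>

struct stats_model {
	int accounting;  /* refcount of accounting users */
	int callbacks;   /* number of registered callbacks */
	bool stats_flag; /* stands in for QUEUE_FLAG_STATS */
};

static void enable_accounting(struct stats_model *s)
{
	/* 0 -> 1 edge: set the flag unless a callback already did. */
	if (!s->accounting++ && s->callbacks == 0)
		s->stats_flag = true;
}

static void disable_accounting(struct stats_model *s)
{
	/* 1 -> 0 edge: clear the flag only if no callbacks remain either. */
	if (!--s->accounting && s->callbacks == 0)
		s->stats_flag = false;
}

int main(void)
{
	struct stats_model s = { 0, 0, false };

	enable_accounting(&s);  /* flag goes on */
	enable_accounting(&s);  /* nested user, no change */
	disable_accounting(&s); /* still one user, flag stays on */
	disable_accounting(&s); /* last user, flag goes off */
	printf("flag: %s\n", s.stats_flag ? "on" : "off");
	return 0;
}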
block/blk-sysfs.c
1006
mutex_lock(&q->sysfs_lock);
block/blk-sysfs.c
1007
blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
block/blk-sysfs.c
1008
mutex_unlock(&q->sysfs_lock);
block/blk-sysfs.c
1014
if (queue_is_mq(q))
block/blk-sysfs.c
1018
mutex_lock(&q->sysfs_lock);
block/blk-sysfs.c
1020
mutex_unlock(&q->sysfs_lock);
block/blk-sysfs.c
1026
if (queue_is_mq(q))
block/blk-sysfs.c
1027
elevator_set_none(q);
block/blk-sysfs.c
103
(q->elevator && nr > MAX_SCHED_RQ) ||
block/blk-sysfs.c
104
(!q->elevator && nr > set->queue_depth)) {
block/blk-sysfs.c
109
if (!blk_mq_is_shared_tags(set->flags) && q->elevator &&
block/blk-sysfs.c
110
nr > q->elevator->et->nr_requests) {
block/blk-sysfs.c
115
et = blk_mq_alloc_sched_tags(set, q->nr_hw_queues, nr);
block/blk-sysfs.c
122
memflags = blk_mq_freeze_queue(q);
block/blk-sysfs.c
123
mutex_lock(&q->elevator_lock);
block/blk-sysfs.c
124
et = blk_mq_update_nr_requests(q, et, nr);
block/blk-sysfs.c
125
mutex_unlock(&q->elevator_lock);
block/blk-sysfs.c
126
blk_mq_unfreeze_queue(q, memflags);
block/blk-sysfs.c
146
struct request_queue *q = disk->queue;
block/blk-sysfs.c
151
if (!queue_is_mq(q))
block/blk-sysfs.c
161
memflags = blk_mq_freeze_queue(q);
block/blk-sysfs.c
162
scoped_guard(mutex, &q->elevator_lock) {
block/blk-sysfs.c
163
if (q->elevator) {
block/blk-sysfs.c
164
q->async_depth = min(q->nr_requests, nr);
block/blk-sysfs.c
165
if (q->elevator->type->ops.depth_updated)
block/blk-sysfs.c
166
q->elevator->type->ops.depth_updated(q);
block/blk-sysfs.c
171
blk_mq_unfreeze_queue(q, memflags);
block/blk-sysfs.c
192
struct request_queue *q = disk->queue;
block/blk-sysfs.c
205
mutex_lock(&q->limits_lock);
block/blk-sysfs.c
207
mutex_unlock(&q->limits_lock);
block/blk-sysfs.c
425
struct request_queue *q = disk->queue;
block/blk-sysfs.c
431
blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
block/blk-sysfs.c
432
blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
block/blk-sysfs.c
434
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
block/blk-sysfs.c
436
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
block/blk-sysfs.c
454
struct request_queue *q = disk->queue;
block/blk-sysfs.c
468
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
block/blk-sysfs.c
469
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
block/blk-sysfs.c
471
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
block/blk-sysfs.c
472
blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
block/blk-sysfs.c
474
blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
block/blk-sysfs.c
475
blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
block/blk-sysfs.c
491
struct request_queue *q = disk->queue;
block/blk-sysfs.c
493
if (!(q->limits.features & BLK_FEAT_POLL)) {
block/blk-sysfs.c
515
struct request_queue *q = disk->queue;
block/blk-sysfs.c
521
blk_queue_rq_timeout(q, msecs_to_jiffies(val));
block/blk-sysfs.c
664
struct request_queue *q = disk->queue;
block/blk-sysfs.c
667
if (!wbt_rq_qos(q)) {
block/blk-sysfs.c
67
struct request_queue *q = disk->queue;
block/blk-sysfs.c
672
if (wbt_disabled(q)) {
block/blk-sysfs.c
677
ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
block/blk-sysfs.c
68
struct blk_mq_tag_set *set = q->tag_set;
block/blk-sysfs.c
786
struct request_queue *q = disk->queue;
block/blk-sysfs.c
790
!blk_queue_is_zoned(q))
block/blk-sysfs.c
800
struct request_queue *q = disk->queue;
block/blk-sysfs.c
802
if (!queue_is_mq(q))
block/blk-sysfs.c
805
if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
block/blk-sysfs.c
850
struct request_queue *q = disk->queue;
block/blk-sysfs.c
858
struct queue_limits lim = queue_limits_start_update(q);
block/blk-sysfs.c
862
queue_limits_cancel_update(q);
block/blk-sysfs.c
866
res = queue_limits_commit_update_frozen(q, &lim);
block/blk-sysfs.c
899
struct request_queue *q = disk->queue;
block/blk-sysfs.c
90
if (nr == q->nr_requests)
block/blk-sysfs.c
901
blk_debugfs_lock_nomemsave(q);
block/blk-sysfs.c
902
blk_trace_shutdown(q);
block/blk-sysfs.c
903
debugfs_remove_recursive(q->debugfs_dir);
block/blk-sysfs.c
904
q->debugfs_dir = NULL;
block/blk-sysfs.c
905
q->sched_debugfs_dir = NULL;
block/blk-sysfs.c
906
q->rqos_debugfs_dir = NULL;
block/blk-sysfs.c
907
blk_debugfs_unlock_nomemrestore(q);
block/blk-sysfs.c
916
struct request_queue *q = disk->queue;
block/blk-sysfs.c
924
if (queue_is_mq(q)) {
block/blk-sysfs.c
929
mutex_lock(&q->sysfs_lock);
block/blk-sysfs.c
931
memflags = blk_debugfs_lock(q);
block/blk-sysfs.c
932
q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
block/blk-sysfs.c
933
if (queue_is_mq(q))
block/blk-sysfs.c
934
blk_mq_debugfs_register(q);
block/blk-sysfs.c
935
blk_debugfs_unlock(q, memflags);
block/blk-sysfs.c
945
if (queue_is_mq(q))
block/blk-sysfs.c
946
elevator_set_default(q);
block/blk-sysfs.c
948
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
block/blk-sysfs.c
953
if (q->elevator)
block/blk-sysfs.c
954
kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
block/blk-sysfs.c
955
mutex_unlock(&q->sysfs_lock);
block/blk-sysfs.c
966
blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
block/blk-sysfs.c
967
percpu_ref_switch_to_percpu(&q->q_usage_counter);
block/blk-sysfs.c
975
mutex_unlock(&q->sysfs_lock);
block/blk-sysfs.c
976
if (queue_is_mq(q))
block/blk-sysfs.c
992
struct request_queue *q = disk->queue;
block/blk-sysfs.c
994
if (WARN_ON(!q))
block/blk-sysfs.c
998
if (!blk_queue_registered(q))
block/blk-throttle.c
1126
struct request_queue *q;
block/blk-throttle.c
1132
q = tg->pd.blkg->q;
block/blk-throttle.c
1134
q = td->queue;
block/blk-throttle.c
1136
spin_lock_irq(&q->queue_lock);
block/blk-throttle.c
1138
if (!q->root_blkg)
block/blk-throttle.c
1162
spin_unlock_irq(&q->queue_lock);
block/blk-throttle.c
1164
spin_lock_irq(&q->queue_lock);
block/blk-throttle.c
1187
spin_unlock_irq(&q->queue_lock);
block/blk-throttle.c
1203
struct request_queue *q = td->queue;
block/blk-throttle.c
1211
spin_lock_irq(&q->queue_lock);
block/blk-throttle.c
1215
spin_unlock_irq(&q->queue_lock);
block/blk-throttle.c
1311
struct request_queue *q = disk->queue;
block/blk-throttle.c
1316
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
block/blk-throttle.c
1326
q->td = td;
block/blk-throttle.c
1327
td->queue = q;
block/blk-throttle.c
1332
q->td = NULL;
block/blk-throttle.c
1617
static void throtl_shutdown_wq(struct request_queue *q)
block/blk-throttle.c
1619
struct throtl_data *td = q->td;
block/blk-throttle.c
1673
struct request_queue *q = disk->queue;
block/blk-throttle.c
1677
if (!blk_throtl_activated(q))
block/blk-throttle.c
1680
spin_lock_irq(&q->queue_lock);
block/blk-throttle.c
1687
blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
block/blk-throttle.c
1699
spin_unlock_irq(&q->queue_lock);
block/blk-throttle.c
1732
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk-throttle.c
1742
spin_lock_irq(&q->queue_lock);
block/blk-throttle.c
1818
spin_unlock_irq(&q->queue_lock);
block/blk-throttle.c
1826
struct request_queue *q = disk->queue;
block/blk-throttle.c
1832
if (!q->td)
block/blk-throttle.c
1835
timer_delete_sync(&q->td->service_queue.pending_timer);
block/blk-throttle.c
1836
throtl_shutdown_wq(q);
block/blk-throttle.c
1837
kfree(q->td);
block/blk-throttle.c
303
struct throtl_data *td = blkg->q->td;
block/blk-throttle.h
157
static inline bool blk_throtl_activated(struct request_queue *q)
block/blk-throttle.h
165
return q->td != NULL && blkcg_policy_enabled(q, &blkcg_policy_throtl);
block/blk-timeout.c
130
struct request_queue *q = req->q;
block/blk-timeout.c
138
req->timeout = q->rq_timeout;
block/blk-timeout.c
152
if (!timer_pending(&q->timeout) ||
block/blk-timeout.c
153
time_before(expiry, q->timeout.expires)) {
block/blk-timeout.c
154
unsigned long diff = q->timeout.expires - expiry;
block/blk-timeout.c
163
if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
block/blk-timeout.c
164
mod_timer(&q->timeout, expiry);
block/blk-timeout.c
23
bool __blk_should_fake_timeout(struct request_queue *q)
block/blk-timeout.c
55
struct request_queue *q = disk->queue;
block/blk-timeout.c
60
blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
block/blk-timeout.c
62
blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
block/blk-timeout.c
87
kblockd_schedule_work(&req->q->timeout_work);
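The blk-timeout.c excerpt around mod_timer() implements timer slack: the queue timer is re-armed for an earlier expiry only when it is not already pending, or when the win (`diff = q->timeout.expires - expiry`) is at least HZ/2; otherwise the existing, slightly later expiry is reused to avoid re-arming the timer for every request. A sketch of that decision with plain integers modeling jiffies; HZ and the field names here are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define HZ 100 /* illustrative tick rate */

struct timer_model {
	bool pending;
	unsigned long expires;
};

/* Returns true when the timer should actually be re-armed, mirroring
 * the !timer_pending(...) || diff >= HZ/2 logic in the excerpt. */
static bool should_mod_timer(const struct timer_model *t, unsigned long expiry)
{
	unsigned long diff;

	if (!t->pending)
		return true;      /* nothing armed yet: arm it */
	if (expiry >= t->expires)
		return false;     /* the armed timer already fires first */
	diff = t->expires - expiry;
	return diff >= HZ / 2;    /* only move it for a meaningful win */
}

int main(void)
{
	struct timer_model t = { true, 1000 };

	printf("%d\n", should_mod_timer(&t, 990)); /* 0: 10 ticks, not worth it */
	printf("%d\n", should_mod_timer(&t, 900)); /* 1: 100 ticks earlier */
	return 0;
}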
block/blk-wbt.c
1003
val = wbt_default_latency_nsec(q);
block/blk-wbt.c
1007
if (wbt_get_min_lat(q) == val)
block/blk-wbt.c
1010
blk_mq_quiesce_queue(q);
block/blk-wbt.c
1013
wbt_set_min_lat(q, val);
block/blk-wbt.c
1016
blk_mq_unquiesce_queue(q);
block/blk-wbt.c
1018
blk_mq_unfreeze_queue(q, memflags);
block/blk-wbt.c
1020
memflags = blk_debugfs_lock(q);
block/blk-wbt.c
1021
blk_mq_debugfs_register_rq_qos(q);
block/blk-wbt.c
1022
blk_debugfs_unlock(q, memflags);
block/blk-wbt.c
496
bool wbt_disabled(struct request_queue *q)
block/blk-wbt.c
498
struct rq_qos *rqos = wbt_rq_qos(q);
block/blk-wbt.c
503
u64 wbt_get_min_lat(struct request_queue *q)
block/blk-wbt.c
505
struct rq_qos *rqos = wbt_rq_qos(q);
block/blk-wbt.c
511
static void wbt_set_min_lat(struct request_queue *q, u64 val)
block/blk-wbt.c
513
struct rq_qos *rqos = wbt_rq_qos(q);
block/blk-wbt.c
741
struct request_queue *q = disk->queue;
block/blk-wbt.c
747
if (blk_queue_disable_wbt(q))
block/blk-wbt.c
751
rqos = wbt_rq_qos(q);
block/blk-wbt.c
761
if (!blk_queue_registered(q))
block/blk-wbt.c
764
if (queue_is_mq(q) && enable)
block/blk-wbt.c
777
struct request_queue *q = disk->queue;
block/blk-wbt.c
793
memflags = blk_debugfs_lock(q);
block/blk-wbt.c
794
blk_mq_debugfs_register_rq_qos(q);
block/blk-wbt.c
795
blk_debugfs_unlock(q, memflags);
block/blk-wbt.c
798
static u64 wbt_default_latency_nsec(struct request_queue *q)
block/blk-wbt.c
804
if (blk_queue_rot(q))
block/blk-wbt.c
946
struct request_queue *q = disk->queue;
block/blk-wbt.c
957
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
block/blk-wbt.c
958
rwb->rq_depth.queue_depth = blk_queue_depth(q);
block/blk-wbt.c
964
mutex_lock(&q->rq_qos_mutex);
block/blk-wbt.c
966
mutex_unlock(&q->rq_qos_mutex);
block/blk-wbt.c
970
blk_stat_add_callback(q, rwb->cb);
block/blk-wbt.c
976
struct request_queue *q = disk->queue;
block/blk-wbt.c
977
struct rq_qos *rqos = wbt_rq_qos(q);
block/blk-wbt.c
993
memflags = blk_mq_freeze_queue(q);
block/blk-wbt.h
11
u64 wbt_get_min_lat(struct request_queue *q);
block/blk-wbt.h
12
bool wbt_disabled(struct request_queue *q);
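wbt_default_latency_nsec(), excerpted above via its blk_queue_rot() test, picks the writeback-throttling latency target from one device property: rotational media get a much larger budget than flash. A sketch of that selection; the nanosecond constants below are illustrative placeholders, not necessarily the kernel's exact defaults:

#include <stdbool.h>
#include <stdio.h>

/* Pick a completion-latency target from the rotational flag, in the
 * spirit of wbt_default_latency_nsec(). Values are placeholders. */
static unsigned long long default_latency_nsec(bool rotational)
{
	if (rotational)
		return 75ULL * 1000 * 1000; /* tens of ms for spinning disks */
	return 2ULL * 1000 * 1000;          /* low ms for SSDs */
}

int main(void)
{
	printf("hdd: %llu ns, ssd: %llu ns\n",
	       default_latency_nsec(true), default_latency_nsec(false));
	return 0;
}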
block/blk-zoned.c
1291
struct request_queue *q = req->q;
block/blk-zoned.c
1292
struct gendisk *disk = q->disk;
block/blk-zoned.c
1308
if (blk_queue_nomerges(q))
block/blk-zoned.c
1336
blk_queue_exit(q);
block/blk-zoned.c
1709
struct gendisk *disk = req->q->disk;
block/blk-zoned.c
1948
struct request_queue *q = disk->queue;
block/blk-zoned.c
1954
lim = queue_limits_start_update(q);
block/blk-zoned.c
1956
memflags = blk_mq_freeze_queue(q);
block/blk-zoned.c
1960
queue_limits_cancel_update(q);
block/blk-zoned.c
2007
ret = queue_limits_commit_update(q, &lim);
block/blk-zoned.c
2013
blk_mq_unfreeze_queue(q, memflags);
block/blk-zoned.c
2207
struct request_queue *q = disk->queue;
block/blk-zoned.c
2208
sector_t zone_sectors = q->limits.chunk_sectors;
block/blk-zoned.c
2218
if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
block/blk-zoned.c
2267
memflags = blk_mq_freeze_queue(q);
block/blk-zoned.c
2269
blk_mq_unfreeze_queue(q, memflags);
block/blk-zoned.c
2345
struct request_queue *q = data;
block/blk-zoned.c
2346
struct gendisk *disk = q->disk;
block/blk-zoned.c
728
struct request_queue *q = zwplug->disk->queue;
block/blk-zoned.c
734
blk_queue_exit(q);
block/blk.h
116
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
block/blk.h
119
static inline bool biovec_phys_mergeable(struct request_queue *q,
block/blk.h
122
unsigned long mask = queue_segment_boundary(q);
block/blk.h
194
queue_max_discard_segments(req->q) > 1)
block/blk.h
202
return queue_max_discard_segments(rq->q);
block/blk.h
203
return queue_max_segments(rq->q);
block/blk.h
208
struct request_queue *q = rq->q;
block/blk.h
212
return min(q->limits.max_discard_sectors,
block/blk.h
216
return min(q->limits.max_secure_erase_sectors,
block/blk.h
220
return q->limits.max_write_zeroes_sectors;
block/blk.h
223
return q->limits.atomic_write_max_sectors;
block/blk.h
225
return q->limits.max_sectors;
block/blk.h
259
return bvec_gap_to_prev(&req->q->limits,
block/blk.h
270
return bvec_gap_to_prev(&req->q->limits,
block/blk.h
321
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
block/blk.h
323
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
block/blk.h
339
void elv_update_nr_hw_queues(struct request_queue *q,
block/blk.h
341
void elevator_set_default(struct request_queue *q);
block/blk.h
342
void elevator_set_none(struct request_queue *q);
block/blk.h
452
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
block/blk.h
465
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
block/blk.h
468
if (req == q->last_merge)
block/blk.h
469
q->last_merge = NULL;
block/blk.h
475
struct io_cq *ioc_find_get_icq(struct request_queue *q);
block/blk.h
476
struct io_cq *ioc_lookup_icq(struct request_queue *q);
block/blk.h
478
void ioc_clear_queue(struct request_queue *q);
block/blk.h
480
static inline void ioc_clear_queue(struct request_queue *q)
block/blk.h
50
void blk_free_flush_queue(struct blk_flush_queue *q);
block/blk.h
52
bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
block/blk.h
53
bool blk_queue_start_drain(struct request_queue *q);
block/blk.h
54
bool __blk_freeze_queue_start(struct request_queue *q,
block/blk.h
56
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
block/blk.h
60
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
block/blk.h
600
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
block/blk.h
63
if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
block/blk.h
70
if (blk_queue_pm_only(q) &&
block/blk.h
708
static inline void blk_freeze_acquire_lock(struct request_queue *q)
block/blk.h
71
(!pm || queue_rpm_status(q) == RPM_SUSPENDED))
block/blk.h
710
if (!q->mq_freeze_disk_dead)
block/blk.h
711
rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
block/blk.h
712
if (!q->mq_freeze_queue_dying)
block/blk.h
713
rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
block/blk.h
716
static inline void blk_unfreeze_release_lock(struct request_queue *q)
block/blk.h
718
if (!q->mq_freeze_queue_dying)
block/blk.h
719
rwsem_release(&q->q_lockdep_map, _RET_IP_);
block/blk.h
720
if (!q->mq_freeze_disk_dead)
block/blk.h
721
rwsem_release(&q->io_lockdep_map, _RET_IP_);
block/blk.h
724
static inline void blk_freeze_acquire_lock(struct request_queue *q)
block/blk.h
727
static inline void blk_unfreeze_release_lock(struct request_queue *q)
block/blk.h
738
static inline void blk_debugfs_lock_nomemsave(struct request_queue *q)
block/blk.h
740
mutex_lock(&q->debugfs_mutex);
block/blk.h
743
static inline void blk_debugfs_unlock_nomemrestore(struct request_queue *q)
block/blk.h
745
mutex_unlock(&q->debugfs_mutex);
block/blk.h
748
static inline unsigned int __must_check blk_debugfs_lock(struct request_queue *q)
block/blk.h
752
blk_debugfs_lock_nomemsave(q);
block/blk.h
756
static inline void blk_debugfs_unlock(struct request_queue *q,
block/blk.h
759
blk_debugfs_unlock_nomemrestore(q);
block/blk.h
78
blk_queue_exit(q);
block/blk.h
86
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
block/blk.h
88
if (blk_try_enter_queue(q, false)) {
block/blk.h
89
rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
block/blk.h
90
rwsem_release(&q->io_lockdep_map, _RET_IP_);
block/blk.h
93
return __bio_queue_enter(q, bio);
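The blk.h lines around blk_try_enter_queue() show the bio submission gate: a percpu-ref tryget is the lockless fast path, the pm-only recheck can bounce the caller toward pm_request_resume(), and failure falls through to the slow path __bio_queue_enter(). A deliberately simplified model using a plain atomic counter with a drain flag in place of the percpu ref (the bump-then-recheck ordering here is a simplification of the real percpu_ref semantics):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct gate_model {
	atomic_int users;
	atomic_bool draining; /* set by the freeze/drain path */
};

/* Fast path, loosely mirroring blk_try_enter_queue(): take a
 * reference, then back out if the queue started draining. */
static bool try_enter(struct gate_model *g)
{
	atomic_fetch_add(&g->users, 1);
	if (atomic_load(&g->draining)) {
		atomic_fetch_sub(&g->users, 1); /* like blk_queue_exit() */
		return false;                   /* caller takes the slow path */
	}
	return true;
}

int main(void)
{
	struct gate_model g = { 0, false };

	printf("enter: %d\n", try_enter(&g)); /* 1: fast path */
	atomic_store(&g.draining, true);
	printf("enter: %d\n", try_enter(&g)); /* 0: would fall back */
	return 0;
}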
block/bsg-lib.c
196
if (likely(!blk_should_fake_timeout(rq->q)))
block/bsg-lib.c
275
struct request_queue *q = hctx->queue;
block/bsg-lib.c
276
struct device *dev = q->queuedata;
block/bsg-lib.c
279
container_of(q->tag_set, struct bsg_set, tag_set);
block/bsg-lib.c
28
static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
block/bsg-lib.c
320
void bsg_remove_queue(struct request_queue *q)
block/bsg-lib.c
322
if (q) {
block/bsg-lib.c
324
container_of(q->tag_set, struct bsg_set, tag_set);
block/bsg-lib.c
327
blk_mq_destroy_queue(q);
block/bsg-lib.c
328
blk_put_queue(q);
block/bsg-lib.c
338
container_of(rq->q->tag_set, struct bsg_set, tag_set);
block/bsg-lib.c
368
struct request_queue *q;
block/bsg-lib.c
388
q = blk_mq_alloc_queue(set, lim, dev);
block/bsg-lib.c
389
if (IS_ERR(q)) {
block/bsg-lib.c
390
ret = PTR_ERR(q);
block/bsg-lib.c
394
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
block/bsg-lib.c
396
bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
block/bsg-lib.c
402
return q;
block/bsg-lib.c
404
blk_mq_destroy_queue(q);
block/bsg-lib.c
405
blk_put_queue(q);
block/bsg-lib.c
43
rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
block/bsg-lib.c
64
job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
block/bsg-lib.c
70
ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
block/bsg-lib.c
84
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
block/bsg-lib.c
87
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
block/bsg.c
107
struct request_queue *q = bd->queue;
block/bsg.c
138
return put_user(min(bd->reserved_size, queue_max_bytes(q)),
block/bsg.c
146
min_t(unsigned int, val, queue_max_bytes(q));
block/bsg.c
189
struct bsg_device *bsg_register_queue(struct request_queue *q,
block/bsg.c
200
bd->queue = q;
block/bsg.c
223
if (q->disk && q->disk->queue_kobj.sd) {
block/bsg.c
224
ret = sysfs_create_link(&q->disk->queue_kobj, &bd->device.kobj,
block/elevator.c
123
struct elevator_queue *elevator_alloc(struct request_queue *q,
block/elevator.c
128
eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
block/elevator.c
152
static void elevator_exit(struct request_queue *q)
block/elevator.c
154
struct elevator_queue *e = q->elevator;
block/elevator.c
156
lockdep_assert_held(&q->elevator_lock);
block/elevator.c
158
ioc_clear_queue(q);
block/elevator.c
161
blk_mq_exit_sched(q, e);
block/elevator.c
171
void elv_rqhash_del(struct request_queue *q, struct request *rq)
block/elevator.c
178
void elv_rqhash_add(struct request_queue *q, struct request *rq)
block/elevator.c
180
struct elevator_queue *e = q->elevator;
block/elevator.c
188
void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
block/elevator.c
191
elv_rqhash_add(q, rq);
block/elevator.c
194
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
block/elevator.c
196
struct elevator_queue *e = q->elevator;
block/elevator.c
268
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
block/elevator.c
271
struct elevator_queue *e = q->elevator;
block/elevator.c
280
if (blk_queue_nomerges(q) || !bio_mergeable(bio))
block/elevator.c
286
if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
block/elevator.c
287
enum elv_merge ret = blk_try_merge(q->last_merge, bio);
block/elevator.c
290
*req = q->last_merge;
block/elevator.c
295
if (blk_queue_noxmerges(q))
block/elevator.c
301
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
block/elevator.c
311
return e->type->ops.request_merge(q, req, bio);
block/elevator.c
324
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
block/elevator.c
330
if (blk_queue_nomerges(q))
block/elevator.c
336
if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
block/elevator.c
341
if (blk_queue_noxmerges(q))
block/elevator.c
349
__rq = elv_rqhash_find(q, blk_rq_pos(rq));
block/elevator.c
350
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
block/elevator.c
362
void elv_merged_request(struct request_queue *q, struct request *rq,
block/elevator.c
365
struct elevator_queue *e = q->elevator;
block/elevator.c
368
e->type->ops.request_merged(q, rq, type);
block/elevator.c
371
elv_rqhash_reposition(q, rq);
block/elevator.c
373
q->last_merge = rq;
block/elevator.c
376
void elv_merge_requests(struct request_queue *q, struct request *rq,
block/elevator.c
379
struct elevator_queue *e = q->elevator;
block/elevator.c
382
e->type->ops.requests_merged(q, rq, next);
block/elevator.c
384
elv_rqhash_reposition(q, rq);
block/elevator.c
385
q->last_merge = rq;
block/elevator.c
388
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
block/elevator.c
390
struct elevator_queue *e = q->elevator;
block/elevator.c
393
return e->type->ops.next_request(q, rq);
block/elevator.c
398
struct request *elv_former_request(struct request_queue *q, struct request *rq)
block/elevator.c
400
struct elevator_queue *e = q->elevator;
block/elevator.c
403
return e->type->ops.former_request(q, rq);
block/elevator.c
457
static int elv_register_queue(struct request_queue *q,
block/elevator.c
463
error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
block/elevator.c
480
blk_mq_sched_reg_debugfs(q);
block/elevator.c
486
static void elv_unregister_queue(struct request_queue *q,
block/elevator.c
494
blk_mq_sched_unreg_debugfs(q);
block/elevator.c
562
static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
block/elevator.c
567
WARN_ON_ONCE(q->mq_freeze_depth == 0);
block/elevator.c
568
lockdep_assert_held(&q->elevator_lock);
block/elevator.c
576
blk_mq_quiesce_queue(q);
block/elevator.c
578
if (q->elevator) {
block/elevator.c
579
ctx->old = q->elevator;
block/elevator.c
580
elevator_exit(q);
block/elevator.c
584
ret = blk_mq_init_sched(q, new_e, &ctx->res);
block/elevator.c
587
ctx->new = q->elevator;
block/elevator.c
589
blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
block/elevator.c
590
q->elevator = NULL;
block/elevator.c
591
q->nr_requests = q->tag_set->queue_depth;
block/elevator.c
592
q->async_depth = q->tag_set->queue_depth;
block/elevator.c
594
blk_add_trace_msg(q, "elv switch: %s", ctx->name);
block/elevator.c
597
blk_mq_unquiesce_queue(q);
block/elevator.c
610
struct request_queue *q)
block/elevator.c
615
memflags = blk_mq_freeze_queue(q);
block/elevator.c
616
mutex_lock(&q->elevator_lock);
block/elevator.c
617
e = q->elevator;
block/elevator.c
618
elevator_exit(q);
block/elevator.c
619
mutex_unlock(&q->elevator_lock);
block/elevator.c
62
struct request_queue *q = rq->q;
block/elevator.c
620
blk_mq_unfreeze_queue(q, memflags);
block/elevator.c
622
blk_mq_free_sched_res(&ctx->res, ctx->type, q->tag_set);
block/elevator.c
627
static int elevator_change_done(struct request_queue *q,
block/elevator.c
63
struct elevator_queue *e = q->elevator;
block/elevator.c
638
elv_unregister_queue(q, ctx->old);
block/elevator.c
639
blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set);
block/elevator.c
643
ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
block/elevator.c
645
elv_exit_and_release(ctx, q);
block/elevator.c
653
static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
block/elevator.c
656
struct blk_mq_tag_set *set = q->tag_set;
block/elevator.c
66
return e->type->ops.allow_merge(q, rq, bio);
block/elevator.c
662
ret = blk_mq_alloc_sched_res(q, ctx->type, &ctx->res,
block/elevator.c
668
memflags = blk_mq_freeze_queue(q);
block/elevator.c
678
blk_mq_cancel_work_sync(q);
block/elevator.c
679
mutex_lock(&q->elevator_lock);
block/elevator.c
680
if (!(q->elevator && elevator_match(q->elevator->type, ctx->name)))
block/elevator.c
681
ret = elevator_switch(q, ctx);
block/elevator.c
682
mutex_unlock(&q->elevator_lock);
block/elevator.c
683
blk_mq_unfreeze_queue(q, memflags);
block/elevator.c
685
ret = elevator_change_done(q, ctx);
block/elevator.c
700
void elv_update_nr_hw_queues(struct request_queue *q,
block/elevator.c
703
struct blk_mq_tag_set *set = q->tag_set;
block/elevator.c
706
WARN_ON_ONCE(q->mq_freeze_depth == 0);
block/elevator.c
708
if (ctx->type && !blk_queue_dying(q) && blk_queue_registered(q)) {
block/elevator.c
709
mutex_lock(&q->elevator_lock);
block/elevator.c
711
ret = elevator_switch(q, ctx);
block/elevator.c
712
mutex_unlock(&q->elevator_lock);
block/elevator.c
714
blk_mq_unfreeze_queue_nomemrestore(q);
block/elevator.c
716
WARN_ON_ONCE(elevator_change_done(q, ctx));
block/elevator.c
729
void elevator_set_default(struct request_queue *q)
block/elevator.c
738
blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
block/elevator.c
740
if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
block/elevator.c
752
if ((q->nr_hw_queues == 1 ||
block/elevator.c
753
blk_mq_is_shared_tags(q->tag_set->flags))) {
block/elevator.c
754
err = elevator_change(q, &ctx);
block/elevator.c
762
void elevator_set_none(struct request_queue *q)
block/elevator.c
769
err = elevator_change(q, &ctx);
block/elevator.c
792
struct request_queue *q = disk->queue;
block/elevator.c
793
struct blk_mq_tag_set *set = q->tag_set;
block/elevator.c
796
if (!blk_queue_registered(q))
block/elevator.c
820
if (!blk_queue_no_elv_switch(q)) {
block/elevator.c
821
ret = elevator_change(q, &ctx);
block/elevator.c
837
struct request_queue *q = disk->queue;
block/elevator.c
841
mutex_lock(&q->elevator_lock);
block/elevator.c
842
if (!q->elevator) {
block/elevator.c
846
cur = q->elevator->type;
block/elevator.c
859
mutex_unlock(&q->elevator_lock);
block/elevator.c
864
struct request *elv_rb_former_request(struct request_queue *q,
block/elevator.c
876
struct request *elv_rb_latter_request(struct request_queue *q,
block/elevator.h
138
void elv_rqhash_del(struct request_queue *q, struct request *rq);
block/elevator.h
139
void elv_rqhash_add(struct request_queue *q, struct request *rq);
block/elevator.h
140
void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
block/elevator.h
141
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
block/elevator.h
218
void blk_mq_sched_reg_debugfs(struct request_queue *q);
block/elevator.h
219
void blk_mq_sched_unreg_debugfs(struct request_queue *q);
block/elevator.h
68
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
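elv_merge() tries merge candidates in cost order, as the elevator.c excerpts show: the cached q->last_merge hint first, then the back-merge hash keyed by the bio's start sector (elv_rqhash_find()), and only then the scheduler's own request_merge() callback, with blk_queue_nomerges()/noxmerges() short-circuiting the later stages. A sketch of that tiered lookup; rq_model is a hypothetical type and a linear scan stands in for the real hash table:

#include <stdio.h>

struct rq_model { unsigned long pos, len; };

/* A request back-merges a bio that starts exactly where it ends. */
static int back_merge_ok(const struct rq_model *rq, unsigned long bio_pos)
{
	return rq->pos + rq->len == bio_pos;
}

static struct rq_model *elv_merge_model(struct rq_model *last_merge,
					struct rq_model *table, int n,
					unsigned long bio_pos)
{
	int i;

	/* 1. cached hint, like q->last_merge */
	if (last_merge && back_merge_ok(last_merge, bio_pos))
		return last_merge;

	/* 2. lookup by end sector, like elv_rqhash_find() */
	for (i = 0; i < n; i++)
		if (back_merge_ok(&table[i], bio_pos))
			return &table[i];

	/* 3. the real code would now ask the scheduler's
	 *    ops.request_merge() for e.g. a front merge. */
	return NULL;
}

int main(void)
{
	struct rq_model rqs[] = { { 0, 8 }, { 64, 8 } };
	struct rq_model *hit = elv_merge_model(NULL, rqs, 2, 72);

	printf("merge with rq at %lu\n", hit ? hit->pos : -1UL);
	return 0;
}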
block/genhd.c
1115
struct request_queue *q = bdev_get_queue(bdev);
block/genhd.c
1118
bdev_count_inflight_rw(bdev, inflight, queue_is_mq(q));
block/genhd.c
1446
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
block/genhd.c
1463
disk->queue = q;
block/genhd.c
1484
q->disk = disk;
block/genhd.c
1512
struct request_queue *q;
block/genhd.c
1515
q = blk_alloc_queue(lim ? lim : &default_lim, node);
block/genhd.c
1516
if (IS_ERR(q))
block/genhd.c
1517
return ERR_CAST(q);
block/genhd.c
1519
disk = __alloc_disk_node(q, node, lkclass);
block/genhd.c
1521
blk_put_queue(q);
block/genhd.c
695
struct request_queue *q = disk->queue;
block/genhd.c
728
blk_freeze_acquire_lock(q);
block/genhd.c
755
blk_mq_freeze_queue_wait(q);
block/genhd.c
759
blk_sync_queue(q);
block/genhd.c
762
if (queue_is_mq(q))
block/genhd.c
763
blk_mq_cancel_work_sync(q);
block/genhd.c
765
rq_qos_exit(q);
block/genhd.c
772
__blk_mq_unfreeze_queue(q, true);
block/genhd.c
773
else if (queue_is_mq(q))
block/genhd.c
774
blk_mq_exit_queue(q);
block/genhd.c
777
blk_unfreeze_release_lock(q);
block/genhd.c
780
static void disable_elv_switch(struct request_queue *q)
block/genhd.c
782
struct blk_mq_tag_set *set = q->tag_set;
block/genhd.c
783
WARN_ON_ONCE(!queue_is_mq(q));
block/genhd.c
786
blk_queue_flag_set(QUEUE_FLAG_NO_ELV_SWITCH, q);
block/kyber-iosched.c
150
struct request_queue *q;
block/kyber-iosched.c
350
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
block/kyber-iosched.c
356
kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
block/kyber-iosched.c
360
kqd->q = q;
block/kyber-iosched.c
361
kqd->dev = disk_devt(q->disk);
block/kyber-iosched.c
375
GFP_KERNEL, q->node);
block/kyber-iosched.c
398
static void kyber_depth_updated(struct request_queue *q)
block/kyber-iosched.c
400
blk_mq_set_min_shallow_depth(q, q->async_depth);
block/kyber-iosched.c
403
static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
block/kyber-iosched.c
405
blk_stat_enable_accounting(q);
block/kyber-iosched.c
407
blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
block/kyber-iosched.c
409
q->elevator = eq;
block/kyber-iosched.c
410
q->async_depth = q->nr_requests * KYBER_DEFAULT_ASYNC_PERCENT / 100;
block/kyber-iosched.c
411
kyber_depth_updated(q);
block/kyber-iosched.c
416
static void *kyber_alloc_sched_data(struct request_queue *q)
block/kyber-iosched.c
420
kqd = kyber_queue_data_alloc(q);
block/kyber-iosched.c
432
blk_stat_disable_accounting(kqd->q);
block/kyber-iosched.c
550
data->shallow_depth = data->q->async_depth;
block/kyber-iosched.c
553
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
block/kyber-iosched.c
556
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
block/kyber-iosched.c
602
struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
block/kyber-iosched.c
627
struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
block/kyber-iosched.c
889
struct request_queue *q = data; \
block/kyber-iosched.c
890
struct kyber_queue_data *kqd = q->elevator->elevator_data; \
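Both kyber and mq-deadline (below) cap asynchronous requests via q->async_depth and feed it to the sbitmap as a minimum shallow depth; kyber's default carves out a percentage of nr_requests (`q->nr_requests * KYBER_DEFAULT_ASYNC_PERCENT / 100` above). The arithmetic is plain integer scaling; the 75 below is a stand-in value, not necessarily the macro's real definition:

#include <stdio.h>

int main(void)
{
	unsigned int nr_requests = 256;
	unsigned int async_percent = 75; /* stand-in for KYBER_DEFAULT_ASYNC_PERCENT */
	unsigned int async_depth = nr_requests * async_percent / 100;

	/* 256 * 75 / 100 = 192: async I/O may hold at most 192 of the
	 * 256 tags, keeping the remainder available for sync requests. */
	printf("async_depth = %u\n", async_depth);
	return 0;
}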
block/mq-deadline.c
167
static void deadline_remove_request(struct request_queue *q,
block/mq-deadline.c
179
elv_rqhash_del(q, rq);
block/mq-deadline.c
180
if (q->last_merge == rq)
block/mq-deadline.c
181
q->last_merge = NULL;
block/mq-deadline.c
184
static void dd_request_merged(struct request_queue *q, struct request *req,
block/mq-deadline.c
187
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
204
static void dd_merged_requests(struct request_queue *q, struct request *req,
block/mq-deadline.c
207
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
230
deadline_remove_request(q, &dd->per_prio[prio], next);
block/mq-deadline.c
243
deadline_remove_request(rq->q, per_prio, rq);
block/mq-deadline.c
491
data->shallow_depth = data->q->async_depth;
block/mq-deadline.c
495
static void dd_depth_updated(struct request_queue *q)
block/mq-deadline.c
497
blk_mq_set_min_shallow_depth(q, q->async_depth);
block/mq-deadline.c
529
static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
block/mq-deadline.c
534
dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
block/mq-deadline.c
559
blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
block/mq-deadline.c
561
q->elevator = eq;
block/mq-deadline.c
562
q->async_depth = q->nr_requests;
block/mq-deadline.c
563
dd_depth_updated(q);
block/mq-deadline.c
571
static int dd_request_merge(struct request_queue *q, struct request **rq,
block/mq-deadline.c
574
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
603
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
block/mq-deadline.c
606
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
611
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
block/mq-deadline.c
626
struct request_queue *q = hctx->queue;
block/mq-deadline.c
627
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
642
if (blk_mq_sched_try_insert_merge(q, rq, free))
block/mq-deadline.c
654
elv_rqhash_add(q, rq);
block/mq-deadline.c
655
if (!q->last_merge)
block/mq-deadline.c
656
q->last_merge = rq;
block/mq-deadline.c
674
struct request_queue *q = hctx->queue;
block/mq-deadline.c
675
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
803
struct request_queue *q = m->private; \
block/mq-deadline.c
804
struct deadline_data *dd = q->elevator->elevator_data; \
block/mq-deadline.c
814
struct request_queue *q = m->private; \
block/mq-deadline.c
815
struct deadline_data *dd = q->elevator->elevator_data; \
block/mq-deadline.c
824
struct request_queue *q = m->private; \
block/mq-deadline.c
825
struct deadline_data *dd = q->elevator->elevator_data; \
block/mq-deadline.c
840
struct request_queue *q = data; \
block/mq-deadline.c
841
struct deadline_data *dd = q->elevator->elevator_data; \
block/mq-deadline.c
862
struct request_queue *q = data;
block/mq-deadline.c
863
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
871
struct request_queue *q = data;
block/mq-deadline.c
872
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
880
struct request_queue *q = data;
block/mq-deadline.c
881
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
908
struct request_queue *q = data;
block/mq-deadline.c
909
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
926
struct request_queue *q = m->private;
block/mq-deadline.c
927
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
935
struct request_queue *q = m->private;
block/mq-deadline.c
936
struct deadline_data *dd = q->elevator->elevator_data;
block/mq-deadline.c
944
struct request_queue *q = m->private;
block/mq-deadline.c
945
struct deadline_data *dd = q->elevator->elevator_data;
block/t10-pi.c
127
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
178
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
301
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
341
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
451
struct blk_integrity *bi = &rq->q->limits.integrity;
block/t10-pi.c
464
struct blk_integrity *bi = &rq->q->limits.integrity;
crypto/algapi.c
1066
struct crypto_alg *q;
crypto/algapi.c
1070
list_for_each_entry(q, &crypto_alg_list, cra_list) {
crypto/algapi.c
1073
if (!crypto_is_larval(q))
crypto/algapi.c
1076
l = (void *)q;
crypto/algapi.c
249
struct crypto_alg *q;
crypto/algapi.c
251
list_for_each_entry(q, &crypto_alg_list, cra_list) {
crypto/algapi.c
252
if (q == alg)
crypto/algapi.c
255
if (crypto_is_moribund(q))
crypto/algapi.c
258
if (crypto_is_larval(q))
crypto/algapi.c
261
if (strcmp(alg->cra_name, q->cra_name))
crypto/algapi.c
264
if (strcmp(alg->cra_driver_name, q->cra_driver_name) &&
crypto/algapi.c
265
q->cra_priority > alg->cra_priority)
crypto/algapi.c
268
crypto_remove_spawns(q, algs_to_put, alg);
crypto/algapi.c
305
struct crypto_alg *q;
crypto/algapi.c
316
list_for_each_entry(q, &crypto_alg_list, cra_list) {
crypto/algapi.c
317
if (q == alg)
crypto/algapi.c
320
if (crypto_is_moribund(q))
crypto/algapi.c
323
if (crypto_is_larval(q)) {
crypto/algapi.c
324
if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
crypto/algapi.c
329
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
crypto/algapi.c
330
!strcmp(q->cra_driver_name, alg->cra_driver_name) ||
crypto/algapi.c
331
!strcmp(q->cra_name, alg->cra_driver_name))
crypto/algapi.c
363
struct crypto_alg *q;
crypto/algapi.c
367
list_for_each_entry(q, &crypto_alg_list, cra_list) {
crypto/algapi.c
368
if (crypto_is_moribund(q) || !crypto_is_larval(q))
crypto/algapi.c
371
test = (struct crypto_larval *)q;
crypto/algapi.c
373
if (!strcmp(q->cra_driver_name, name))
crypto/algapi.c
382
q->cra_flags |= CRYPTO_ALG_DEAD;
crypto/algapi.c
537
struct crypto_template *q;
crypto/algapi.c
546
list_for_each_entry(q, &crypto_template_list, list) {
crypto/algapi.c
547
if (q == tmpl)
crypto/algapi.c
619
struct crypto_template *q, *tmpl = NULL;
crypto/algapi.c
622
list_for_each_entry(q, &crypto_template_list, list) {
crypto/algapi.c
623
if (strcmp(q->name, name))
crypto/algapi.c
625
if (unlikely(!crypto_tmpl_get(q)))
crypto/algapi.c
628
tmpl = q;
crypto/api.c
62
struct crypto_alg *q, *alg = NULL;
crypto/api.c
65
list_for_each_entry(q, &crypto_alg_list, cra_list) {
crypto/api.c
68
if (crypto_is_moribund(q))
crypto/api.c
71
if ((q->cra_flags ^ type) & mask)
crypto/api.c
74
exact = !strcmp(q->cra_driver_name, name);
crypto/api.c
75
fuzzy = !strcmp(q->cra_name, name);
crypto/api.c
76
if (!exact && !(fuzzy && q->cra_priority > best))
crypto/api.c
79
if (unlikely(!crypto_mod_get(q)))
crypto/api.c
82
best = q->cra_priority;
crypto/api.c
85
alg = q;
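The crypto/api.c loop above is a best-match scan over crypto_alg_list: an exact cra_driver_name match wins outright, while cra_name matches are "fuzzy" and compete on cra_priority (the `!exact && !(fuzzy && q->cra_priority > best)` continue). A self-contained model of that selection rule over an array, omitting the moribund/refcount handling:

#include <stdio.h>
#include <string.h>

struct alg_model {
	const char *cra_name;        /* generic name, e.g. "aes" */
	const char *cra_driver_name; /* implementation, e.g. "aes-fast" */
	int cra_priority;
};

static const struct alg_model *alg_lookup(const struct alg_model *algs,
					  int n, const char *name)
{
	const struct alg_model *alg = NULL;
	int best = -2, i;

	for (i = 0; i < n; i++) {
		const struct alg_model *q = &algs[i];
		int exact = !strcmp(q->cra_driver_name, name);
		int fuzzy = !strcmp(q->cra_name, name);

		/* skip unless it is an exact hit or a better fuzzy one */
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		best = q->cra_priority;
		alg = q;
		if (exact)
			break; /* a driver-name match is final */
	}
	return alg;
}

int main(void)
{
	const struct alg_model algs[] = {
		{ "aes", "aes-generic", 100 },
		{ "aes", "aes-fast",    300 },
	};
	/* fuzzy lookup by generic name: the higher priority entry wins */
	const struct alg_model *a = alg_lookup(algs, 2, "aes");

	printf("picked %s\n", a ? a->cra_driver_name : "(none)");
	return 0;
}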
crypto/asymmetric_keys/x509_public_key.c
167
const char *q;
crypto/asymmetric_keys/x509_public_key.c
197
q = cert->raw_skid;
crypto/asymmetric_keys/x509_public_key.c
200
q = cert->raw_serial;
crypto/asymmetric_keys/x509_public_key.c
210
p = bin2hex(p, q, srlen);
crypto/async_tx/async_pq.c
382
void *p, *q, *s;
crypto/async_tx/async_pq.c
419
q = page_address(q_src) + q_off;
crypto/async_tx/async_pq.c
421
*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
crypto/async_tx/async_raid6_recov.c
158
struct page *p, *q, *a, *b;
crypto/async_tx/async_raid6_recov.c
170
q = blocks[disks-1];
crypto/async_tx/async_raid6_recov.c
182
srcs[1] = q;
crypto/async_tx/async_raid6_recov.c
208
struct page *p, *q, *g, *dp, *dq;
crypto/async_tx/async_raid6_recov.c
233
q = blocks[disks-1];
crypto/async_tx/async_raid6_recov.c
265
srcs[1] = q;
crypto/async_tx/async_raid6_recov.c
299
struct page *p, *q, *dp, *dq;
crypto/async_tx/async_raid6_recov.c
311
q = blocks[disks-1];
crypto/async_tx/async_raid6_recov.c
339
blocks[disks-1] = q;
crypto/async_tx/async_raid6_recov.c
354
srcs[1] = q;
crypto/async_tx/async_raid6_recov.c
476
struct page *p, *q, *dq;
crypto/async_tx/async_raid6_recov.c
527
q = blocks[disks-1];
crypto/async_tx/async_raid6_recov.c
563
blocks[disks-1] = q;
crypto/async_tx/async_raid6_recov.c
571
srcs[1] = q;
crypto/crypto_user.c
37
struct crypto_alg *q, *alg = NULL;
crypto/crypto_user.c
41
list_for_each_entry(q, &crypto_alg_list, cra_list) {
crypto/crypto_user.c
44
if (crypto_is_larval(q))
crypto/crypto_user.c
47
if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
crypto/crypto_user.c
51
match = !strcmp(q->cra_driver_name,
crypto/crypto_user.c
54
match = !strcmp(q->cra_name, p->cru_name);
crypto/crypto_user.c
59
if (unlikely(!crypto_mod_get(q)))
crypto/crypto_user.c
62
alg = q;
crypto/dh.c
109
MPI val, q;
crypto/dh.c
138
q = mpi_alloc(mpi_get_nlimbs(ctx->p));
crypto/dh.c
139
if (!q) {
crypto/dh.c
148
ret = mpi_rshift(q, ctx->p, 1) ?:
crypto/dh.c
149
mpi_powm(val, y, q, ctx->p);
crypto/dh.c
151
mpi_free(q);
crypto/ecc.c
1384
const struct ecc_point *p, const struct ecc_point *q,
crypto/ecc.c
1392
vli_set(result->x, q->x, ndigits);
crypto/ecc.c
1393
vli_set(result->y, q->y, ndigits);
crypto/ecc.c
1407
const u64 *u2, const struct ecc_point *q,
crypto/ecc.c
1422
ecc_point_add(&sum, p, q, curve);
crypto/ecc.c
1425
points[2] = q;
crypto/ecc.c
583
u64 q[ECC_MAX_DIGITS];
crypto/ecc.c
594
vli_set(q, product + ndigits, ndigits);
crypto/ecc.c
599
for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
crypto/ecc.c
602
vli_umult(qc, q, c2, ndigits);
crypto/ecc.c
605
vli_set(q, qc + ndigits, ndigits);
crypto/ecc.c
681
u64 q[ECC_MAX_DIGITS * 2];
crypto/ecc.c
685
vli_mult(q, product + ndigits, mu, ndigits);
crypto/ecc.c
687
vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
crypto/ecc.c
688
vli_mult(r, mod, q + ndigits, ndigits);
crypto/essiv.c
389
const char *p, *q;
crypto/essiv.c
398
q = strchr(p, ')');
crypto/essiv.c
399
if (!q)
crypto/essiv.c
402
len = q - p;
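The essiv.c snippet pulls a name out of a template string by locating the closing parenthesis with strchr() and taking the pointer difference `len = q - p`. The same idiom in isolation; the template string here is just an example, not essiv's actual input:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* given a name of the form "xts(aes)", p points just past the
	 * '(' and q finds the closing ')' */
	const char *name = "xts(aes)";
	const char *p = strchr(name, '(') + 1;
	const char *q = strchr(p, ')');

	if (!q)
		return 1; /* malformed: no closing paren */
	printf("inner: %.*s\n", (int)(q - p), p); /* prints "aes" */
	return 0;
}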
crypto/rsa.c
106
mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?:
crypto/rsa.c
203
mpi_free(key->q);
crypto/rsa.c
21
MPI q;
crypto/rsa.c
211
key->q = NULL;
crypto/rsa.c
338
mpi_key->q = mpi_read_raw_data(raw_key.q, raw_key.q_sz);
crypto/rsa.c
339
if (!mpi_key->q)
crypto/rsa.c
96
ret = mpi_powm(m2, c, key->dq, key->q);
crypto/rsa_helper.c
100
key->q = value;
crypto/testmgr.c
2719
char *q;
crypto/testmgr.c
2782
q = data;
crypto/testmgr.c
2783
if (memcmp(q, result, template[i].len)) {
crypto/testmgr.c
2786
hexdump(q, template[i].len);
drivers/accel/habanalabs/common/habanalabs.h
3796
void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
drivers/accel/habanalabs/common/habanalabs.h
3807
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
drivers/accel/habanalabs/common/habanalabs.h
3808
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
drivers/accel/habanalabs/common/habanalabs.h
3809
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
drivers/accel/habanalabs/common/habanalabs.h
3810
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
drivers/accel/habanalabs/common/habanalabs.h
3811
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
drivers/accel/habanalabs/common/habanalabs.h
3812
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
drivers/accel/habanalabs/common/habanalabs.h
3813
void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q);
drivers/accel/habanalabs/common/hw_queue.c
1001
q->hw_queue_id = hw_queue_id;
drivers/accel/habanalabs/common/hw_queue.c
1003
switch (q->queue_type) {
drivers/accel/habanalabs/common/hw_queue.c
1005
rc = ext_queue_init(hdev, q);
drivers/accel/habanalabs/common/hw_queue.c
1008
rc = int_queue_init(hdev, q);
drivers/accel/habanalabs/common/hw_queue.c
1011
rc = cpu_queue_init(hdev, q);
drivers/accel/habanalabs/common/hw_queue.c
1014
rc = hw_queue_init(hdev, q);
drivers/accel/habanalabs/common/hw_queue.c
1017
q->valid = 0;
drivers/accel/habanalabs/common/hw_queue.c
1021
q->queue_type);
drivers/accel/habanalabs/common/hw_queue.c
1026
sync_stream_queue_init(hdev, q->hw_queue_id);
drivers/accel/habanalabs/common/hw_queue.c
1031
q->valid = 1;
drivers/accel/habanalabs/common/hw_queue.c
104
q->pi = hl_queue_inc_ptr(q->pi);
drivers/accel/habanalabs/common/hw_queue.c
1044
static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
drivers/accel/habanalabs/common/hw_queue.c
1046
if (!q->valid)
drivers/accel/habanalabs/common/hw_queue.c
106
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
drivers/accel/habanalabs/common/hw_queue.c
1067
if (q->queue_type == QUEUE_TYPE_INT)
drivers/accel/habanalabs/common/hw_queue.c
1070
kfree(q->shadow_queue);
drivers/accel/habanalabs/common/hw_queue.c
1072
if (q->queue_type == QUEUE_TYPE_CPU)
drivers/accel/habanalabs/common/hw_queue.c
1073
hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
drivers/accel/habanalabs/common/hw_queue.c
1075
hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
drivers/accel/habanalabs/common/hw_queue.c
1076
q->bus_address);
drivers/accel/habanalabs/common/hw_queue.c
1082
struct hl_hw_queue *q;
drivers/accel/habanalabs/common/hw_queue.c
1094
for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
drivers/accel/habanalabs/common/hw_queue.c
1095
i < asic->max_queues ; i++, q_ready_cnt++, q++) {
drivers/accel/habanalabs/common/hw_queue.c
1097
q->queue_type = asic->hw_queues_props[i].type;
drivers/accel/habanalabs/common/hw_queue.c
1098
q->supports_sync_stream =
drivers/accel/habanalabs/common/hw_queue.c
1100
q->collective_mode = asic->hw_queues_props[i].collective_mode;
drivers/accel/habanalabs/common/hw_queue.c
1101
q->dram_bd = asic->hw_queues_props[i].dram_bd;
drivers/accel/habanalabs/common/hw_queue.c
1103
rc = queue_init(hdev, q, i);
drivers/accel/habanalabs/common/hw_queue.c
1111
if (q->dram_bd)
drivers/accel/habanalabs/common/hw_queue.c
1112
q->pq_dram_address = asic->hw_queues_props[i].q_dram_bd_address;
drivers/accel/habanalabs/common/hw_queue.c
1118
for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
drivers/accel/habanalabs/common/hw_queue.c
1119
queue_fini(hdev, q);
drivers/accel/habanalabs/common/hw_queue.c
1128
struct hl_hw_queue *q;
drivers/accel/habanalabs/common/hw_queue.c
1132
for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++)
drivers/accel/habanalabs/common/hw_queue.c
1133
queue_fini(hdev, q);
drivers/accel/habanalabs/common/hw_queue.c
1140
struct hl_hw_queue *q;
drivers/accel/habanalabs/common/hw_queue.c
1144
for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) {
drivers/accel/habanalabs/common/hw_queue.c
1145
if ((!q->valid) ||
drivers/accel/habanalabs/common/hw_queue.c
1146
((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
drivers/accel/habanalabs/common/hw_queue.c
1148
q->pi = 0;
drivers/accel/habanalabs/common/hw_queue.c
1149
atomic_set(&q->ci, 0);
drivers/accel/habanalabs/common/hw_queue.c
1151
if (q->supports_sync_stream)
drivers/accel/habanalabs/common/hw_queue.c
1152
sync_stream_queue_reset(hdev, q->hw_queue_id);
drivers/accel/habanalabs/common/hw_queue.c
128
struct hl_hw_queue *q, int num_of_entries,
drivers/accel/habanalabs/common/hw_queue.c
132
&hdev->completion_queue[q->cq_id].free_slots_cnt;
drivers/accel/habanalabs/common/hw_queue.c
136
free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
drivers/accel/habanalabs/common/hw_queue.c
140
q->hw_queue_id, num_of_entries);
drivers/accel/habanalabs/common/hw_queue.c
154
num_of_entries, q->hw_queue_id);
drivers/accel/habanalabs/common/hw_queue.c
177
struct hl_hw_queue *q,
drivers/accel/habanalabs/common/hw_queue.c
182
if (num_of_entries > q->int_queue_len) {
drivers/accel/habanalabs/common/hw_queue.c
185
q->hw_queue_id, num_of_entries);
drivers/accel/habanalabs/common/hw_queue.c
190
free_slots_cnt = queue_free_slots(q, q->int_queue_len);
drivers/accel/habanalabs/common/hw_queue.c
194
q->hw_queue_id, num_of_entries);
drivers/accel/habanalabs/common/hw_queue.c
211
static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
drivers/accel/habanalabs/common/hw_queue.c
217
free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
drivers/accel/habanalabs/common/hw_queue.c
221
q->hw_queue_id, num_of_entries);
drivers/accel/habanalabs/common/hw_queue.c
242
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
drivers/accel/habanalabs/common/hw_queue.c
257
if (q->queue_type != QUEUE_TYPE_HW) {
drivers/accel/habanalabs/common/hw_queue.c
258
rc = ext_queue_sanity_checks(hdev, q, 1, false);
drivers/accel/habanalabs/common/hw_queue.c
263
hl_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
drivers/accel/habanalabs/common/hw_queue.c
282
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
drivers/accel/habanalabs/common/hw_queue.c
295
ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);
drivers/accel/habanalabs/common/hw_queue.c
306
((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
drivers/accel/habanalabs/common/hw_queue.c
31
static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
drivers/accel/habanalabs/common/hw_queue.c
318
cq = &hdev->completion_queue[q->cq_id];
drivers/accel/habanalabs/common/hw_queue.c
325
q->msi_vec,
drivers/accel/habanalabs/common/hw_queue.c
328
q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
drivers/accel/habanalabs/common/hw_queue.c
33
int delta = (q->pi - queue_ci_get(&q->ci, queue_len));
drivers/accel/habanalabs/common/hw_queue.c
333
hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
drivers/accel/habanalabs/common/hw_queue.c
347
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
drivers/accel/habanalabs/common/hw_queue.c
362
pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd);
drivers/accel/habanalabs/common/hw_queue.c
364
q->pi++;
drivers/accel/habanalabs/common/hw_queue.c
365
q->pi &= ((q->int_queue_len << 1) - 1);
drivers/accel/habanalabs/common/hw_queue.c
369
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
drivers/accel/habanalabs/common/hw_queue.c
383
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
drivers/accel/habanalabs/common/hw_queue.c
395
((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
drivers/accel/habanalabs/common/hw_queue.c
412
hl_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
drivers/accel/habanalabs/common/hw_queue.c
44
struct hl_hw_queue *q;
drivers/accel/habanalabs/common/hw_queue.c
50
q = &hdev->kernel_queues[0];
drivers/accel/habanalabs/common/hw_queue.c
53
if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW)
drivers/accel/habanalabs/common/hw_queue.c
61
for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) {
drivers/accel/habanalabs/common/hw_queue.c
62
if (!cs_needs_completion(cs) || q->queue_type == QUEUE_TYPE_INT)
drivers/accel/habanalabs/common/hw_queue.c
63
atomic_add(cs->jobs_in_queue_cnt[i], &q->ci);
drivers/accel/habanalabs/common/hw_queue.c
643
struct hl_hw_queue *q;
drivers/accel/habanalabs/common/hw_queue.c
663
q = &hdev->kernel_queues[0];
drivers/accel/habanalabs/common/hw_queue.c
664
for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) {
drivers/accel/habanalabs/common/hw_queue.c
666
switch (q->queue_type) {
drivers/accel/habanalabs/common/hw_queue.c
668
rc = ext_queue_sanity_checks(hdev, q,
drivers/accel/habanalabs/common/hw_queue.c
674
rc = int_queue_sanity_checks(hdev, q,
drivers/accel/habanalabs/common/hw_queue.c
678
rc = hw_queue_sanity_checks(hdev, q,
drivers/accel/habanalabs/common/hw_queue.c
683
q->queue_type);
drivers/accel/habanalabs/common/hw_queue.c
695
if (q->queue_type == QUEUE_TYPE_EXT)
drivers/accel/habanalabs/common/hw_queue.c
794
q = &hdev->kernel_queues[0];
drivers/accel/habanalabs/common/hw_queue.c
795
for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) {
drivers/accel/habanalabs/common/hw_queue.c
796
if ((q->queue_type == QUEUE_TYPE_EXT) &&
drivers/accel/habanalabs/common/hw_queue.c
819
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
drivers/accel/habanalabs/common/hw_queue.c
821
atomic_inc(&q->ci);
drivers/accel/habanalabs/common/hw_queue.c
824
static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
drivers/accel/habanalabs/common/hw_queue.c
83
void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
drivers/accel/habanalabs/common/hw_queue.c
831
p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
drivers/accel/habanalabs/common/hw_queue.c
833
p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
drivers/accel/habanalabs/common/hw_queue.c
838
q->kernel_address = p;
drivers/accel/habanalabs/common/hw_queue.c
840
q->shadow_queue = kmalloc_objs(struct hl_cs_job *, HL_QUEUE_LENGTH);
drivers/accel/habanalabs/common/hw_queue.c
841
if (!q->shadow_queue) {
drivers/accel/habanalabs/common/hw_queue.c
844
q->hw_queue_id);
drivers/accel/habanalabs/common/hw_queue.c
850
atomic_set(&q->ci, 0);
drivers/accel/habanalabs/common/hw_queue.c
851
q->pi = 0;
drivers/accel/habanalabs/common/hw_queue.c
857
hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
drivers/accel/habanalabs/common/hw_queue.c
859
hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
drivers/accel/habanalabs/common/hw_queue.c
860
q->bus_address);
drivers/accel/habanalabs/common/hw_queue.c
865
static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
drivers/accel/habanalabs/common/hw_queue.c
869
p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
drivers/accel/habanalabs/common/hw_queue.c
870
&q->bus_address, &q->int_queue_len);
drivers/accel/habanalabs/common/hw_queue.c
874
q->hw_queue_id);
drivers/accel/habanalabs/common/hw_queue.c
878
q->kernel_address = p;
drivers/accel/habanalabs/common/hw_queue.c
879
q->pi = 0;
drivers/accel/habanalabs/common/hw_queue.c
880
atomic_set(&q->ci, 0);
drivers/accel/habanalabs/common/hw_queue.c
885
static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
drivers/accel/habanalabs/common/hw_queue.c
887
return ext_and_cpu_queue_init(hdev, q, true);
drivers/accel/habanalabs/common/hw_queue.c
890
static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
drivers/accel/habanalabs/common/hw_queue.c
892
return ext_and_cpu_queue_init(hdev, q, false);
drivers/accel/habanalabs/common/hw_queue.c
895
static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
drivers/accel/habanalabs/common/hw_queue.c
899
p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
drivers/accel/habanalabs/common/hw_queue.c
90
bd = q->kernel_address;
drivers/accel/habanalabs/common/hw_queue.c
904
q->kernel_address = p;
drivers/accel/habanalabs/common/hw_queue.c
907
atomic_set(&q->ci, 0);
drivers/accel/habanalabs/common/hw_queue.c
908
q->pi = 0;
drivers/accel/habanalabs/common/hw_queue.c
91
bd += hl_pi_2_offset(q->pi);
drivers/accel/habanalabs/common/hw_queue.c
96
if (q->dram_bd)
drivers/accel/habanalabs/common/hw_queue.c
98
addr = q->pq_dram_address +
drivers/accel/habanalabs/common/hw_queue.c
99
((hl_pi_2_offset(q->pi) * sizeof(struct hl_bd)) + (i * sizeof(u64)));
drivers/accel/habanalabs/common/hw_queue.c
996
static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
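
The queue_free_slots() and q->pi masking hits above keep the producer index in the range [0, 2 * int_queue_len), so a full ring and an empty ring do not look alike. A minimal editorial sketch of that idea, assuming a power-of-two length; the names are illustrative, not the driver's internals:

    #include <stdio.h>

    /* Sketch only: pi and ci are free-running counters truncated to
     * [0, 2 * len), so used == len (full) and used == 0 (empty) are
     * distinguishable even though both have pi's ring offset == ci's. */
    static unsigned int free_slots(unsigned int pi, unsigned int ci,
                                   unsigned int len)
    {
            unsigned int used = (pi - ci) & (2 * len - 1); /* len: power of 2 */

            return len - used;
    }

    int main(void)
    {
            unsigned int len = 4, pi = 0, ci = 0;

            pi = (pi + 4) & (2 * len - 1);           /* submit 4 entries */
            printf("%u\n", free_slots(pi, ci, len)); /* prints 0: full   */
            ci = (ci + 1) & (2 * len - 1);           /* complete one     */
            printf("%u\n", free_slots(pi, ci, len)); /* prints 1         */
            return 0;
    }
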
drivers/accel/habanalabs/common/irq.c
596
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
drivers/accel/habanalabs/common/irq.c
600
p = hl_asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, &q->bus_address,
drivers/accel/habanalabs/common/irq.c
605
q->hdev = hdev;
drivers/accel/habanalabs/common/irq.c
606
q->kernel_address = p;
drivers/accel/habanalabs/common/irq.c
607
q->hw_queue_id = hw_queue_id;
drivers/accel/habanalabs/common/irq.c
608
q->ci = 0;
drivers/accel/habanalabs/common/irq.c
609
q->pi = 0;
drivers/accel/habanalabs/common/irq.c
611
atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
drivers/accel/habanalabs/common/irq.c
624
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
drivers/accel/habanalabs/common/irq.c
626
hl_asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, q->kernel_address, q->bus_address);
drivers/accel/habanalabs/common/irq.c
629
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
drivers/accel/habanalabs/common/irq.c
631
q->ci = 0;
drivers/accel/habanalabs/common/irq.c
632
q->pi = 0;
drivers/accel/habanalabs/common/irq.c
634
atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
drivers/accel/habanalabs/common/irq.c
643
memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
drivers/accel/habanalabs/common/irq.c
655
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
drivers/accel/habanalabs/common/irq.c
660
p = hl_cpu_accessible_dma_pool_alloc(hdev, size, &q->bus_address);
drivers/accel/habanalabs/common/irq.c
664
q->hdev = hdev;
drivers/accel/habanalabs/common/irq.c
665
q->kernel_address = p;
drivers/accel/habanalabs/common/irq.c
666
q->size = size;
drivers/accel/habanalabs/common/irq.c
667
q->ci = 0;
drivers/accel/habanalabs/common/irq.c
668
q->prev_eqe_index = 0;
drivers/accel/habanalabs/common/irq.c
681
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
drivers/accel/habanalabs/common/irq.c
685
hl_cpu_accessible_dma_pool_free(hdev, q->size, q->kernel_address);
drivers/accel/habanalabs/common/irq.c
688
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
drivers/accel/habanalabs/common/irq.c
690
q->ci = 0;
drivers/accel/habanalabs/common/irq.c
691
q->prev_eqe_index = 0;
drivers/accel/habanalabs/common/irq.c
700
memset(q->kernel_address, 0, q->size);
drivers/accel/habanalabs/common/irq.c
703
void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q)
drivers/accel/habanalabs/common/irq.c
711
eqe_size = q->size / HL_EQ_LENGTH;
drivers/accel/habanalabs/common/irq.c
715
for (i = 0, ptr = q->kernel_address ; i < eq_length ; ++i, ptr += eqe_size) {
drivers/accel/habanalabs/gaudi/gaudi.c
1095
struct hl_hw_queue *q;
drivers/accel/habanalabs/gaudi/gaudi.c
1105
q = &hdev->kernel_queues[queue_id + (4 * i)];
drivers/accel/habanalabs/gaudi/gaudi.c
1106
q->sync_stream_prop.collective_sob_id = sob_id + i;
drivers/accel/habanalabs/gaudi/gaudi.c
1113
q = &hdev->kernel_queues[queue_id];
drivers/accel/habanalabs/gaudi/gaudi.c
1114
q->sync_stream_prop.collective_sob_id =
drivers/accel/habanalabs/gaudi/gaudi.c
1118
q = &hdev->kernel_queues[queue_id];
drivers/accel/habanalabs/gaudi/gaudi.c
1119
q->sync_stream_prop.collective_sob_id =
drivers/accel/habanalabs/gaudi/gaudi.c
1749
struct gaudi_internal_qman_info *q;
drivers/accel/habanalabs/gaudi/gaudi.c
1753
q = &gaudi->internal_qmans[i];
drivers/accel/habanalabs/gaudi/gaudi.c
1754
if (!q->pq_kernel_addr)
drivers/accel/habanalabs/gaudi/gaudi.c
1756
hl_asic_dma_free_coherent(hdev, q->pq_size, q->pq_kernel_addr, q->pq_dma_addr);
drivers/accel/habanalabs/gaudi/gaudi.c
1763
struct gaudi_internal_qman_info *q;
drivers/accel/habanalabs/gaudi/gaudi.c
1770
q = &gaudi->internal_qmans[i];
drivers/accel/habanalabs/gaudi/gaudi.c
1774
q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
drivers/accel/habanalabs/gaudi/gaudi.c
1777
q->pq_size = MME_QMAN_SIZE_IN_BYTES;
drivers/accel/habanalabs/gaudi/gaudi.c
1780
q->pq_size = TPC_QMAN_SIZE_IN_BYTES;
drivers/accel/habanalabs/gaudi/gaudi.c
1783
q->pq_size = NIC_QMAN_SIZE_IN_BYTES;
drivers/accel/habanalabs/gaudi/gaudi.c
1791
q->pq_kernel_addr = hl_asic_dma_alloc_coherent(hdev, q->pq_size, &q->pq_dma_addr,
drivers/accel/habanalabs/gaudi/gaudi.c
1793
if (!q->pq_kernel_addr) {
drivers/accel/habanalabs/gaudi/gaudi.c
2686
struct hl_hw_queue *q;
drivers/accel/habanalabs/gaudi/gaudi.c
2709
q = &hdev->kernel_queues[q_idx];
drivers/accel/habanalabs/gaudi/gaudi.c
2710
q->cq_id = cq_id++;
drivers/accel/habanalabs/gaudi/gaudi.c
2711
q->msi_vec = nic_skip + cpu_skip + msi_vec++;
drivers/accel/habanalabs/gaudi/gaudi.c
2713
q->bus_address);
drivers/accel/habanalabs/gaudi/gaudi.c
2832
struct gaudi_internal_qman_info *q;
drivers/accel/habanalabs/gaudi/gaudi.c
2849
q = &gaudi->internal_qmans[internal_q_index];
drivers/accel/habanalabs/gaudi/gaudi.c
2850
qman_base_addr = (u64) q->pq_dma_addr;
drivers/accel/habanalabs/gaudi/gaudi.c
2956
struct gaudi_internal_qman_info *q;
drivers/accel/habanalabs/gaudi/gaudi.c
2973
q = &gaudi->internal_qmans[internal_q_index];
drivers/accel/habanalabs/gaudi/gaudi.c
2974
qman_base_addr = (u64) q->pq_dma_addr;
drivers/accel/habanalabs/gaudi/gaudi.c
3101
struct gaudi_internal_qman_info *q;
drivers/accel/habanalabs/gaudi/gaudi.c
3118
q = &gaudi->internal_qmans[internal_q_index];
drivers/accel/habanalabs/gaudi/gaudi.c
3119
qman_base_addr = (u64) q->pq_dma_addr;
drivers/accel/habanalabs/gaudi/gaudi.c
3234
struct gaudi_internal_qman_info *q;
drivers/accel/habanalabs/gaudi/gaudi.c
3264
q = &gaudi->internal_qmans[internal_q_index];
drivers/accel/habanalabs/gaudi/gaudi.c
3265
qman_base_addr = (u64) q->pq_dma_addr;
drivers/accel/habanalabs/gaudi/gaudi.c
4679
struct gaudi_internal_qman_info *q;
drivers/accel/habanalabs/gaudi/gaudi.c
4687
q = &gaudi->internal_qmans[queue_id];
drivers/accel/habanalabs/gaudi/gaudi.c
4688
*dma_handle = q->pq_dma_addr;
drivers/accel/habanalabs/gaudi/gaudi.c
4689
*queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE;
drivers/accel/habanalabs/gaudi/gaudi.c
4691
return q->pq_kernel_addr;
drivers/accel/habanalabs/gaudi/gaudi.c
6855
struct hl_hw_queue *q;
drivers/accel/habanalabs/gaudi/gaudi.c
6859
q = &hdev->kernel_queues[qid_base + stream];
drivers/accel/habanalabs/gaudi/gaudi.c
6865
queue_len = (q->queue_type == QUEUE_TYPE_INT) ?
drivers/accel/habanalabs/gaudi/gaudi.c
6866
q->int_queue_len : HL_QUEUE_LENGTH;
drivers/accel/habanalabs/gaudi/gaudi.c
6883
bd = q->kernel_address;
drivers/accel/habanalabs/gaudi/gaudi.c
7283
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
drivers/accel/habanalabs/gaudi/gaudi.c
7286
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
drivers/accel/habanalabs/gaudi2/gaudi2.c
5371
struct hl_hw_queue *q;
drivers/accel/habanalabs/gaudi2/gaudi2.c
5375
q = &hdev->kernel_queues[queue_id_base + pq_id];
drivers/accel/habanalabs/gaudi2/gaudi2.c
5378
if (q->dram_bd) {
drivers/accel/habanalabs/gaudi2/gaudi2.c
5380
lower_32_bits(q->pq_dram_address));
drivers/accel/habanalabs/gaudi2/gaudi2.c
5382
upper_32_bits(q->pq_dram_address));
drivers/accel/habanalabs/gaudi2/gaudi2.c
5385
lower_32_bits(q->bus_address));
drivers/accel/habanalabs/gaudi2/gaudi2.c
5387
upper_32_bits(q->bus_address));
drivers/accel/habanalabs/gaudi2/gaudi2.c
9869
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
drivers/accel/habanalabs/gaudi2/gaudi2.c
9874
q->pi, atomic_read(&q->ci));
drivers/accel/habanalabs/gaudi2/gaudi2.c
9946
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
drivers/accel/habanalabs/gaudi2/gaudi2.c
9950
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
drivers/accel/habanalabs/gaudi2/gaudi2.c
9956
struct hl_engine_arc_dccm_queue_full_irq *q;
drivers/accel/habanalabs/gaudi2/gaudi2.c
9966
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
drivers/accel/habanalabs/gaudi2/gaudi2.c
9970
GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index);
drivers/accel/habanalabs/goya/goya.c
1169
struct hl_hw_queue *q;
drivers/accel/habanalabs/goya/goya.c
1175
q = &hdev->kernel_queues[0];
drivers/accel/habanalabs/goya/goya.c
1177
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
drivers/accel/habanalabs/goya/goya.c
1178
q->cq_id = q->msi_vec = i;
drivers/accel/habanalabs/goya/goya.c
1179
goya_init_dma_qman(hdev, i, q->bus_address);
drivers/accel/habanalabs/goya/goya.c
4469
struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
drivers/accel/habanalabs/goya/goya.c
4472
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
drivers/accel/ivpu/ivpu_mmu.c
373
struct ivpu_mmu_queue *q = &mmu->cmdq;
drivers/accel/ivpu/ivpu_mmu.c
375
q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
drivers/accel/ivpu/ivpu_mmu.c
376
if (!q->base)
drivers/accel/ivpu/ivpu_mmu.c
379
q->dma_q = IVPU_MMU_Q_BASE_RWA;
drivers/accel/ivpu/ivpu_mmu.c
380
q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
drivers/accel/ivpu/ivpu_mmu.c
381
q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
drivers/accel/ivpu/ivpu_mmu.c
384
&q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);
drivers/accel/ivpu/ivpu_mmu.c
392
struct ivpu_mmu_queue *q = &mmu->evtq;
drivers/accel/ivpu/ivpu_mmu.c
394
q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
drivers/accel/ivpu/ivpu_mmu.c
395
if (!q->base)
drivers/accel/ivpu/ivpu_mmu.c
398
q->dma_q = IVPU_MMU_Q_BASE_RWA;
drivers/accel/ivpu/ivpu_mmu.c
399
q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
drivers/accel/ivpu/ivpu_mmu.c
400
q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
drivers/accel/ivpu/ivpu_mmu.c
403
&q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);
drivers/accel/ivpu/ivpu_mmu.c
478
static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
drivers/accel/ivpu/ivpu_mmu.c
480
return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
drivers/accel/ivpu/ivpu_mmu.c
481
(IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
drivers/accel/ivpu/ivpu_mmu.c
484
static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
drivers/accel/ivpu/ivpu_mmu.c
486
return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
drivers/accel/ivpu/ivpu_mmu.c
487
(IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
drivers/accel/ivpu/ivpu_mmu.c
512
struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
drivers/accel/ivpu/ivpu_mmu.c
523
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
drivers/accel/ivpu/ivpu_mmu.c
524
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);
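
The ivpu_mmu_queue_is_full()/ivpu_mmu_queue_is_empty() hits above compare an index field plus a wrap bit. A small self-contained sketch of the scheme; the macro names and queue size below are hypothetical stand-ins for the driver's IVPU_MMU_Q_IDX/IVPU_MMU_Q_WRP helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define Q_COUNT   16u                    /* entries; power of two     */
    #define Q_IDX(v)  ((v) & (Q_COUNT - 1))  /* position within the ring  */
    #define Q_WRP(v)  ((v) & Q_COUNT)        /* flips once per full lap   */

    static bool queue_is_full(unsigned int prod, unsigned int cons)
    {
            return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons);
    }

    static bool queue_is_empty(unsigned int prod, unsigned int cons)
    {
            return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) == Q_WRP(cons);
    }

    int main(void)
    {
            /* Same ring offset in both cases; the wrap bit decides. */
            printf("%d %d\n", queue_is_empty(0, 0),
                   queue_is_full(Q_COUNT, 0));       /* prints "1 1" */
            return 0;
    }
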
drivers/acpi/ec.c
1154
struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
drivers/acpi/ec.c
1155
struct acpi_ec_query_handler *handler = q->handler;
drivers/acpi/ec.c
1156
struct acpi_ec *ec = q->ec;
drivers/acpi/ec.c
1172
kfree(q);
drivers/acpi/ec.c
1177
struct acpi_ec_query *q;
drivers/acpi/ec.c
1180
q = kzalloc_obj(struct acpi_ec_query);
drivers/acpi/ec.c
1181
if (!q)
drivers/acpi/ec.c
1184
INIT_WORK(&q->work, acpi_ec_event_processor);
drivers/acpi/ec.c
1185
t = &q->transaction;
drivers/acpi/ec.c
1189
q->ec = ec;
drivers/acpi/ec.c
1190
return q;
drivers/acpi/ec.c
1195
struct acpi_ec_query *q;
drivers/acpi/ec.c
1199
q = acpi_ec_create_query(ec, &value);
drivers/acpi/ec.c
1200
if (!q)
drivers/acpi/ec.c
1208
result = acpi_ec_transaction(ec, &q->transaction);
drivers/acpi/ec.c
1217
q->handler = acpi_ec_get_query_handler_by_value(ec, value);
drivers/acpi/ec.c
1218
if (!q->handler) {
drivers/acpi/ec.c
1235
queue_work(ec_query_wq, &q->work);
drivers/acpi/ec.c
1242
kfree(q);
drivers/ata/libata-pata-timings.c
61
struct ata_timing *q, int T, int UT)
drivers/ata/libata-pata-timings.c
63
q->setup = EZ(t->setup, T);
drivers/ata/libata-pata-timings.c
64
q->act8b = EZ(t->act8b, T);
drivers/ata/libata-pata-timings.c
65
q->rec8b = EZ(t->rec8b, T);
drivers/ata/libata-pata-timings.c
66
q->cyc8b = EZ(t->cyc8b, T);
drivers/ata/libata-pata-timings.c
67
q->active = EZ(t->active, T);
drivers/ata/libata-pata-timings.c
68
q->recover = EZ(t->recover, T);
drivers/ata/libata-pata-timings.c
69
q->dmack_hold = EZ(t->dmack_hold, T);
drivers/ata/libata-pata-timings.c
70
q->cycle = EZ(t->cycle, T);
drivers/ata/libata-pata-timings.c
71
q->udma = EZ(t->udma, UT);
drivers/block/amiflop.c
1509
struct amiga_floppy_struct *floppy = rq->q->disk->private_data;
drivers/block/aoe/aoecmd.c
1045
struct request_queue *q;
drivers/block/aoe/aoecmd.c
1048
q = d->blkq;
drivers/block/aoe/aoecmd.c
1062
blk_mq_run_hw_queues(q, true);
drivers/block/aoe/aoecmd.c
846
struct request_queue *q;
drivers/block/aoe/aoecmd.c
851
q = d->blkq;
drivers/block/aoe/aoecmd.c
852
if (q == NULL)
drivers/block/aoe/aoenet.c
75
register char *p, *q;
drivers/block/aoe/aoenet.c
82
for (; *p; p = q + strspn(q, WHITESPACE)) {
drivers/block/aoe/aoenet.c
83
q = p + strcspn(p, WHITESPACE);
drivers/block/aoe/aoenet.c
84
if (q != p)
drivers/block/aoe/aoenet.c
85
len = q - p;
drivers/block/aoe/aoenet.c
91
if (q == p)
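
The aoenet.c hits above walk a whitespace-separated string with strcspn()/strspn(). A standalone sketch of the same tokenizing loop (the buffer contents are made up for the demo):

    #include <stdio.h>
    #include <string.h>

    #define WHITESPACE " \t\v\f\n"   /* same idea as the driver's macro */

    int main(void)
    {
            char buf[] = "  eth0 eth1\teth2  ";
            char *p, *q;

            /* p starts each token; strcspn() advances q past it and
             * strspn() then skips the separating whitespace run. */
            for (p = buf + strspn(buf, WHITESPACE); *p;
                 p = q + strspn(q, WHITESPACE)) {
                    q = p + strcspn(p, WHITESPACE);
                    printf("token: %.*s\n", (int)(q - p), p);
            }
            return 0;   /* prints eth0, eth1, eth2 */
    }
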
drivers/block/ataflop.c
1507
struct atari_floppy_struct *floppy = bd->rq->q->disk->private_data;
drivers/block/ataflop.c
1544
set_capacity(bd->rq->q->disk, UDT->blocks);
drivers/block/ataflop.c
1564
set_capacity(bd->rq->q->disk, UDT->blocks);
drivers/block/ataflop.c
745
struct request_queue *q;
drivers/block/ataflop.c
761
q = unit[drive].disk[type]->queue;
drivers/block/ataflop.c
762
memflags = blk_mq_freeze_queue(q);
drivers/block/ataflop.c
763
blk_mq_quiesce_queue(q);
drivers/block/ataflop.c
820
blk_mq_unquiesce_queue(q);
drivers/block/ataflop.c
821
blk_mq_unfreeze_queue(q, memflags);
drivers/block/drbd/drbd_int.h
1791
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
drivers/block/drbd/drbd_int.h
1794
spin_lock_irqsave(&q->q_lock, flags);
drivers/block/drbd/drbd_int.h
1795
list_add_tail(&w->list, &q->q);
drivers/block/drbd/drbd_int.h
1796
spin_unlock_irqrestore(&q->q_lock, flags);
drivers/block/drbd/drbd_int.h
1797
wake_up(&q->q_wait);
drivers/block/drbd/drbd_int.h
1801
drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
drivers/block/drbd/drbd_int.h
1804
spin_lock_irqsave(&q->q_lock, flags);
drivers/block/drbd/drbd_int.h
1806
list_add_tail(&w->list, &q->q);
drivers/block/drbd/drbd_int.h
1807
spin_unlock_irqrestore(&q->q_lock, flags);
drivers/block/drbd/drbd_int.h
1808
wake_up(&q->q_wait);
drivers/block/drbd/drbd_int.h
1817
struct drbd_work_queue *q = &connection->sender_work;
drivers/block/drbd/drbd_int.h
1819
wake_up(&q->q_wait);
drivers/block/drbd/drbd_int.h
489
struct list_head q;
drivers/block/drbd/drbd_main.c
2046
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
drivers/block/drbd/drbd_main.c
2351
INIT_LIST_HEAD(&wq->q);
drivers/block/drbd/drbd_main.c
912
struct request_queue *q = bdev_get_queue(bdev);
drivers/block/drbd/drbd_main.c
919
max_bio_size = queue_max_hw_sectors(q) << 9;
drivers/block/drbd/drbd_main.c
932
struct request_queue *q = device->rq_queue;
drivers/block/drbd/drbd_main.c
935
cpu_to_be32(queue_physical_block_size(q));
drivers/block/drbd/drbd_main.c
937
cpu_to_be32(queue_logical_block_size(q));
drivers/block/drbd/drbd_main.c
939
p->qlim->io_min = cpu_to_be32(queue_io_min(q));
drivers/block/drbd/drbd_main.c
940
p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
drivers/block/drbd/drbd_nl.c
1264
struct request_queue * const q = device->rq_queue;
drivers/block/drbd/drbd_nl.c
1265
unsigned int now = queue_max_hw_sectors(q) << 9;
drivers/block/drbd/drbd_nl.c
1293
lim = queue_limits_start_update(q);
drivers/block/drbd/drbd_nl.c
1359
if (queue_limits_commit_update(q, &lim))
drivers/block/drbd/drbd_worker.c
2062
list_splice_tail_init(&queue->q, work_list);
drivers/block/drbd/drbd_worker.c
2099
if (!list_empty(&connection->sender_work.q))
drivers/block/drbd/drbd_worker.c
2100
list_splice_tail_init(&connection->sender_work.q, work_list);
drivers/block/floppy.c
2261
unsigned int drive = (unsigned long)req->q->disk->private_data;
drivers/block/floppy.c
2550
set_fdc((long)current_req->q->disk->private_data);
drivers/block/floppy.c
2792
drive = (long)current_req->q->disk->private_data;
drivers/block/loop.c
1856
struct loop_device *lo = rq->q->queuedata;
drivers/block/loop.c
1900
struct loop_device *lo = rq->q->queuedata;
drivers/block/loop.c
1941
if (likely(!blk_should_fake_timeout(rq->q)))
drivers/block/loop.c
328
if (likely(!blk_should_fake_timeout(rq->q)))
drivers/block/mtip32xx/mtip32xx.c
2422
struct driver_data *dd = rq->q->queuedata;
drivers/block/mtip32xx/mtip32xx.c
3359
struct driver_data *dd = req->q->queuedata;
drivers/block/mtip32xx/mtip32xx.c
483
if (likely(!blk_should_fake_timeout(req->q)))
drivers/block/nbd.c
1002
percpu_ref_put(&q->q_usage_counter);
drivers/block/nbd.c
1007
if (likely(!blk_should_fake_timeout(rq->q))) {
drivers/block/nbd.c
1017
percpu_ref_put(&q->q_usage_counter);
drivers/block/nbd.c
977
struct request_queue *q = nbd->disk->queue;
drivers/block/nbd.c
994
if (!percpu_ref_tryget(&q->q_usage_counter)) {
drivers/block/null_blk/main.c
1326
blk_mq_stop_hw_queues(nullb->q);
drivers/block/null_blk/main.c
1329
blk_mq_start_stopped_hw_queues(nullb->q, true);
drivers/block/null_blk/main.c
1477
blk_mq_start_stopped_hw_queues(nullb->q, true);
drivers/block/null_blk/main.c
1671
blk_should_fake_timeout(rq->q);
drivers/block/null_blk/main.c
1776
blk_mq_start_stopped_hw_queues(nullb->q, true);
drivers/block/null_blk/main.c
2013
nullb->q = nullb->disk->queue;
drivers/block/null_blk/main.c
2020
nullb->q->queuedata = nullb;
drivers/block/null_blk/null_blk.h
119
struct request_queue *q;
drivers/block/null_blk/trace.h
53
blk_mq_rq_from_pdu(cmd)->q->disk);
drivers/block/null_blk/zoned.c
177
struct request_queue *q = nullb->q;
drivers/block/null_blk/zoned.c
182
queue_emulates_zone_append(q) ? "emulated" : "native");
drivers/block/ps3disk.c
187
struct request_queue *q = hctx->queue;
drivers/block/ps3disk.c
188
struct ps3_storage_device *dev = q->queuedata;
drivers/block/rnbd/rnbd-clt.c
1054
struct rnbd_queue *q)
drivers/block/rnbd/rnbd-clt.c
1065
if (!test_and_set_bit_lock(0, &q->in_list)) {
drivers/block/rnbd/rnbd-clt.c
1066
if (WARN_ON(!list_empty(&q->requeue_list)))
drivers/block/rnbd/rnbd-clt.c
1078
list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
drivers/block/rnbd/rnbd-clt.c
1086
clear_bit_unlock(0, &q->in_list);
drivers/block/rnbd/rnbd-clt.c
1101
struct rnbd_queue *q = hctx->driver_data;
drivers/block/rnbd/rnbd-clt.c
1105
else if (!rnbd_clt_dev_add_to_requeue(dev, q))
drivers/block/rnbd/rnbd-clt.c
1117
struct rnbd_clt_dev *dev = rq->q->disk->private_data;
drivers/block/rnbd/rnbd-clt.c
1162
struct rnbd_queue *q = hctx->driver_data;
drivers/block/rnbd/rnbd-clt.c
1163
struct rnbd_clt_dev *dev = q->dev;
drivers/block/rnbd/rnbd-clt.c
1312
struct rnbd_queue *q,
drivers/block/rnbd/rnbd-clt.c
1315
INIT_LIST_HEAD(&q->requeue_list);
drivers/block/rnbd/rnbd-clt.c
1316
q->dev = dev;
drivers/block/rnbd/rnbd-clt.c
1317
q->hctx = hctx;
drivers/block/rnbd/rnbd-clt.c
1324
struct rnbd_queue *q;
drivers/block/rnbd/rnbd-clt.c
1327
q = &dev->hw_queues[i];
drivers/block/rnbd/rnbd-clt.c
1328
rnbd_init_hw_queue(dev, q, hctx);
drivers/block/rnbd/rnbd-clt.c
1329
hctx->driver_data = q;
drivers/block/rnbd/rnbd-clt.c
139
static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
drivers/block/rnbd/rnbd-clt.c
141
if (WARN_ON(!q->hctx))
drivers/block/rnbd/rnbd-clt.c
145
blk_mq_run_hw_queue(q->hctx, true);
drivers/block/rnbd/rnbd-clt.c
203
struct rnbd_queue *q = NULL;
drivers/block/rnbd/rnbd-clt.c
220
q = list_first_entry_or_null(&cpu_q->requeue_list,
drivers/block/rnbd/rnbd-clt.c
221
typeof(*q), requeue_list);
drivers/block/rnbd/rnbd-clt.c
222
if (WARN_ON(!q))
drivers/block/rnbd/rnbd-clt.c
224
list_del_init(&q->requeue_list);
drivers/block/rnbd/rnbd-clt.c
225
clear_bit_unlock(0, &q->in_list);
drivers/block/rnbd/rnbd-clt.c
235
if (q)
drivers/block/rnbd/rnbd-clt.c
250
if (q)
drivers/block/rnbd/rnbd-clt.c
251
rnbd_clt_dev_requeue(q);
drivers/block/rnbd/rnbd-clt.c
253
return q;
drivers/block/rnbd/rnbd-clt.c
370
struct rnbd_clt_dev *dev = rq->q->disk->private_data;
drivers/block/sunvdc.c
1116
struct request_queue *q = port->disk->queue;
drivers/block/sunvdc.c
1126
memflags = blk_mq_freeze_queue(q);
drivers/block/sunvdc.c
1127
blk_mq_quiesce_queue(q);
drivers/block/sunvdc.c
1131
blk_mq_unquiesce_queue(q);
drivers/block/sunvdc.c
1132
blk_mq_unfreeze_queue(q, memflags);
drivers/block/sunvdc.c
463
struct vdc_port *port = req->q->disk->private_data;
drivers/block/sunvdc.c
795
struct request_queue *q;
drivers/block/sunvdc.c
844
q = g->queue;
drivers/block/sunvdc.c
852
g->queue = q;
drivers/block/swim3.c
842
struct request_queue *q = disks[fs->index]->queue;
drivers/block/swim3.c
852
memflags = blk_mq_freeze_queue(q);
drivers/block/swim3.c
853
blk_mq_quiesce_queue(q);
drivers/block/swim3.c
854
blk_mq_unquiesce_queue(q);
drivers/block/swim3.c
855
blk_mq_unfreeze_queue(q, memflags);
drivers/block/ublk_drv.c
1537
else if (likely(!blk_should_fake_timeout(req->q))) {
drivers/block/ublk_drv.c
1679
blk_mq_delay_kick_requeue_list(req->q,
drivers/block/ublk_drv.c
3493
int (*cb)(struct ublk_queue *q,
drivers/block/ublk_drv.c
3520
int (*cb)(struct ublk_queue *q,
drivers/block/ublk_drv.c
366
static inline int ublk_io_evts_init(struct ublk_queue *q, unsigned int size,
drivers/block/ublk_drv.c
369
spin_lock_init(&q->evts_lock);
drivers/block/ublk_drv.c
370
return kfifo_alloc_node(&q->evts_fifo, size, GFP_KERNEL, numa_node);
drivers/block/ublk_drv.c
374
static inline bool ublk_io_evts_empty(const struct ublk_queue *q)
drivers/block/ublk_drv.c
376
return kfifo_is_empty(&q->evts_fifo);
drivers/block/ublk_drv.c
379
static inline void ublk_io_evts_deinit(struct ublk_queue *q)
drivers/block/ublk_drv.c
381
WARN_ON_ONCE(!kfifo_is_empty(&q->evts_fifo));
drivers/block/ublk_drv.c
382
kfifo_free(&q->evts_fifo);
drivers/block/ublk_drv.c
3988
struct blk_integrity *bi = &req->q->limits.integrity;
drivers/block/ublk_drv.c
509
struct request_queue *q = ublk->ub_disk->queue;
drivers/block/ublk_drv.c
518
min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
drivers/block/virtio_blk.c
1591
struct request_queue *q = vblk->disk->queue;
drivers/block/virtio_blk.c
1595
memflags = blk_mq_freeze_queue(q);
drivers/block/virtio_blk.c
1596
blk_mq_quiesce_queue_nowait(q);
drivers/block/virtio_blk.c
1597
blk_mq_unfreeze_queue(q, memflags);
drivers/block/virtio_blk.c
181
if (queue_max_discard_segments(req->q) == 1) {
drivers/block/virtio_blk.c
365
if (likely(!blk_should_fake_timeout(req->q)))
drivers/block/virtio_blk.c
531
struct request_queue *q = vblk->disk->queue;
drivers/block/virtio_blk.c
541
queue_max_hw_sectors(q) << SECTOR_SHIFT);
drivers/block/virtio_blk.c
542
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
drivers/block/virtio_blk.c
560
struct request_queue *q = vblk->disk->queue;
drivers/block/virtio_blk.c
565
req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
drivers/block/virtio_blk.c
807
struct request_queue *q = vblk->disk->queue;
drivers/block/virtio_blk.c
812
req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
drivers/block/virtio_blk.c
916
struct request_queue *q = vblk->disk->queue;
drivers/block/virtio_blk.c
924
nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
drivers/block/virtio_blk.c
926
string_get_size(nblocks, queue_logical_block_size(q),
drivers/block/virtio_blk.c
928
string_get_size(nblocks, queue_logical_block_size(q),
drivers/block/virtio_blk.c
936
queue_logical_block_size(q),
drivers/block/xen-blkfront.c
1646
if (likely(!blk_should_fake_timeout(req->q)))
drivers/block/zloop.c
151
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
369
if (likely(!blk_should_fake_timeout(rq->q)))
drivers/block/zloop.c
384
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
563
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
620
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
677
struct zloop_device *zlo = rq->q->queuedata;
drivers/block/zloop.c
709
struct zloop_device *zlo = rq->q->queuedata;
drivers/bus/ti-sysc.c
1669
const struct sysc_revision_quirk *q;
drivers/bus/ti-sysc.c
1673
q = &sysc_revision_quirks[i];
drivers/bus/ti-sysc.c
1675
if (!q->base)
drivers/bus/ti-sysc.c
1678
if (q->base != ddata->module_pa)
drivers/bus/ti-sysc.c
1681
if (q->rev_offset != ddata->offsets[SYSC_REVISION])
drivers/bus/ti-sysc.c
1684
if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
drivers/bus/ti-sysc.c
1687
if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
drivers/bus/ti-sysc.c
1690
ddata->name = q->name;
drivers/bus/ti-sysc.c
1691
ddata->cfg.quirks |= q->quirks;
drivers/bus/ti-sysc.c
1698
const struct sysc_revision_quirk *q;
drivers/bus/ti-sysc.c
1702
q = &sysc_revision_quirks[i];
drivers/bus/ti-sysc.c
1704
if (q->base && q->base != ddata->module_pa)
drivers/bus/ti-sysc.c
1707
if (q->rev_offset != ddata->offsets[SYSC_REVISION])
drivers/bus/ti-sysc.c
1710
if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
drivers/bus/ti-sysc.c
1713
if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
drivers/bus/ti-sysc.c
1716
if (q->revision == ddata->revision ||
drivers/bus/ti-sysc.c
1717
(q->revision & q->revision_mask) ==
drivers/bus/ti-sysc.c
1718
(ddata->revision & q->revision_mask)) {
drivers/bus/ti-sysc.c
1719
ddata->name = q->name;
drivers/bus/ti-sysc.c
1720
ddata->cfg.quirks |= q->quirks;
drivers/cdrom/cdrom.c
2578
struct cdrom_subchnl q;
drivers/cdrom/cdrom.c
2584
if (copy_from_user(&q, argp, sizeof(q)))
drivers/cdrom/cdrom.c
2587
requested = q.cdsc_format;
drivers/cdrom/cdrom.c
2590
q.cdsc_format = CDROM_MSF;
drivers/cdrom/cdrom.c
2592
ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q);
drivers/cdrom/cdrom.c
2596
back = q.cdsc_format; /* local copy */
drivers/cdrom/cdrom.c
2597
sanitize_format(&q.cdsc_absaddr, &back, requested);
drivers/cdrom/cdrom.c
2598
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
drivers/cdrom/cdrom.c
2600
if (copy_to_user(argp, &q, sizeof(q)))
drivers/cdrom/cdrom.c
3035
struct cdrom_subchnl q;
drivers/cdrom/cdrom.c
3037
if (copy_from_user(&q, (struct cdrom_subchnl __user *)arg, sizeof(q)))
drivers/cdrom/cdrom.c
3039
requested = q.cdsc_format;
drivers/cdrom/cdrom.c
3044
ret = cdrom_read_subchannel(cdi, &q, 0);
drivers/cdrom/cdrom.c
3047
back = q.cdsc_format; /* local copy */
drivers/cdrom/cdrom.c
3048
sanitize_format(&q.cdsc_absaddr, &back, requested);
drivers/cdrom/cdrom.c
3049
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
drivers/cdrom/cdrom.c
3050
if (copy_to_user((struct cdrom_subchnl __user *)arg, &q, sizeof(q)))
drivers/char/apm-emulation.c
155
static inline int queue_empty(struct apm_queue *q)
drivers/char/apm-emulation.c
157
return q->event_head == q->event_tail;
drivers/char/apm-emulation.c
160
static inline apm_event_t queue_get_event(struct apm_queue *q)
drivers/char/apm-emulation.c
162
q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
drivers/char/apm-emulation.c
163
return q->events[q->event_tail];
drivers/char/apm-emulation.c
166
static void queue_add_event(struct apm_queue *q, apm_event_t event)
drivers/char/apm-emulation.c
168
q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
drivers/char/apm-emulation.c
169
if (q->event_head == q->event_tail) {
drivers/char/apm-emulation.c
174
q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
drivers/char/apm-emulation.c
176
q->events[q->event_head] = event;
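
The apm-emulation hits above implement a fixed-size event ring that discards the oldest entry when it overflows. A minimal userspace sketch of the same head/tail discipline, with an illustrative size:

    #include <stdio.h>

    #define MAX_EVENTS 4   /* illustrative; ring holds MAX_EVENTS - 1 */

    struct queue {
            unsigned int head, tail;   /* head == tail means empty */
            int events[MAX_EVENTS];
    };

    static void queue_add(struct queue *q, int event)
    {
            q->head = (q->head + 1) % MAX_EVENTS;
            if (q->head == q->tail)                       /* overflowed:   */
                    q->tail = (q->tail + 1) % MAX_EVENTS; /* drop oldest   */
            q->events[q->head] = event;
    }

    static int queue_get(struct queue *q)
    {
            q->tail = (q->tail + 1) % MAX_EVENTS;
            return q->events[q->tail];
    }

    int main(void)
    {
            struct queue q = { 0 };
            int i;

            for (i = 1; i <= MAX_EVENTS; i++)  /* 4 adds into 3 slots... */
                    queue_add(&q, i);
            printf("%d\n", queue_get(&q));     /* ...event 1 dropped: prints 2 */
            return 0;
    }
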
drivers/char/ipmi/ipmi_msghandler.c
705
static void free_recv_msg_list(struct list_head *q)
drivers/char/ipmi/ipmi_msghandler.c
709
list_for_each_entry_safe(msg, msg2, q, link) {
drivers/char/ipmi/ipmi_msghandler.c
715
static void free_smi_msg_list(struct list_head *q)
drivers/char/ipmi/ipmi_msghandler.c
719
list_for_each_entry_safe(msg, msg2, q, link) {
drivers/clk/clk-cdce925.c
210
u8 q;
drivers/clk/clk-cdce925.c
228
q = nn / m;
drivers/clk/clk-cdce925.c
229
if ((q < 16) || (q > 63)) {
drivers/clk/clk-cdce925.c
230
pr_debug("%s invalid q=%d\n", __func__, q);
drivers/clk/clk-cdce925.c
233
r = nn - (m*q);
drivers/clk/clk-cdce925.c
239
n, m, p, q, r);
drivers/clk/clk-cdce925.c
243
pll[2] = ((r & 0x1F) << 3) | ((q >> 3) & 0x07);
drivers/clk/clk-cdce925.c
244
pll[3] = ((q & 0x07) << 5) | (p << 2) |
drivers/clk/clk.c
3255
bool clk_is_match(const struct clk *p, const struct clk *q)
drivers/clk/clk.c
3258
if (p == q)
drivers/clk/clk.c
3262
if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
drivers/clk/clk.c
3263
if (p->core == q->core)
drivers/counter/counter-chrdev.c
123
struct counter_comp_node *q, *o;
drivers/counter/counter-chrdev.c
127
list_for_each_entry_safe(q, o, &p->comp_list, l) {
drivers/counter/counter-chrdev.c
128
list_del(&q->l);
drivers/counter/counter-chrdev.c
129
kfree(q);
drivers/cpufreq/amd_freq_sensitivity.c
104
data->actual = actual.q;
drivers/cpufreq/amd_freq_sensitivity.c
105
data->reference = reference.q;
drivers/cpufreq/amd_freq_sensitivity.c
62
if (actual.q < data->actual || reference.q < data->reference) {
drivers/cpufreq/amd_freq_sensitivity.c
67
d_actual = actual.q - data->actual;
drivers/cpufreq/amd_freq_sensitivity.c
68
d_reference = reference.q - data->reference;
drivers/crypto/caam/caampkc.c
1000
rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
drivers/crypto/caam/caampkc.c
1001
if (!rsa_key->q)
drivers/crypto/caam/caampkc.c
1043
kfree_sensitive(rsa_key->q);
drivers/crypto/caam/caampkc.c
521
pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
drivers/crypto/caam/caampkc.c
592
pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
drivers/crypto/caam/caampkc.c
870
kfree_sensitive(key->q);
drivers/crypto/caam/caampkc.h
74
u8 *q;
drivers/crypto/caam/pdb.h
457
u8 *q;
drivers/crypto/caam/pdb.h
470
u8 *q;
drivers/crypto/cavium/cpt/cptpf_mbox.c
59
static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
drivers/crypto/cavium/cpt/cptpf_mbox.c
65
if (q >= CPT_MAX_VF_NUM) {
drivers/crypto/cavium/cpt/cptpf_mbox.c
77
pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q));
drivers/crypto/cavium/cpt/cptpf_mbox.c
79
cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u);
drivers/crypto/cavium/cpt/cptpf_mbox.c
80
dev_dbg(dev, "VF %d TYPE %s", q, (mcode[grp].is_ae ? "AE" : "SE"));
drivers/crypto/cavium/cpt/cptvf.h
90
#define for_each_pending_queue(qinfo, q, i) \
drivers/crypto/cavium/cpt/cptvf.h
91
for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
drivers/crypto/cavium/cpt/cptvf.h
92
q = &qinfo->queue[i])
drivers/crypto/cavium/cpt/cptvf_reqmanager.c
15
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
drivers/crypto/cavium/cpt/cptvf_reqmanager.c
20
ent = &q->head[q->rear];
drivers/crypto/cavium/cpt/cptvf_reqmanager.c
26
q->rear++;
drivers/crypto/cavium/cpt/cptvf_reqmanager.c
27
if (unlikely(q->rear == qlen))
drivers/crypto/cavium/cpt/cptvf_reqmanager.c
28
q->rear = 0;
drivers/crypto/ccp/ccp-ops.c
226
u8 *p, *q;
drivers/crypto/ccp/ccp-ops.c
234
q = p + len - 1;
drivers/crypto/ccp/ccp-ops.c
235
while (p < q) {
drivers/crypto/ccp/ccp-ops.c
236
*p = *p ^ *q;
drivers/crypto/ccp/ccp-ops.c
237
*q = *p ^ *q;
drivers/crypto/ccp/ccp-ops.c
238
*p = *p ^ *q;
drivers/crypto/ccp/ccp-ops.c
240
q--;
drivers/crypto/ccp/ccp-ops.c
251
u8 *p, *q;
drivers/crypto/ccp/ccp-ops.c
254
q = p + len - 1;
drivers/crypto/ccp/ccp-ops.c
255
while (p < q) {
drivers/crypto/ccp/ccp-ops.c
256
*p = *p ^ *q;
drivers/crypto/ccp/ccp-ops.c
257
*q = *p ^ *q;
drivers/crypto/ccp/ccp-ops.c
258
*p = *p ^ *q;
drivers/crypto/ccp/ccp-ops.c
260
q--;
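
The ccp-ops.c hits above reverse a buffer in place with the classic three-XOR swap. A self-contained sketch (the helper name is ours, not the driver's):

    #include <stdio.h>
    #include <string.h>

    static void reverse(unsigned char *buf, unsigned int len)
    {
            unsigned char *p = buf, *q = buf + len - 1;

            while (p < q) {
                    *p = *p ^ *q;   /* three XORs swap *p and *q      */
                    *q = *p ^ *q;   /* without a temporary variable   */
                    *p = *p ^ *q;   /* (safe only because p != q)     */
                    p++;
                    q--;
            }
    }

    int main(void)
    {
            unsigned char s[] = "abcdef";

            reverse(s, (unsigned int)strlen((char *)s));
            printf("%s\n", s);      /* prints "fedcba" */
            return 0;
    }
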
drivers/crypto/hisilicon/hpre/hpre_crypto.c
959
rsa_key->q, rsa_key->q_sz);
drivers/crypto/hisilicon/qm.c
2521
struct uacce_queue *q)
drivers/crypto/hisilicon/qm.c
2531
q->priv = qp;
drivers/crypto/hisilicon/qm.c
2532
q->uacce = uacce;
drivers/crypto/hisilicon/qm.c
2533
qp->uacce_q = q;
drivers/crypto/hisilicon/qm.c
2540
static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
drivers/crypto/hisilicon/qm.c
2542
struct hisi_qp *qp = q->priv;
drivers/crypto/hisilicon/qm.c
2548
static int hisi_qm_uacce_mmap(struct uacce_queue *q,
drivers/crypto/hisilicon/qm.c
2552
struct hisi_qp *qp = q->priv;
drivers/crypto/hisilicon/qm.c
2601
static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
drivers/crypto/hisilicon/qm.c
2603
struct hisi_qp *qp = q->priv;
drivers/crypto/hisilicon/qm.c
2608
static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
drivers/crypto/hisilicon/qm.c
2610
struct hisi_qp *qp = q->priv;
drivers/crypto/hisilicon/qm.c
2637
static int hisi_qm_is_q_updated(struct uacce_queue *q)
drivers/crypto/hisilicon/qm.c
2639
struct hisi_qp *qp = q->priv;
drivers/crypto/hisilicon/qm.c
2654
static void qm_set_sqctype(struct uacce_queue *q, u16 type)
drivers/crypto/hisilicon/qm.c
2656
struct hisi_qm *qm = q->uacce->priv;
drivers/crypto/hisilicon/qm.c
2657
struct hisi_qp *qp = q->priv;
drivers/crypto/hisilicon/qm.c
2664
static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
drivers/crypto/hisilicon/qm.c
2667
struct hisi_qp *qp = q->priv;
drivers/crypto/hisilicon/qm.c
2679
qm_set_sqctype(q, qp_ctx.qc_type);
drivers/crypto/hisilicon/sec/sec_drv.c
105
#define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4)
drivers/crypto/hisilicon/sec/sec_drv.c
106
#define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4)
drivers/crypto/hisilicon/sec/sec_drv.c
673
static irqreturn_t sec_isr_handle_th(int irq, void *q)
drivers/crypto/hisilicon/sec/sec_drv.c
675
sec_queue_irq_disable(q);
drivers/crypto/hisilicon/sec/sec_drv.c
679
static irqreturn_t sec_isr_handle(int irq, void *q)
drivers/crypto/hisilicon/sec/sec_drv.c
681
struct sec_queue *queue = q;
drivers/crypto/hisilicon/zip/zip_crypto.c
142
struct hisi_zip_req *q = req_q->q;
drivers/crypto/hisilicon/zip/zip_crypto.c
158
req_cache = q + req_id;
drivers/crypto/hisilicon/zip/zip_crypto.c
482
req_q->q = kzalloc_objs(struct hisi_zip_req, req_q->size);
drivers/crypto/hisilicon/zip/zip_crypto.c
483
if (!req_q->q) {
drivers/crypto/hisilicon/zip/zip_crypto.c
497
kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
drivers/crypto/hisilicon/zip/zip_crypto.c
508
kfree(ctx->qp_ctx[i].req_q.q);
drivers/crypto/hisilicon/zip/zip_crypto.c
62
struct hisi_zip_req *q;
drivers/crypto/intel/keembay/ocs-aes.c
1060
int i, q;
drivers/crypto/intel/keembay/ocs-aes.c
1092
q = (iv[0] & 0x7) + 1;
drivers/crypto/intel/keembay/ocs-aes.c
1093
for (i = 1; i <= 15 - q; i++)
drivers/crypto/intel/keembay/ocs-aes.c
1101
while (q) {
drivers/crypto/intel/keembay/ocs-aes.c
1105
q--;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1096
ptr = rsa_key->q;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1101
ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1102
if (!ctx->q)
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1104
memcpy(ctx->q + (half_key_sz - len), ptr, len);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1154
memset(ctx->q, '\0', half_key_sz);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1155
dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1156
ctx->q = NULL;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1182
if (ctx->q) {
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1183
memset(ctx->q, '\0', half_key_sz);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1184
dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
1203
ctx->q = NULL;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
38
dma_addr_t q;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
64
char *q;
drivers/crypto/intel/qat/qat_common/qat_asym_algs.c
875
qat_req->in.rsa.dec_crt.q = ctx->dma_q;
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
135
static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
142
if (q >= cpt->max_vfs) {
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
144
q, cpt->max_vfs);
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
160
pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c
162
writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
drivers/crypto/marvell/octeontx/otx_cptvf.h
56
#define for_each_pending_queue(qinfo, q, i) \
drivers/crypto/marvell/octeontx/otx_cptvf.h
57
for (i = 0, q = &qinfo->queue[i]; i < qinfo->num_queues; i++, \
drivers/crypto/marvell/octeontx/otx_cptvf.h
58
q = &qinfo->queue[i])
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
58
struct otx_cpt_pending_queue *q,
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
63
ent = &q->head[q->rear];
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
67
q->rear++;
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
68
if (unlikely(q->rear == qlen))
drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
69
q->rear = 0;
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
46
struct otx2_cpt_pending_queue *q,
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
51
ent = &q->head[q->rear];
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
55
q->rear++;
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
56
if (unlikely(q->rear == qlen))
drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
57
q->rear = 0;
drivers/crypto/tegra/tegra-se-aes.c
967
unsigned int q, t;
drivers/crypto/tegra/tegra-se-aes.c
983
q = iv[0] + 1;
drivers/crypto/tegra/tegra-se-aes.c
984
q_ptr = nonce + 16 - q;
drivers/crypto/tegra/tegra-se-aes.c
986
return tegra_ccm_set_msg_len(q_ptr, rctx->cryptlen, q);
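
The tegra-se-aes and ocs-aes hits above encode a CCM message length big-endian into the trailing q bytes of the 16-byte B0 block, with q derived from the first IV byte. A hedged sketch of that encoding; the masked `(iv[0] & 0x7) + 1` form follows the ocs-aes excerpt, and the helper below is an assumption, not either driver's code:

    #include <stdio.h>
    #include <string.h>

    /* Write msglen big-endian into the q bytes starting at q_ptr. */
    static void ccm_set_msg_len(unsigned char *q_ptr, unsigned int msglen,
                                unsigned int q)
    {
            unsigned int i;

            for (i = 0; i < q; i++)
                    q_ptr[q - 1 - i] = (unsigned char)(msglen >> (8 * i));
    }

    int main(void)
    {
            unsigned char b0[16] = { 0 };
            unsigned int q;

            b0[0] = 0x01;                /* flags: low 3 bits hold q - 1 */
            q = (b0[0] & 0x7) + 1;       /* here q == 2                  */
            ccm_set_msg_len(b0 + 16 - q, 0x1234, q);
            printf("%02x %02x\n", b0[14], b0[15]);   /* prints "12 34" */
            return 0;
    }
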
drivers/cxl/core/core.h
92
struct cxl_mem_query_commands __user *q);
drivers/cxl/core/mbox.c
535
struct cxl_mem_query_commands __user *q)
drivers/cxl/core/mbox.c
544
if (get_user(n_commands, &q->n_commands))
drivers/cxl/core/mbox.c
549
return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);
drivers/cxl/core/mbox.c
563
if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
drivers/dma/amd/qdma/qdma.c
19
#define CHAN_STR(q) (((q)->dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H")
drivers/dma/amd/qdma/qdma.c
439
struct qdma_queue *q, **queues;
drivers/dma/amd/qdma/qdma.c
451
*queues = devm_kcalloc(&qdev->pdev->dev, qdev->chan_num, sizeof(*q),
drivers/dma/amd/qdma/qdma.c
461
q = &(*queues)[i];
drivers/dma/amd/qdma/qdma.c
462
q->ring_size = QDMA_DEFAULT_RING_SIZE;
drivers/dma/amd/qdma/qdma.c
463
q->idx_mask = q->ring_size - 2;
drivers/dma/amd/qdma/qdma.c
464
q->qdev = qdev;
drivers/dma/amd/qdma/qdma.c
465
q->dir = dir;
drivers/dma/amd/qdma/qdma.c
466
q->qid = i;
drivers/dma/amd/qdma/qdma.c
467
q->pidx_reg = pidx_base + i * QDMA_DMAP_REG_STRIDE;
drivers/dma/amd/qdma/qdma.c
468
q->cidx_reg = QDMA_REG_OFF(qdev, QDMA_REGO_INTR_CIDX) +
drivers/dma/amd/qdma/qdma.c
470
q->vchan.desc_free = qdma_free_vdesc;
drivers/dma/amd/qdma/qdma.c
471
vchan_init(&q->vchan, &qdev->dma_dev);
drivers/dma/amd/qdma/qdma.c
645
static struct qdma_mm_desc *qdma_get_desc(struct qdma_queue *q)
drivers/dma/amd/qdma/qdma.c
649
if (((q->pidx + 1) & q->idx_mask) == q->cidx)
drivers/dma/amd/qdma/qdma.c
652
desc = q->desc_base + q->pidx;
drivers/dma/amd/qdma/qdma.c
653
q->pidx = (q->pidx + 1) & q->idx_mask;
drivers/dma/amd/qdma/qdma.c
658
static int qdma_hw_enqueue(struct qdma_queue *q, struct qdma_mm_vdesc *vdesc)
drivers/dma/amd/qdma/qdma.c
670
if (q->dir == DMA_MEM_TO_DEV) {
drivers/dma/amd/qdma/qdma.c
683
desc = qdma_get_desc(q);
drivers/dma/amd/qdma/qdma.c
703
vdesc->pidx = q->pidx;
drivers/dma/amd/qdma/qdma.c
707
static void qdma_fill_pending_vdesc(struct qdma_queue *q)
drivers/dma/amd/qdma/qdma.c
709
struct virt_dma_chan *vc = &q->vchan;
drivers/dma/amd/qdma/qdma.c
715
vd = &q->issued_vdesc->vdesc;
drivers/dma/amd/qdma/qdma.c
718
ret = qdma_hw_enqueue(q, vdesc);
drivers/dma/amd/qdma/qdma.c
720
q->issued_vdesc = vdesc;
drivers/dma/amd/qdma/qdma.c
724
q->issued_vdesc = vdesc;
drivers/dma/amd/qdma/qdma.c
730
if (q->submitted_vdesc)
drivers/dma/amd/qdma/qdma.c
731
vd = &q->submitted_vdesc->vdesc;
drivers/dma/amd/qdma/qdma.c
737
ret = qdma_hw_enqueue(q, vdesc);
drivers/dma/amd/qdma/qdma.c
741
q->submitted_vdesc = vdesc;
drivers/dma/amd/qdma/qdma.c
747
struct qdma_queue *q = to_qdma_queue(&vc->chan);
drivers/dma/amd/qdma/qdma.c
757
qdma_fill_pending_vdesc(q);
drivers/dma/amd/qdma/qdma.c
768
struct qdma_queue *q = to_qdma_queue(chan);
drivers/dma/amd/qdma/qdma.c
778
vdesc->dev_addr = q->cfg.dst_addr;
drivers/dma/amd/qdma/qdma.c
780
vdesc->dev_addr = q->cfg.src_addr;
drivers/dma/amd/qdma/qdma.c
782
tx = vchan_tx_prep(&q->vchan, &vdesc->vdesc, flags);
drivers/dma/amd/qdma/qdma.c
791
struct qdma_queue *q = to_qdma_queue(chan);
drivers/dma/amd/qdma/qdma.c
793
memcpy(&q->cfg, cfg, sizeof(*cfg));
drivers/dma/amd/qdma/qdma.c
834
struct qdma_queue *q = NULL;
drivers/dma/amd/qdma/qdma.c
857
q = qdev->c2h_queues;
drivers/dma/amd/qdma/qdma.c
859
q = qdev->h2c_queues;
drivers/dma/amd/qdma/qdma.c
860
q += qid;
drivers/dma/amd/qdma/qdma.c
864
spin_lock_irqsave(&q->vchan.lock, flags);
drivers/dma/amd/qdma/qdma.c
865
comp_desc = (cidx - q->cidx) & q->idx_mask;
drivers/dma/amd/qdma/qdma.c
867
vd = vchan_next_desc(&q->vchan);
drivers/dma/amd/qdma/qdma.c
876
vd = vchan_next_desc(&q->vchan);
drivers/dma/amd/qdma/qdma.c
884
q->cidx = cidx;
drivers/dma/amd/qdma/qdma.c
886
qdma_fill_pending_vdesc(q);
drivers/dma/amd/qdma/qdma.c
887
qdma_xfer_start(q);
drivers/dma/amd/qdma/qdma.c
890
spin_unlock_irqrestore(&q->vchan.lock, flags);
drivers/dma/amd/qdma/qdma.c
906
if (q) {
drivers/dma/amd/qdma/qdma.c
915
ret = qdma_update_cidx(q, intr->ridx, index);
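
The qdma_get_desc() hit above refuses a descriptor when advancing pidx would land on cidx, i.e. one ring slot is deliberately kept empty so full and empty never collide. A minimal sketch of that full test with a power-of-two mask (the ring size here is illustrative; qdma's own idx_mask is ring_size - 2):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8u
    #define IDX_MASK  (RING_SIZE - 1)

    struct ring {
            unsigned int pidx, cidx;
    };

    static bool ring_push(struct ring *r)
    {
            if (((r->pidx + 1) & IDX_MASK) == r->cidx)
                    return false;                    /* full */
            r->pidx = (r->pidx + 1) & IDX_MASK;
            return true;
    }

    int main(void)
    {
            struct ring r = { 0 };
            unsigned int n = 0;

            while (ring_push(&r))
                    n++;
            printf("%u\n", n);   /* prints 7: one slot stays unused */
            return 0;
    }
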
drivers/dma/fsl_raid.c
624
struct device_node *np, u8 q, u32 off)
drivers/dma/fsl_raid.c
646
dev_err(dev, "Not able to create ofdev for jr %d\n", q);
drivers/dma/fsl_raid.c
654
dev_err(dev, "Reg property not found in jr %d\n", q);
drivers/dma/fsl_raid.c
665
dev_err(dev, "No IRQ defined for JR %d\n", q);
drivers/dma/fsl_raid.c
670
snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
drivers/dma/fsl_raid.c
677
dev_err(dev, "Unable to register interrupt for JR %d\n", q);
drivers/dma/fsl_raid.c
682
re_priv->re_jrs[q] = chan;
drivers/edac/amd64_edac.c
3225
cpu, reg->q, str_enabled_disabled(nbe));
drivers/firewire/core-device.c
1235
u32 q;
drivers/firewire/core-device.c
1239
rcode = read_rom(device, generation, device->max_speed, i, &q);
drivers/firewire/core-device.c
1243
if (i == 0 && q == 0)
drivers/firewire/core-device.c
1247
if (q != device->config_rom[i]) {
drivers/firmware/arm_scmi/raw_mode.c
1081
struct scmi_raw_queue *q;
drivers/firmware/arm_scmi/raw_mode.c
1083
q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
drivers/firmware/arm_scmi/raw_mode.c
1084
if (!q)
drivers/firmware/arm_scmi/raw_mode.c
1091
spin_lock_init(&q->free_bufs_lock);
drivers/firmware/arm_scmi/raw_mode.c
1092
INIT_LIST_HEAD(&q->free_bufs);
drivers/firmware/arm_scmi/raw_mode.c
1098
scmi_raw_buffer_put(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
1101
spin_lock_init(&q->msg_q_lock);
drivers/firmware/arm_scmi/raw_mode.c
1102
INIT_LIST_HEAD(&q->msg_q);
drivers/firmware/arm_scmi/raw_mode.c
1103
init_waitqueue_head(&q->wq);
drivers/firmware/arm_scmi/raw_mode.c
1105
return q;
drivers/firmware/arm_scmi/raw_mode.c
1150
raw->q[idx] = scmi_raw_queue_init(raw);
drivers/firmware/arm_scmi/raw_mode.c
1151
if (IS_ERR(raw->q[idx])) {
drivers/firmware/arm_scmi/raw_mode.c
1152
ret = PTR_ERR(raw->q[idx]);
drivers/firmware/arm_scmi/raw_mode.c
1162
struct scmi_raw_queue *q;
drivers/firmware/arm_scmi/raw_mode.c
1164
q = scmi_raw_queue_init(raw);
drivers/firmware/arm_scmi/raw_mode.c
1165
if (IS_ERR(q)) {
drivers/firmware/arm_scmi/raw_mode.c
1166
ret = PTR_ERR(q);
drivers/firmware/arm_scmi/raw_mode.c
1170
ret = xa_insert(&raw->chans_q, channels[i], q,
drivers/firmware/arm_scmi/raw_mode.c
1378
struct scmi_raw_queue *q;
drivers/firmware/arm_scmi/raw_mode.c
1385
q = scmi_raw_queue_select(raw, idx,
drivers/firmware/arm_scmi/raw_mode.c
1387
if (!q) {
drivers/firmware/arm_scmi/raw_mode.c
1403
spin_lock_irqsave(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
1404
rb = scmi_raw_buffer_get(q);
drivers/firmware/arm_scmi/raw_mode.c
1413
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
1431
rb = scmi_raw_buffer_dequeue_unlocked(q);
drivers/firmware/arm_scmi/raw_mode.c
1433
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
1444
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
1449
scmi_raw_buffer_put(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
1453
scmi_raw_buffer_enqueue(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
178
struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
drivers/firmware/arm_scmi/raw_mode.c
253
return raw->q[idx];
drivers/firmware/arm_scmi/raw_mode.c
258
static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
drivers/firmware/arm_scmi/raw_mode.c
262
struct list_head *head = &q->free_bufs;
drivers/firmware/arm_scmi/raw_mode.c
264
spin_lock_irqsave(&q->free_bufs_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
269
spin_unlock_irqrestore(&q->free_bufs_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
274
static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
drivers/firmware/arm_scmi/raw_mode.c
282
spin_lock_irqsave(&q->free_bufs_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
283
list_add_tail(&rb->node, &q->free_bufs);
drivers/firmware/arm_scmi/raw_mode.c
284
spin_unlock_irqrestore(&q->free_bufs_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
287
static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
drivers/firmware/arm_scmi/raw_mode.c
292
spin_lock_irqsave(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
293
list_add_tail(&rb->node, &q->msg_q);
drivers/firmware/arm_scmi/raw_mode.c
294
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
296
wake_up_interruptible(&q->wq);
drivers/firmware/arm_scmi/raw_mode.c
300
scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
drivers/firmware/arm_scmi/raw_mode.c
304
if (!list_empty(&q->msg_q)) {
drivers/firmware/arm_scmi/raw_mode.c
305
rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
drivers/firmware/arm_scmi/raw_mode.c
312
static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
drivers/firmware/arm_scmi/raw_mode.c
317
spin_lock_irqsave(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
318
rb = scmi_raw_buffer_dequeue_unlocked(q);
drivers/firmware/arm_scmi/raw_mode.c
319
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
324
static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
drivers/firmware/arm_scmi/raw_mode.c
329
rb = scmi_raw_buffer_dequeue(q);
drivers/firmware/arm_scmi/raw_mode.c
331
scmi_raw_buffer_put(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
504
scmi_raw_buffer_queue_flush(raw->q[i]);
drivers/firmware/arm_scmi/raw_mode.c
709
scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
drivers/firmware/arm_scmi/raw_mode.c
714
spin_lock_irqsave(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
715
while (list_empty(&q->msg_q)) {
drivers/firmware/arm_scmi/raw_mode.c
716
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
721
if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
drivers/firmware/arm_scmi/raw_mode.c
724
spin_lock_irqsave(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
727
rb = scmi_raw_buffer_dequeue_unlocked(q);
drivers/firmware/arm_scmi/raw_mode.c
729
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
756
struct scmi_raw_queue *q;
drivers/firmware/arm_scmi/raw_mode.c
758
q = scmi_raw_queue_select(raw, idx, chan_id);
drivers/firmware/arm_scmi/raw_mode.c
759
if (!q)
drivers/firmware/arm_scmi/raw_mode.c
762
rb = scmi_raw_message_dequeue(q, o_nonblock);
drivers/firmware/arm_scmi/raw_mode.c
775
scmi_raw_buffer_put(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
863
struct scmi_raw_queue *q;
drivers/firmware/arm_scmi/raw_mode.c
866
q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
drivers/firmware/arm_scmi/raw_mode.c
867
if (!q)
drivers/firmware/arm_scmi/raw_mode.c
870
poll_wait(filp, &q->wq, wait);
drivers/firmware/arm_scmi/raw_mode.c
872
spin_lock_irqsave(&q->msg_q_lock, flags);
drivers/firmware/arm_scmi/raw_mode.c
873
if (!list_empty(&q->msg_q))
drivers/firmware/arm_scmi/raw_mode.c
875
spin_unlock_irqrestore(&q->msg_q_lock, flags);
drivers/firmware/dmi_scan.c
675
char __iomem *p, *q;
drivers/firmware/dmi_scan.c
731
for (q = p + 16; q < p + 0x10000; q += 16) {
drivers/firmware/dmi_scan.c
732
memcpy_fromio(buf + 16, q, 16);
drivers/firmware/dmi_scan.c
749
for (q = p; q < p + 0x10000; q += 16) {
drivers/firmware/dmi_scan.c
750
memcpy_fromio(buf + 16, q, 16);
drivers/firmware/efi/libstub/vsprintf.c
102
put_dec_full4(p, q);
drivers/firmware/efi/libstub/vsprintf.c
43
unsigned int q = (r * 0xccd) >> 15;
drivers/firmware/efi/libstub/vsprintf.c
44
*--end = '0' + (r - q * 10);
drivers/firmware/efi/libstub/vsprintf.c
45
r = q;
drivers/firmware/efi/libstub/vsprintf.c
62
unsigned int q = (x * 0x346DC5D7ULL) >> 43;
drivers/firmware/efi/libstub/vsprintf.c
64
put_dec_full4(end, x - q * 10000);
drivers/firmware/efi/libstub/vsprintf.c
65
return q;
drivers/firmware/efi/libstub/vsprintf.c
76
unsigned int d3, d2, d1, q, h;
drivers/firmware/efi/libstub/vsprintf.c
86
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((unsigned int)n & 0xffff);
drivers/firmware/efi/libstub/vsprintf.c
87
q = put_dec_helper4(p, q);
drivers/firmware/efi/libstub/vsprintf.c
90
q += 7671 * d3 + 9496 * d2 + 6 * d1;
drivers/firmware/efi/libstub/vsprintf.c
91
q = put_dec_helper4(p, q);
drivers/firmware/efi/libstub/vsprintf.c
94
q += 4749 * d3 + 42 * d2;
drivers/firmware/efi/libstub/vsprintf.c
95
q = put_dec_helper4(p, q);
drivers/firmware/efi/libstub/vsprintf.c
98
q += 281 * d3;
drivers/firmware/efi/libstub/vsprintf.c
99
q = put_dec_helper4(p, q);
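
The vsprintf.c hits above divide by 10 with a fixed-point reciprocal multiply, e.g. (r * 0xccd) >> 15. The sketch below exhaustively verifies the identity over the small-input range where it is exact; the bound is checked by the program itself, not quoted from the source:

    #include <stdio.h>

    int main(void)
    {
            unsigned int r, bad = 0;

            /* 0xccd / 2^15 slightly overestimates 1/10; the error stays
             * below one unit in the quotient for all r in this range. */
            for (r = 0; r < 16389; r++)
                    if (((r * 0xccd) >> 15) != r / 10)
                            bad++;
            printf("mismatches below 16389: %u\n", bad);   /* prints 0 */
            return 0;
    }
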
drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
651
struct list_head *p, *q;
drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
664
q = (struct list_head *)&board->wait.WQH;
drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
665
list_for_each(p, q) {
drivers/gpio/gpiolib-of.c
716
const of_find_gpio_quirk *q;
drivers/gpio/gpiolib-of.c
727
for (q = of_find_gpio_quirks; gpiod_not_found(desc) && *q; q++)
drivers/gpio/gpiolib-of.c
728
desc = (*q)(np, con_id, idx, &of_flags);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
555
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4548
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
4550
nv_grbm_select(adev, me, pipe, q, vm);
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
1046
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
1048
soc21_grbm_select(adev, me, pipe, q, vm);
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
898
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
900
soc24_grbm_select(adev, me, pipe, q, vm);
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
711
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
713
soc_v1_0_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
3003
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
4065
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
4067
cik_srbm_select(adev, me, pipe, q, vm);
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
3412
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
3414
vi_srbm_select(adev, me, pipe, q, vm);
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1996
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
1998
soc15_grbm_select(adev, me, pipe, q, vm, 0);
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
782
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
784
soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1483
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1487
q = pqm_get_user_queue(&p->pqm, args->queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1489
if (q) {
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
1490
dev = q->device;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
1159
if (!pqn->q)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
1162
found_mask |= pqn->q->properties.exception_status;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
166
if (!pqn->q)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
170
pqn->q->properties.queue_id :
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
171
pqn->q->doorbell_id;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
173
if (pqn->q->device != dev || target_id != source_id)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
176
pqn->q->properties.exception_status |= event_mask;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
228
if (!(pqn->q && pqn->q->device == dev &&
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
229
pqn->q->doorbell_id == doorbell_id))
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
232
kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
299
static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
304
if (!q)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
307
if (!kfd_dbg_has_cwsr_workaround(q->device))
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
310
if (enable && q->properties.is_user_cu_masked)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
315
q->properties.is_dbg_wa = enable;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
316
err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
318
q->properties.is_dbg_wa = false;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
330
r = kfd_dbg_set_queue_workaround(pqn->q, enable);
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
339
kfd_dbg_set_queue_workaround(pqn->q, false);
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
55
if (!pqn->q)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
58
tmp &= pqn->q->properties.exception_status;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
63
*event_status = pqn->q->properties.exception_status;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
64
*queue_id = pqn->q->properties.queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
65
*gpu_id = pqn->q->device->id;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
66
pqn->q->properties.exception_status &= ~exception_clear_mask;
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
674
if (!pqn->q)
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
677
pqn->q->properties.exception_status = 0;
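The kfd_debug.c hits all revolve around one field, q->properties.exception_status: callers OR the per-queue bits into a found mask, copy them out to the debugger, then clear exactly the bits that were requested. A tiny sketch of that accumulate/clear-on-read pattern (the struct name is illustrative):

#include <stdint.h>

struct queue_props_sketch {
	uint64_t exception_status;	/* pending exception bits */
};

/* Report the pending bits, clearing only those the caller asked for. */
static uint64_t query_and_clear(struct queue_props_sketch *p,
				uint64_t clear_mask)
{
	uint64_t status = p->exception_status;

	p->exception_status &= ~clear_mask;
	return status;
}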
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1005
mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1013
if (q->properties.is_active && !prev_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1014
increment_queue_count(dqm, &pdd->qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1015
} else if (!q->properties.is_active && prev_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1016
decrement_queue_count(dqm, &pdd->qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1017
} else if (q->gws && !q->properties.is_gws) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1018
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1022
q->properties.is_gws = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1023
} else if (!q->gws && q->properties.is_gws) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1024
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1028
q->properties.is_gws = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1034
else if (q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1035
retval = add_queue_mes(dqm, q, &pdd->qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1036
} else if (q->properties.is_active &&
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1037
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1038
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1039
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1040
if (WARN(q->process->mm != current->mm,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1044
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1045
q->pipe, q->queue,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1046
&q->properties, current->mm);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1064
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1068
if (q->properties.is_suspended)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1073
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1075
is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1077
if (is_new || q->properties.is_being_destroyed) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1080
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1084
q->properties.is_suspended = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1085
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1087
int r = remove_queue_mes(dqm, q, &pdd->qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1093
decrement_queue_count(dqm, &pdd->qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1094
q->properties.is_active = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1110
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1114
if (!q->properties.is_suspended)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1121
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1123
q->properties.is_suspended = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1125
if (QUEUE_IS_ACTIVE(q->properties)) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1127
int r = add_queue_mes(dqm, q, &pdd->qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1133
q->properties.is_active = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1134
increment_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1143
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1160
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1161
q->properties.is_evicted = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1162
if (!q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1166
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1167
q->properties.is_active = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1168
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1173
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1177
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1193
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1220
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1221
q->properties.is_evicted = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1222
if (!q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1225
q->properties.is_active = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1226
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1229
retval = remove_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1232
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1256
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1302
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1303
q->properties.is_evicted = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1304
if (!QUEUE_IS_ACTIVE(q->properties))
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1308
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1309
q->properties.is_active = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1310
increment_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1315
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1316
q->queue, &q->properties, mm);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1336
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1367
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1368
q->properties.is_evicted = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1369
if (!QUEUE_IS_ACTIVE(q->properties))
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1372
q->properties.is_active = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1373
increment_queue_count(dqm, &pdd->qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1376
retval = add_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1379
q->properties.queue_id);
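The eviction and restore loops above are mirror images: evict marks every queue on the list is_evicted and, for each one that was active, flips is_active off and decrements the active-queue count; restore clears is_evicted and re-activates only queues that still pass the QUEUE_IS_ACTIVE() check before re-adding them to the scheduler. A schematic sketch of that bookkeeping for a single queue, with locking and the MES calls omitted:

#include <stdbool.h>

struct qprops_sketch { bool is_evicted, is_active; };

static void evict_one(struct qprops_sketch *p, int *active_count)
{
	p->is_evicted = true;
	if (!p->is_active)
		return;			/* was not scheduled, nothing to undo */
	p->is_active = false;
	(*active_count)--;
}

static void restore_one(struct qprops_sketch *p, bool qualifies,
			int *active_count)
{
	p->is_evicted = false;
	if (!qualifies)			/* kernel: !QUEUE_IS_ACTIVE(props) */
		return;
	p->is_active = true;
	(*active_count)++;
}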
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1577
struct queue *q, const uint32_t *restore_sdma_id)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1582
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1596
q->sdma_id = *restore_sdma_id;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1602
q->sdma_id = bit;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1605
q->properties.sdma_engine_id =
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1606
q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1607
q->properties.sdma_queue_id = q->sdma_id /
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1609
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1622
q->sdma_id = *restore_sdma_id;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1627
q->sdma_id = bit;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
163
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1635
q->properties.sdma_engine_id =
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1637
q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1638
q->properties.sdma_queue_id = q->sdma_id /
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1640
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1644
if (q->properties.sdma_engine_id < kfd_get_num_sdma_engines(dqm->dev)) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1647
q->properties.type = KFD_QUEUE_TYPE_SDMA;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1652
q->properties.type = KFD_QUEUE_TYPE_SDMA_XGMI;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1657
start_engine = q->properties.sdma_engine_id - eng_offset;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1664
q->sdma_id = i;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1665
q->properties.sdma_queue_id = q->sdma_id / num_engines;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1672
q->properties.sdma_engine_id, num_queues);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1677
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1678
pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
168
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1684
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1686
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1687
if (q->sdma_id >= get_num_sdma_queues(dqm))
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1689
set_bit(q->sdma_id, dqm->sdma_bitmap);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1690
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1691
if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1693
set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1968
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1983
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1984
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1985
q->properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1987
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1993
retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1998
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2000
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2001
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2002
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2003
q->properties.tba_addr = qpd->tba_addr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2004
q->properties.tma_addr = qpd->tma_addr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2005
q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr, &q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2006
if (!q->mqd_mem_obj) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
201
static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2017
q->properties.is_evicted = !!qpd->evicted;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2018
q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2019
kfd_dbg_has_cwsr_workaround(q->device);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2022
mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2023
&q->properties, restore_mqd, restore_ctl_stack,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2026
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2027
&q->gart_mqd_addr, &q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2029
list_add(&q->list, &qpd->queues_list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2032
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2033
increment_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2039
retval = add_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2058
list_del(&q->list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2059
if (q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2060
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2061
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2064
deallocate_doorbell(qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2066
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2067
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2069
deallocate_sdma_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2131
static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2137
q->properties.queue_id, pdd->process->lead_thread->pid);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2140
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2141
q->properties.is_active = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2142
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2193
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2197
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2198
if (queue_address == q->properties.queue_address)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2199
return q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2219
struct queue *q = find_queue_by_address(dqm, hang_info.queue_address);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2223
if (!q) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2228
pdd = kfd_get_process_device_data(dqm->dev, q->process);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2239
if (queue_addr != q->properties.queue_address) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
224
queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2244
set_queue_as_reset(dqm, q, &pdd->qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
225
queue_input.inprocess_gang_priority = q->properties.priority;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
228
queue_input.doorbell_offset = q->properties.doorbell_off;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2283
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2287
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2288
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2289
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) &&
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
229
queue_input.mqd_addr = q->gart_mqd_addr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2290
q->properties.doorbell_off == doorbell_off) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2291
set_queue_as_reset(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
230
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
232
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
233
queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->properties.wptr_bo) + wptr_addr_off;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
236
queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
237
queue_input.queue_size = q->properties.queue_size >> 2;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
242
queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
246
kfd_dbg_has_ttmps_always_setup(q->device));
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
248
queue_type = convert_to_mes_queue_type(q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2484
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2486
struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2487
q->process);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2496
q->properties.is_being_destroyed = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2498
if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2500
mutex_unlock(&q->process->mutex);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2502
!q->properties.is_suspended);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2504
mutex_lock(&q->process->mutex);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
251
q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2513
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2522
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2523
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2524
retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2528
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2534
retval = wait_on_destroy_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2552
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2554
deallocate_doorbell(qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2556
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2557
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2558
deallocate_sdma_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2562
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2563
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2564
q->properties.is_active = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
257
queue_input.exclusively_scheduled = q->properties.is_gws;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2572
retval = remove_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2575
list_del(&q->list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2593
qpd->pqm->process, q->device,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2596
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2648
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2660
q = list_first_entry(&qpd->queues_list, struct queue, list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2662
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2663
ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2667
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
268
q->properties.doorbell_off);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2694
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2705
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2706
q->properties.is_active || !q->device->kfd->cwsr_enabled ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2719
return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2724
const struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2730
get_mqd_type_from_queue_type(q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2737
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2738
mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2744
const struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2751
get_mqd_type_from_queue_type(q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2755
if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
276
static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2766
mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2777
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2798
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2799
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2800
deallocate_sdma_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2801
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2802
deallocate_sdma_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2804
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2805
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2808
retval = remove_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2811
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2844
q = list_first_entry(&qpd->queues_list, struct queue, list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2846
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2847
list_del(&q->list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2850
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
289
queue_input.doorbell_offset = q->properties.doorbell_off;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
290
queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
300
q->properties.doorbell_off);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3067
struct queue *q = NULL;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3078
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3079
if (q->doorbell_id == doorbell_id && q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3086
q->properties.is_evicted = true;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3087
q->properties.is_active = false;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3088
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3090
ret = remove_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
313
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
318
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
319
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
320
retval = remove_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
324
q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3251
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3273
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3274
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3286
q->mqd,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3287
&q->properties,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3288
(void __user *) q->properties.ctx_save_restore_area_address,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3330
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3336
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3341
q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3346
int err = resume_single_queue(dqm, &pdd->qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3381
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3383
q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
340
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3433
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3440
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3441
int q_idx = q_array_get_index(q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3446
int err = suspend_single_queue(dqm, pdd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
345
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3452
q->properties.exception_status &=
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
346
if (!q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
348
retval = add_queue_mes(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3483
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3484
int q_idx = q_array_get_index(q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3494
q->properties.exception_status &=
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
352
q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3545
void set_queue_snapshot_entry(struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3549
qss_entry->ring_base_address = q->properties.queue_address;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3550
qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3551
qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3553
q->properties.ctx_save_restore_area_address;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3555
q->properties.ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3556
qss_entry->exception_status = q->properties.exception_status;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3557
qss_entry->queue_id = q->properties.queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3558
qss_entry->gpu_id = q->device->id;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3559
qss_entry->ring_size = (uint32_t)q->properties.queue_size;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3560
qss_entry->queue_type = set_queue_type_for_user(&q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3561
q->properties.exception_status &= ~exception_clear_mask;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3620
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3628
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3629
if (q->properties.doorbell_off == doorbell_off) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
3630
*queue_format = q->properties.format;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
404
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
407
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
410
if (q->properties.is_gws) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
418
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
421
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
424
if (q->properties.is_gws) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
435
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
445
if (restore_id && *restore_id != q->properties.queue_id)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
448
q->doorbell_id = q->properties.queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
449
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
450
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
467
q->properties.sdma_engine_id]
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
468
+ (q->properties.sdma_queue_id & 1)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
470
+ (q->properties.sdma_queue_id >> 1);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
474
q->doorbell_id = valid_id;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
482
q->doorbell_id = *restore_id;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
494
q->doorbell_id = found;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
498
q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
500
q->doorbell_id,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
506
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
512
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
513
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
516
old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
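The doorbell hits follow the kernel's bitmap-allocator idiom: an ID is a bit in qpd->doorbell_bitmap, allocation either re-claims a specific bit (the restore path) or takes the first clear one, and deallocation clears it, with test_and_clear_bit() exposing double frees. A portable sketch with a single 64-bit word standing in for the kernel's bitmap helpers:

#include <stdbool.h>
#include <stdint.h>

static int alloc_id(uint64_t *bitmap)
{
	int bit;

	for (bit = 0; bit < 64; bit++) {
		if (!(*bitmap & (1ULL << bit))) {
			*bitmap |= 1ULL << bit;	/* claim first free slot */
			return bit;
		}
	}
	return -1;				/* exhausted */
}

static bool free_id(uint64_t *bitmap, int bit)
{
	bool was_set = *bitmap & (1ULL << bit);

	*bitmap &= ~(1ULL << bit);	/* test_and_clear_bit() analogue */
	return was_set;			/* false signals a double free */
}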
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
535
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
561
q->properties.vmid = allocated_vmid;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
604
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
609
if (q->device->adev->asic_type == CHIP_HAWAII)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
610
if (flush_texture_cache_nocpsch(q->device, qpd))
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
620
q->properties.vmid = 0;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
624
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
642
retval = allocate_vmid(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
646
q->properties.vmid = qpd->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
65
struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
652
q->properties.is_evicted = !!qpd->evicted;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
654
q->properties.tba_addr = qpd->tba_addr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
655
q->properties.tma_addr = qpd->tma_addr;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
658
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
659
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
660
retval = allocate_hqd(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
664
q->pipe, q->queue);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
665
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
666
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
667
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
670
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
673
retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
679
q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr, &q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
68
struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
682
if (!q->mqd_mem_obj) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
688
mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
689
&q->properties, restore_mqd, restore_ctl_stack,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
69
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
692
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
693
&q->gart_mqd_addr, &q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
695
if (q->properties.is_active) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
701
if (WARN(q->process->mm != current->mm,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
705
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
706
q->queue, &q->properties, current->mm);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
71
struct queue *q, const uint32_t *restore_sdma_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
712
list_add(&q->list, &qpd->queues_list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
714
if (q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
715
increment_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
727
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
729
deallocate_doorbell(qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
731
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
732
deallocate_hqd(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
733
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
734
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
735
deallocate_sdma_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
738
deallocate_vmid(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
744
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
761
q->pipe = pipe;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
762
q->queue = bit;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
771
pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
779
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
781
dqm->allocated_queues[q->pipe] |= (1 << q->queue);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
857
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
862
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
864
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
865
deallocate_hqd(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
866
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
867
deallocate_sdma_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
868
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
869
deallocate_sdma_queue(dqm, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
872
q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
877
deallocate_doorbell(qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
884
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
887
q->pipe, q->queue);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
891
list_del(&q->list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
904
deallocate_vmid(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
907
if (q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
908
decrement_queue_count(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
915
struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
922
dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
925
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
926
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
927
retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
931
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
935
retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
940
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
945
static int update_queue(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
955
pdd = kfd_get_process_device_data(q->device, q->process);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
961
q->properties.type)];
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
964
prev_active = q->properties.is_active;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
972
retval = remove_queue_mes(dqm, q, &pdd->qpd);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
985
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
986
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
987
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
994
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
998
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
138
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
146
struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
149
struct queue *q, struct mqd_update_info *minfo);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
188
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
196
const struct queue *q, u32 *mqd_size,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
200
const struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
216
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
323
void set_queue_snapshot_entry(struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
160
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
166
q->properties.sdma_vm_addr =
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
48
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
32
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
85
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
89
q->properties.sdma_vm_addr = 0;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
31
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
85
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
89
q->properties.sdma_vm_addr = 0;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
31
static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
85
static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
89
q->properties.sdma_vm_addr = 0;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12_1.c
31
static void init_sdma_vm_v12_1(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12_1.c
93
static void init_sdma_vm_v12_1(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12_1.c
97
q->properties.sdma_vm_addr = 0;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
130
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
134
q->properties.sdma_vm_addr = 0;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
31
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
162
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
168
q->properties.sdma_vm_addr =
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
48
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
293
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
49
struct kfd_mem_obj *allocate_hiq_mqd(struct mqd_manager *mm, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
66
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
76
offset = (q->sdma_engine_id *
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
78
q->sdma_queue_id) *
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
100
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
157
struct queue_properties *q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
160
struct queue_properties *q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
200
struct queue_properties *q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
72
struct queue_properties *q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
76
struct queue_properties *q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
84
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
132
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
134
if (q->format == KFD_QUEUE_FORMAT_AQL)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
140
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
145
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
157
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
174
struct queue_properties *q, struct mqd_update_info *minfo,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
192
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
193
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
194
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
195
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
196
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
197
m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
199
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
201
if (q->format == KFD_QUEUE_FORMAT_AQL)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
205
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
207
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
218
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
221
__update_mqd(mm, mqd, q, minfo, 0);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
225
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
231
m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
233
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
237
m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
238
m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
239
m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
240
m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
242
q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
244
m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
246
m->sdma_engine_id = q->sdma_engine_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
247
m->sdma_queue_id = q->sdma_queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
249
q->is_active = QUEUE_IS_ACTIVE(*q);
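Throughout the mqd_manager files, 64-bit addresses are programmed into paired 32-bit MQD fields: ring bases are shifted right by 8 first (the registers hold a 256-byte-aligned base) and then split with lower_32_bits()/upper_32_bits(), while read/write pointer addresses are split unshifted. A sketch of the split, with _sketch helpers standing in for the kernel macros:

#include <stdint.h>

static uint32_t lower_32_bits_sketch(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits_sketch(uint64_t v) { return (uint32_t)(v >> 32); }

struct mqd_sketch { uint32_t rb_base_lo, rb_base_hi; };

static void program_ring_base(struct mqd_sketch *m, uint64_t queue_address)
{
	/* The hardware field is in 256-byte units, hence the >> 8. */
	m->rb_base_lo = lower_32_bits_sketch(queue_address >> 8);
	m->rb_base_hi = upper_32_bits_sketch(queue_address >> 8);
}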
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
331
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
333
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
337
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
352
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
353
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
354
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
355
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
356
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
357
m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
359
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
361
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
363
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
70
static void set_priority(struct cik_mqd *m, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
72
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
77
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
91
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
127
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
136
lower_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
138
upper_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
139
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
140
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
141
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
142
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
148
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
166
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
175
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
179
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
180
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
182
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
183
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
184
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
185
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
188
q->doorbell_off <<
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
203
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
205
lower_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
207
upper_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
211
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
213
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
225
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
227
q->is_active = QUEUE_IS_ACTIVE(*q);
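The recurring expression ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1 encodes the ring size for cp_hqd_pq_control: the byte size becomes a dword count, and because ring sizes are powers of two, ffs(x) - 1 equals log2(x), so the field holds log2(dwords) - 1 (the CIK files write the same value as order_base_2(q->queue_size / 4) - 1). For example, a 4096-byte ring is 1024 dwords, log2 is 10, and the field gets 9. A sketch using POSIX ffs():

#include <strings.h>	/* POSIX ffs(); the kernel has its own */

/* Encode a power-of-two ring size in bytes as log2(dwords) - 1. */
static int ring_size_field(unsigned int queue_size_bytes)
{
	unsigned int dwords = queue_size_bytes / sizeof(unsigned int);

	return ffs((int)dwords) - 1 - 1;	/* ffs is 1-based */
}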
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
238
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
313
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
317
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
347
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
359
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
365
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
371
m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
373
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
377
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
378
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
379
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
380
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
382
q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
384
m->sdma_engine_id = q->sdma_engine_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
385
m->sdma_queue_id = q->sdma_queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
388
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
70
static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
72
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
77
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
91
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
103
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
117
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
122
uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
167
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
176
lower_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
178
upper_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
179
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
180
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
181
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
182
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
188
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
206
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
215
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
218
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
219
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
221
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
222
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
223
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
224
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
227
q->doorbell_off <<
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
242
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
244
lower_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
246
upper_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
250
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
252
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
264
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
266
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
277
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
352
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
356
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
386
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
403
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
409
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
415
m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
417
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
422
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
423
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
424
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
425
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
426
m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
427
m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
429
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
435
m->sdma_engine_id = q->sdma_engine_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
436
m->sdma_queue_id = q->sdma_queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
439
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
96
static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
98
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
142
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
151
lower_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
153
upper_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
154
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
155
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
156
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
157
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
163
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
181
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
190
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
193
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
194
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
196
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
197
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
198
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
199
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
202
q->doorbell_off <<
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
217
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
219
lower_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
221
upper_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
225
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
227
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
239
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
241
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
252
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
290
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
294
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
304
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
316
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
322
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
328
m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
330
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
335
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
336
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
337
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
338
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
339
m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
340
m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
342
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
348
m->sdma_engine_id = q->sdma_engine_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
349
m->sdma_queue_id = q->sdma_queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
359
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
77
static void set_priority(struct v12_compute_mqd *m, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
79
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
84
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
98
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
131
static void set_priority(struct v12_1_compute_mqd *m, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
133
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
138
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
144
if (q->type == KFD_QUEUE_TYPE_COMPUTE)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
155
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
198
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
207
lower_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
209
upper_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
210
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
211
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
212
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
213
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
219
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
237
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
246
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
250
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
251
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
253
if (q->metadata_queue_size) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
255
if (q->metadata_queue_size == q->queue_size * 4) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
262
lower_32_bits((q->queue_address + q->queue_size) >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
264
upper_32_bits((q->queue_address + q->queue_size) >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
274
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
275
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
276
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
277
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
280
q->doorbell_off <<
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
295
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
297
lower_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
299
upper_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
303
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
305
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
316
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
318
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
327
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
365
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
369
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
379
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
391
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
397
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
403
m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
405
q->vmid << SDMA0_SDMA_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
410
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
411
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
412
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
413
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
414
m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
415
m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
417
q->doorbell_off << SDMA0_SDMA_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
423
m->sdma_engine_id = q->sdma_engine_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
424
m->sdma_queue_id = q->sdma_queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
434
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
450
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
457
uint64_t offset = mm->mqd_stride(mm, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
464
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
473
q->ctx_save_restore_area_address) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
475
q->ctx_save_restore_area_address +
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
476
(xcc * q->ctx_save_restore_area_size);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
484
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
493
m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
505
struct queue_properties *q, struct mqd_update_info *minfo)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
509
uint64_t size = mm->mqd_stride(mm, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
513
update_mqd(mm, m, q, minfo);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
517
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
523
m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
584
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
592
uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
598
q->ctx_save_restore_area_size * xcc);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12_1.c
600
err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
113
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
115
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
134
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
156
if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
161
(ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) +
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
186
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
227
if (q->format == KFD_QUEUE_FORMAT_AQL)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
231
if (q->tba_addr) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
236
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
240
lower_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
242
upper_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
243
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
244
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
245
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
246
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
252
update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
268
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
276
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
279
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
280
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
282
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
283
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
284
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
285
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
288
q->doorbell_off <<
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
308
m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
309
min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0;
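
Here the v9 manager sizes the EOP buffer control with order_base_2() and clamps the result to 0xA; for power-of-two sizes order_base_2(n) equals ffs(n) - 1, so this agrees with the ffs() form used by the newer managers. A sketch with a local stand-in for order_base_2(); the size is hypothetical:

#include <stdio.h>

/* Stand-in for the kernel's order_base_2(): smallest o with 2^o >= n,
 * which is exactly log2(n) for powers of two. */
static int order_base_2(unsigned int n)
{
        int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned int eop_size = 32768;  /* hypothetical EOP ring size in bytes */

        /* 8192 dwords -> order 13 -> 12, clamped to the 0xA maximum. */
        int ctl = eop_size ? min(0xA, order_base_2(eop_size / 4) - 1) : 0;

        printf("cp_hqd_eop_control=0x%x\n", ctl);
        return 0;
}
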
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
312
lower_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
314
upper_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
318
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
320
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
328
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
335
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
346
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
361
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
38
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
42
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
45
q->type == KFD_QUEUE_TYPE_COMPUTE) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
467
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
471
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
500
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
512
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
518
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
52
return ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) +
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
524
m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
526
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
530
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
531
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
532
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
533
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
535
q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
537
m->sdma_engine_id = q->sdma_engine_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
538
m->sdma_queue_id = q->sdma_queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
545
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
586
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
598
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
701
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
708
uint64_t offset = mm->mqd_stride(mm, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
715
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
726
q->ctx_save_restore_area_address) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
728
q->ctx_save_restore_area_address +
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
729
(xcc * q->ctx_save_restore_area_size);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
737
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
756
m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
771
struct queue_properties *q, struct mqd_update_info *minfo)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
775
uint64_t size = mm->mqd_stride(mm, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
779
update_mqd(mm, m, q, minfo);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
786
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
801
m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
905
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
913
uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
919
q->ctx_save_restore_area_size * xcc);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
921
err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
124
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
127
if (q->format == KFD_QUEUE_FORMAT_AQL)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
130
if (q->tba_addr) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
131
m->compute_tba_lo = lower_32_bits(q->tba_addr >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
132
m->compute_tba_hi = upper_32_bits(q->tba_addr >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
133
m->compute_tma_lo = lower_32_bits(q->tma_addr >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
134
m->compute_tma_hi = upper_32_bits(q->tma_addr >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
139
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
143
lower_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
145
upper_32_bits(q->ctx_save_restore_area_address);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
146
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
147
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
148
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
149
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
155
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
172
struct queue_properties *q, struct mqd_update_info *minfo,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
182
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
185
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
186
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
188
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
189
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
190
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
191
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
194
q->doorbell_off <<
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
214
order_base_2(q->eop_ring_buffer_size / 4) - 1);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
216
lower_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
218
upper_32_bits(q->eop_ring_buffer_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
223
m->cp_hqd_vmid = q->vmid;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
225
if (q->format == KFD_QUEUE_FORMAT_AQL) {
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
230
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
236
set_priority(m, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
238
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
249
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
252
__update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
256
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
322
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
326
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
335
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
338
__update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
343
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
355
mm->update_mqd(mm, m, q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
359
struct queue_properties *q,
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
365
m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
367
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
371
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
372
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
373
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
374
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
376
q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
378
m->sdmax_rlcx_virtual_addr = q->sdma_vm_addr;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
380
m->sdma_engine_id = q->sdma_engine_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
381
m->sdma_queue_id = q->sdma_queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
383
q->is_active = QUEUE_IS_ACTIVE(*q);
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
73
static void set_priority(struct vi_mqd *m, struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
75
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
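
Each generation's set_priority() is the same two lines: a table lookup translating the KFD queue priority into a hardware pipe priority. A hedged sketch of the pattern; the table values and the clamp below are hypothetical (the real pipe_priority_map is defined elsewhere in amdkfd):

#include <stdio.h>

/* Hypothetical three-level table; the real pipe_priority_map covers the
 * full KFD priority range. */
enum { PIPE_PRI_LOW, PIPE_PRI_MEDIUM, PIPE_PRI_HIGH };

static const int pipe_priority_map_demo[] = {
        PIPE_PRI_LOW, PIPE_PRI_LOW, PIPE_PRI_MEDIUM, PIPE_PRI_HIGH,
};

static int to_pipe_priority(unsigned int kfd_priority)
{
        /* Defensive clamp for the demo; the real callers guarantee the range. */
        if (kfd_priority >= sizeof(pipe_priority_map_demo) / sizeof(pipe_priority_map_demo[0]))
                kfd_priority = 0;
        return pipe_priority_map_demo[kfd_priority];
}

int main(void)
{
        printf("priority 3 -> pipe priority %d\n", to_pipe_priority(3));
        return 0;
}
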
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
80
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
94
struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
148
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
223
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
224
if (!q->properties.is_active)
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
229
q->queue, qpd->is_debug);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
233
q,
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
228
struct queue *q, bool is_static)
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
243
packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
249
switch (q->properties.type) {
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
257
if (q->properties.sdma_engine_id < 2 &&
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
258
!pm_use_ext_eng(q->device->kfd))
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
259
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
267
if (q->properties.sdma_engine_id >= 8)
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
274
packet->bitfields2.engine_sel = q->properties.sdma_engine_id % 8;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
278
WARN(1, "queue type %d", q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
282
q->properties.doorbell_off;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
285
lower_32_bits(q->gart_mqd_addr);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
288
upper_32_bits(q->gart_mqd_addr);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
291
lower_32_bits((uint64_t)q->properties.write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
294
upper_32_bits((uint64_t)q->properties.write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
144
struct queue *q, bool is_static)
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
163
switch (q->properties.type) {
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
171
packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
175
WARN(1, "queue type %d", q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
179
q->properties.doorbell_off;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
182
lower_32_bits(q->gart_mqd_addr);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
185
upper_32_bits(q->gart_mqd_addr);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
188
lower_32_bits((uint64_t)q->properties.write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
191
upper_32_bits((uint64_t)q->properties.write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1155
int kfd_procfs_add_queue(struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1156
void kfd_procfs_del_queue(struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1335
int init_queue(struct queue **q, const struct queue_properties *properties);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1336
void uninit_queue(struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1337
void print_queue_properties(struct queue_properties *q);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1338
void print_queue(struct queue *q);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1373
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
1462
struct queue *q, bool is_static);
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
549
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
550
(q).queue_address != 0 && \
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
551
(q).queue_percent > 0 && \
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
552
!(q).is_evicted && \
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
553
!(q).is_suspended)
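
QUEUE_IS_ACTIVE() evaluates a queue_properties value, and the per-generation update_mqd() implementations assign its result directly to q->is_active, as the entries above show. A self-contained usage sketch with a trimmed-down stand-in for struct queue_properties:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed-down stand-in: only the fields the macro tests, plus is_active. */
struct queue_properties {
        uint64_t queue_size;
        uint64_t queue_address;
        int queue_percent;
        bool is_evicted;
        bool is_suspended;
        bool is_active;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
                            (q).queue_address != 0 && \
                            (q).queue_percent > 0 && \
                            !(q).is_evicted && \
                            !(q).is_suspended)

int main(void)
{
        struct queue_properties q = {
                .queue_size = 4096, .queue_address = 0x1000, .queue_percent = 100,
        };

        q.is_active = QUEUE_IS_ACTIVE(q);  /* the MQD managers pass *q from a pointer */
        printf("active=%d\n", q.is_active);
        return 0;
}
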
drivers/gpu/drm/amd/amdkfd/kfd_process.c
105
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
151
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_process.c
152
if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
drivers/gpu/drm/amd/amdkfd/kfd_process.c
153
(q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
drivers/gpu/drm/amd/amdkfd/kfd_process.c
163
sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
164
sdma_q->queue_id = q->properties.queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
213
list_for_each_entry(q, &qpd->queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_process.c
217
if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
drivers/gpu/drm/amd/amdkfd/kfd_process.c
218
(q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
drivers/gpu/drm/amd/amdkfd/kfd_process.c
222
if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
drivers/gpu/drm/amd/amdkfd/kfd_process.c
223
(sdma_q->queue_id == q->properties.queue_id)) {
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2329
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2348
q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2350
if (!q)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
2353
csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
406
struct queue *q = container_of(kobj, struct queue, kobj);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
410
q->properties.queue_size);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
412
return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
414
return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
520
int kfd_procfs_add_queue(struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
525
if (!q || !q->process)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
527
proc = q->process;
drivers/gpu/drm/amd/amdkfd/kfd_process.c
532
ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
drivers/gpu/drm/amd/amdkfd/kfd_process.c
533
proc->kobj_queues, "%u", q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
536
q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
537
kobject_put(&q->kobj);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
680
void kfd_procfs_del_queue(struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
682
if (!q)
drivers/gpu/drm/amd/amdkfd/kfd_process.c
685
kobject_del(&q->kobj);
drivers/gpu/drm/amd/amdkfd/kfd_process.c
686
kobject_put(&q->kobj);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1080
if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1085
pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1086
pqn->q, mqd_size,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1097
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1105
if (pqn->q) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1106
q = pqn->q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1107
switch (q->properties.type) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1111
q->device->id);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1116
q->device->id);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1118
num_xccs = NUM_XCC(q->device->xcc_mask);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1123
q->properties.type, q->device->id);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1126
mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1128
&q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1132
mqd = q->mqd + size * xcc;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
120
if (pqn->q)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
121
dev = pqn->q->device;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
147
pqn->q->gws);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
150
pqn->q->gws = mem;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
158
pqn->q->gws = gws ? ERR_PTR(-ENOMEM) : NULL;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
164
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
165
pqn->q, &minfo);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
194
dev = pqn->q->device;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
202
if (pqn->q->gws) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
203
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
204
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 4) &&
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
205
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 5, 0) &&
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
208
pqm->process->kgd_process_info, pqn->q->gws);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
213
amdgpu_amdkfd_free_kernel_mem(dev->adev, &pqn->q->gang_ctx_bo);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
214
amdgpu_amdkfd_free_kernel_mem(dev->adev, (void **)&pqn->q->wptr_bo_gart);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
223
if (pqn->q) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
224
struct kfd_process_device *pdd = kfd_get_process_device_data(pqn->q->device,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
227
kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
228
kfd_queue_release_buffers(pdd, &pqn->q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
235
kfd_procfs_del_queue(pqn->q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
236
uninit_queue(pqn->q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
246
struct kfd_node *dev, struct queue **q,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
260
retval = init_queue(q, q_properties);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
264
(*q)->device = dev;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
265
(*q)->process = pqm->process;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
271
&(*q)->gang_ctx_bo,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
272
&(*q)->gang_ctx_gpu_addr,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
273
&(*q)->gang_ctx_cpu_ptr,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
279
memset((*q)->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
291
&(*q)->wptr_bo_gart);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
302
amdgpu_amdkfd_free_kernel_mem(dev->adev, &(*q)->gang_ctx_bo);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
304
uninit_queue(*q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
305
*q = NULL;
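
The init_user_queue() failure path above frees the gang context BO, uninitializes the half-built queue, and NULLs the caller's pointer. A generic sketch of that construct-or-NULL idiom; all names below are hypothetical, not the amdkfd API:

#include <stdlib.h>

struct thing {
        void *ctx;  /* stands in for the gang context BO */
};

/* Hypothetical constructor with the same cleanup discipline: on any
 * failure, undo what was done and leave *out == NULL. */
static int thing_create(struct thing **out)
{
        struct thing *t = calloc(1, sizeof(*t));

        if (!t)
                goto err;
        t->ctx = malloc(64);
        if (!t->ctx)
                goto err_free_thing;
        *out = t;
        return 0;

err_free_thing:
        free(t);
err:
        *out = NULL;  /* the caller must never see a half-built object */
        return -1;
}

int main(void)
{
        struct thing *t;

        if (thing_create(&t) == 0) {
                free(t->ctx);
                free(t);
        }
        return 0;
}
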
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
320
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
335
q = NULL;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
39
if ((pqn->q && pqn->q->properties.queue_id == qid) ||
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
402
retval = init_user_queue(pqm, dev, &q, properties, *qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
405
pqn->q = q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
407
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
409
print_queue(q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
423
retval = init_user_queue(pqm, dev, &q, properties, *qid);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
426
pqn->q = q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
428
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
430
print_queue(q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
450
if (q && p_doorbell_offset_in_process) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
462
*p_doorbell_offset_in_process = (q->properties.doorbell_off
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
470
if (q) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
472
kfd_procfs_add_queue(q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
473
print_queue_properties(&q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
479
uninit_queue(q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
513
if (pqn->q)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
514
dev = pqn->q->device;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
531
if (pqn->q) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
532
retval = kfd_queue_unref_bo_vas(pdd, &pqn->q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
536
dqm = pqn->q->device->dqm;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
537
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
541
pqn->q->properties.queue_id, retval);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
545
kfd_procfs_del_queue(pqn->q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
546
kfd_queue_release_buffers(pdd, &pqn->q->properties);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
548
uninit_queue(pqn->q);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
570
if (!pqn || !pqn->q) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
581
struct queue *q = pqn->q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
584
pdd = kfd_get_process_device_data(q->device, q->process);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
600
kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
601
kfd_queue_buffer_put(&pqn->q->properties.ring_bo);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
604
pqn->q->properties.ring_bo = p->ring_bo;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
607
pqn->q->properties.queue_address = p->queue_address;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
608
pqn->q->properties.queue_size = p->queue_size;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
609
pqn->q->properties.queue_percent = p->queue_percent;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
610
pqn->q->properties.priority = p->priority;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
611
pqn->q->properties.pm4_target_xcc = p->pm4_target_xcc;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
613
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
614
pqn->q, NULL);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
634
if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
639
KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
652
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
653
pqn->q, minfo);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
658
pqn->q->properties.is_user_cu_masked = true;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
669
return pqn ? pqn->q : NULL;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
687
return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
688
pqn->q,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
715
if (!pqn->q)
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
719
set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
735
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
742
q->properties.queue_id,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
756
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
766
list_for_each_entry(q, &pdd->qpd.queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
767
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
768
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
769
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
774
ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
780
pr_err("Unsupported queue type (%d)\n", q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
804
if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
809
return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
810
pqn->q, mqd, ctl_stack);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
814
struct queue *q,
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
824
q_data->type = q->properties.type;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
825
q_data->format = q->properties.format;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
826
q_data->q_id = q->properties.queue_id;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
827
q_data->q_address = q->properties.queue_address;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
828
q_data->q_size = q->properties.queue_size;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
829
q_data->priority = q->properties.priority;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
830
q_data->q_percent = q->properties.queue_percent;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
831
q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
832
q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
833
q_data->doorbell_id = q->doorbell_id;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
835
q_data->sdma_id = q->sdma_id;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
838
q->properties.eop_ring_buffer_address;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
840
q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
843
q->properties.ctx_save_restore_area_address;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
846
q->properties.ctx_save_restore_area_size;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
848
q_data->gws = !!q->gws;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
850
ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
867
struct queue *q;
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
870
list_for_each_entry(q, &pdd->qpd.queues_list, list) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
876
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
877
q->properties.type != KFD_QUEUE_TYPE_SDMA &&
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
878
q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
880
pr_err("Unsupported queue type (%d)\n", q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
885
ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
912
ret = criu_checkpoint_queue(pdd, q, q_data);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
30
void print_queue_properties(struct queue_properties *q)
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
32
if (!q)
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
36
pr_debug("Queue Type: %u\n", q->type);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
37
pr_debug("Queue Size: %llu\n", q->queue_size);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
38
pr_debug("Queue percent: %u\n", q->queue_percent);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
39
pr_debug("Queue Address: 0x%llX\n", q->queue_address);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
40
pr_debug("Queue Id: %u\n", q->queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
41
pr_debug("Queue Process Vmid: %u\n", q->vmid);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
42
pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
43
pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
44
pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
45
pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
48
void print_queue(struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
50
if (!q)
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
53
pr_debug("Queue Type: %u\n", q->properties.type);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
54
pr_debug("Queue Size: %llu\n", q->properties.queue_size);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
55
pr_debug("Queue percent: %u\n", q->properties.queue_percent);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
56
pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
57
pr_debug("Queue Id: %u\n", q->properties.queue_id);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
58
pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
59
pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
60
pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
61
pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
62
pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
63
pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
64
pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
65
pr_debug("Queue Process Address: 0x%p\n", q->process);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
66
pr_debug("Queue Device Address: 0x%p\n", q->device);
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
69
int init_queue(struct queue **q, const struct queue_properties *properties)
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
79
*q = tmp_q;
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
83
void uninit_queue(struct queue *q)
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
85
kfree(q);
drivers/gpu/drm/drm_edid.c
5408
u32 max_avg, min_cll, max, min, q, r;
drivers/gpu/drm/drm_edid.c
5432
q = max_avg >> 5;
drivers/gpu/drm/drm_edid.c
5434
max = (1 << q) * pre_computed_values[r];
drivers/gpu/drm/drm_edid.c
5437
q = DIV_ROUND_CLOSEST(min_cll, 255);
drivers/gpu/drm/drm_edid.c
5438
min = max * DIV_ROUND_CLOSEST((q * q), 100);
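
The drm_edid.c lines above decode the HDR static-metadata luminance code values in integer math, with pre_computed_values[] supplying the fractional part of the exponent. A standalone re-run of the same arithmetic for the r == 0 case; the code values are hypothetical, and the early integer rounding that collapses small minima to zero mirrors the quoted expression:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned int max_cv = 96, min_cv = 128;  /* hypothetical EDID code values */

        /* Max: power-of-two part of the exponent; pre_computed_values[] in
         * drm_edid.c supplies 50 * 2^(r/32) for the remainder r, and r == 0
         * gives the plain 50 used here. */
        unsigned int q = max_cv >> 5;
        unsigned int max = (1u << q) * 50;

        /* Min: same arithmetic as the quoted lines. */
        unsigned int qq = DIV_ROUND_CLOSEST(min_cv, 255);
        unsigned int min = max * DIV_ROUND_CLOSEST(qq * qq, 100);

        printf("max=%u min=%u\n", max, min);  /* max=400, min=0 */
        return 0;
}
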
drivers/gpu/drm/i915/display/intel_quirks.c
263
struct intel_quirk *q = &intel_quirks[i];
drivers/gpu/drm/i915/display/intel_quirks.c
265
if (d->device == q->device &&
drivers/gpu/drm/i915/display/intel_quirks.c
266
(d->subsystem_vendor == q->subsystem_vendor ||
drivers/gpu/drm/i915/display/intel_quirks.c
267
q->subsystem_vendor == PCI_ANY_ID) &&
drivers/gpu/drm/i915/display/intel_quirks.c
268
(d->subsystem_device == q->subsystem_device ||
drivers/gpu/drm/i915/display/intel_quirks.c
269
q->subsystem_device == PCI_ANY_ID))
drivers/gpu/drm/i915/display/intel_quirks.c
270
q->hook(display);
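
intel_quirks.c walks a static table and treats PCI_ANY_ID as a wildcard for the subsystem fields before calling the per-entry hook, as the comparisons above show. A minimal sketch of the same wildcard match; the table entry and hook are hypothetical:

#include <stdio.h>

#define PCI_ANY_ID (~0u)  /* mirrors the kernel's wildcard */

struct quirk {
        unsigned int device, subsystem_vendor, subsystem_device;
        void (*hook)(void);
};

static void quirk_demo(void)
{
        printf("quirk applied\n");
}

static const struct quirk quirks[] = {
        { 0x1234, PCI_ANY_ID, PCI_ANY_ID, quirk_demo },  /* hypothetical entry */
};

static void apply_quirks(unsigned int dev, unsigned int sv, unsigned int sd)
{
        for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                const struct quirk *q = &quirks[i];

                if (dev == q->device &&
                    (sv == q->subsystem_vendor || q->subsystem_vendor == PCI_ANY_ID) &&
                    (sd == q->subsystem_device || q->subsystem_device == PCI_ANY_ID))
                        q->hook();
        }
}

int main(void)
{
        apply_quirks(0x1234, 0x8086, 0x0001);
        return 0;
}
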
drivers/gpu/drm/i915/display/intel_quirks.c
286
const struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i];
drivers/gpu/drm/i915/display/intel_quirks.c
288
if (d->device == q->device &&
drivers/gpu/drm/i915/display/intel_quirks.c
289
(d->subsystem_vendor == q->subsystem_vendor ||
drivers/gpu/drm/i915/display/intel_quirks.c
290
q->subsystem_vendor == PCI_ANY_ID) &&
drivers/gpu/drm/i915/display/intel_quirks.c
291
(d->subsystem_device == q->subsystem_device ||
drivers/gpu/drm/i915/display/intel_quirks.c
292
q->subsystem_device == PCI_ANY_ID) &&
drivers/gpu/drm/i915/display/intel_quirks.c
293
!memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) &&
drivers/gpu/drm/i915/display/intel_quirks.c
294
(!memcmp(q->sink_device_id, ident->device_id,
drivers/gpu/drm/i915/display/intel_quirks.c
296
mem_is_zero(q->sink_device_id, sizeof(q->sink_device_id))))
drivers/gpu/drm/i915/display/intel_quirks.c
297
q->hook(intel_dp);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
620
static void throttle_release(struct i915_request **q, int count)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
625
if (IS_ERR_OR_NULL(q[i]))
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
628
i915_request_put(fetch_and_zero(&q[i]));
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
633
struct i915_request **q, int count)
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
637
if (!IS_ERR_OR_NULL(q[0])) {
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
638
if (i915_request_wait(q[0],
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
643
i915_request_put(q[0]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
647
q[i] = q[i + 1];
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
649
q[i] = intel_context_create_request(ce);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
650
if (IS_ERR(q[i]))
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
651
return PTR_ERR(q[i]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
653
i915_request_get(q[i]);
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
654
i915_request_add(q[i]);
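
The selftest above keeps a fixed window of in-flight requests: wait on the oldest slot q[0], drop its reference, shift the survivors down, then create and queue a fresh request in the last slot. A runnable sketch of that bounded-pipeline idiom; the request type and helpers are hypothetical stand-ins, not the i915 API:

#include <stdio.h>
#include <stdlib.h>

struct req { int id; };

static int next_id;

static struct req *req_create(void)
{
        struct req *r = malloc(sizeof(*r));

        if (r)
                r->id = next_id++;
        return r;
}

static int req_wait(struct req *r)
{
        printf("waited on req %d\n", r->id);
        return 0;
}

static void req_put(struct req *r)
{
        free(r);
}

static int throttle(struct req **q, int count)
{
        if (q[0]) {  /* oldest in-flight request */
                if (req_wait(q[0]) < 0)
                        return -1;
                req_put(q[0]);
        }
        for (int i = 0; i < count - 1; i++)  /* slide the survivors down */
                q[i] = q[i + 1];
        q[count - 1] = req_create();  /* refill the newest slot */
        return q[count - 1] ? 0 : -1;
}

int main(void)
{
        struct req *q[4] = { 0 };

        for (int i = 0; i < 10; i++)  /* never more than 4 outstanding */
                if (throttle(q, 4))
                        return 1;
        for (int i = 0; i < 4; i++)  /* drain what is left */
                if (q[i])
                        req_put(q[i]);
        return 0;
}
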
drivers/gpu/drm/i915/gvt/reg.h
67
typeof(_plane) (q) = (_plane); \
drivers/gpu/drm/i915/gvt/reg.h
68
(((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
drivers/gpu/drm/i915/gvt/reg.h
70
(((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
drivers/gpu/drm/i915/gvt/reg.h
72
(((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
drivers/gpu/drm/i915/gvt/scheduler.c
1641
struct list_head *q = workload_q_head(vgpu, engine);
drivers/gpu/drm/i915/gvt/scheduler.c
1667
list_for_each_entry_reverse(last_workload, q, list) {
drivers/gpu/drm/i915/gvt/scheduler.c
1762
if (list_empty(q)) {
drivers/gpu/drm/i915/intel_uncore.h
342
__raw_read(64, q)
drivers/gpu/drm/i915/intel_uncore.h
347
__raw_write(64, q)
drivers/gpu/drm/i915/intel_uncore.h
391
__uncore_read(read64, 64, q, true)
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
85
#define FLCNQ_PRINTK(q,l,p,f,a...) FLCN_PRINTK((q)->qmgr->falcon, l, p, "%s: "f, (q)->name, ##a)
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
86
#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK((q), DEBUG, info, f, ##a)
drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
87
#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK((q), ERROR, err, f, ##a)
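
These nouveau falcon queue macros wrap a generic falcon printk and prepend the queue's name, relying on the GNU `a...`/`##a` variadic-macro syntax so that calls with no format arguments still expand cleanly. A minimal standalone sketch of the same prefixing wrapper (demo names, not the nvkm API):

#include <stdio.h>

struct queue { const char *name; };

/* Generic sink standing in for FLCN_PRINTK. */
#define DEMO_PRINTK(f, a...) printf(f "\n", ##a)

/* Queue-level wrapper that prepends the queue name, like FLCNQ_PRINTK. */
#define DEMOQ_PRINTK(q, f, a...) DEMO_PRINTK("%s: " f, (q)->name, ##a)

int main(void)
{
        struct queue q = { .name = "cmdq" };

        DEMOQ_PRINTK(&q, "sent %d bytes", 16);
        DEMOQ_PRINTK(&q, "idle");  /* ##a swallows the trailing comma */
        return 0;
}
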
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
372
unsigned int itc, ec, q, sc;
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
394
q = (ptr[2] >> 2) & 0x3;
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
410
(itc << 7) | (ec << 4) | (q << 2) | (sc << 0));
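
hdmi5_core.c packs four AVI infoframe fields into one byte (ITC bit 7, EC bits 6:4, Q bits 3:2, SC bits 1:0) and extracts the quantization field with (ptr[2] >> 2) & 0x3 on the parse side, as the entries above show. A round-trip sketch with hypothetical field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int itc = 1, ec = 0, q = 2, sc = 1;  /* hypothetical field values */

        /* Pack: ITC bit 7, EC bits 6:4, Q bits 3:2, SC bits 1:0. */
        uint8_t byte = (itc << 7) | (ec << 4) | (q << 2) | (sc << 0);

        /* Unpack the quantization field the same way the parser does. */
        unsigned int q_out = (byte >> 2) & 0x3;

        printf("byte=0x%02x q=%u\n", byte, q_out);
        return 0;
}
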
drivers/gpu/drm/v3d/v3d_drv.c
164
enum v3d_queue q;
drivers/gpu/drm/v3d/v3d_drv.c
166
for (q = 0; q < V3D_MAX_QUEUES; q++) {
drivers/gpu/drm/v3d/v3d_drv.c
167
struct v3d_queue_state *queue = &v3d->queue[q];
drivers/gpu/drm/v3d/v3d_drv.c
170
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
drivers/gpu/drm/v3d/v3d_drv.c
172
if (job && job->base.entity == &v3d_priv->sched_entity[q]) {
drivers/gpu/drm/v3d/v3d_drv.h
564
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q);
drivers/gpu/drm/v3d/v3d_drv.h
604
void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q);
drivers/gpu/drm/v3d/v3d_fence.c
16
fence->queue = q;
drivers/gpu/drm/v3d/v3d_fence.c
6
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q)
drivers/gpu/drm/v3d/v3d_fence.c
8
struct v3d_queue_state *queue = &v3d->queue[q];
drivers/gpu/drm/v3d/v3d_gem.c
348
enum v3d_queue q;
drivers/gpu/drm/v3d/v3d_gem.c
355
for (q = 0; q < V3D_MAX_QUEUES; q++)
drivers/gpu/drm/v3d/v3d_gem.c
356
WARN_ON(v3d->queue[q].active_job);
drivers/gpu/drm/v3d/v3d_irq.c
89
v3d_irq_signal_fence(struct v3d_dev *v3d, enum v3d_queue q,
drivers/gpu/drm/v3d/v3d_irq.c
92
struct v3d_queue_state *queue = &v3d->queue[q];
drivers/gpu/drm/v3d/v3d_irq.c
95
v3d_job_update_stats(queue->active_job, q);
drivers/gpu/drm/v3d/v3d_sched.c
198
v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q)
drivers/gpu/drm/v3d/v3d_sched.c
201
struct v3d_queue_state *queue = &v3d->queue[q];
drivers/gpu/drm/v3d/v3d_sched.c
215
v3d_stats_update(&job->file_priv->stats[q], now);
drivers/gpu/drm/v3d/v3d_sched.c
727
enum v3d_queue q)
drivers/gpu/drm/v3d/v3d_sched.c
747
spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags);
drivers/gpu/drm/v3d/v3d_sched.c
750
spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags);
drivers/gpu/drm/v3d/v3d_sched.c
765
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
drivers/gpu/drm/v3d/v3d_sched.c
770
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
drivers/gpu/drm/v3d/v3d_sched.c
771
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
drivers/gpu/drm/v3d/v3d_sched.c
785
return v3d_gpu_reset_for_timeout(v3d, sched_job, q);
drivers/gpu/drm/v3d/v3d_sched.c
928
enum v3d_queue q;
drivers/gpu/drm/v3d/v3d_sched.c
930
for (q = 0; q < V3D_MAX_QUEUES; q++) {
drivers/gpu/drm/v3d/v3d_sched.c
931
if (v3d->queue[q].sched.ready)
drivers/gpu/drm/v3d/v3d_sched.c
932
drm_sched_fini(&v3d->queue[q].sched);
drivers/gpu/drm/xe/tests/xe_migrate.c
205
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
drivers/gpu/drm/xe/tests/xe_migrate.c
214
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
drivers/gpu/drm/xe/tests/xe_migrate.c
224
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
drivers/gpu/drm/xe/tests/xe_migrate.c
243
(unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
drivers/gpu/drm/xe/tests/xe_migrate.c
249
expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
drivers/gpu/drm/xe/tests/xe_migrate.c
250
if (m->q->vm->flags & XE_VM_FLAG_64K)
drivers/gpu/drm/xe/tests/xe_migrate.c
353
xe_vm_lock(m->q->vm, false);
drivers/gpu/drm/xe/tests/xe_migrate.c
355
xe_vm_unlock(m->q->vm);
drivers/gpu/drm/xe/tests/xe_migrate.c
42
struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
drivers/gpu/drm/xe/tests/xe_migrate.c
455
job = xe_bb_create_migration_job(m->q, bb,
drivers/gpu/drm/xe/xe_bb.c
105
xe_gt_assert(q->gt, bb->len * 4 + bb_prefetch(q->gt) <= size);
drivers/gpu/drm/xe/xe_bb.c
109
return xe_sched_job_create(q, addr);
drivers/gpu/drm/xe/xe_bb.c
112
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_bb.c
123
xe_gt_assert(q->gt, second_idx <= bb->len);
drivers/gpu/drm/xe/xe_bb.c
124
xe_gt_assert(q->gt, xe_sched_job_is_migration(q));
drivers/gpu/drm/xe/xe_bb.c
125
xe_gt_assert(q->gt, q->width == 1);
drivers/gpu/drm/xe/xe_bb.c
127
return __xe_bb_create_job(q, bb, addr);
drivers/gpu/drm/xe/xe_bb.c
130
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_bb.c
135
xe_gt_assert(q->gt, !xe_sched_job_is_migration(q));
drivers/gpu/drm/xe/xe_bb.c
136
xe_gt_assert(q->gt, q->width == 1);
drivers/gpu/drm/xe/xe_bb.c
137
return __xe_bb_create_job(q, bb, &addr);
drivers/gpu/drm/xe/xe_bb.c
98
__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
drivers/gpu/drm/xe/xe_bb.h
21
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_bb.h
23
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
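
xe_bb.c splits job creation into two thin public wrappers that assert mutually exclusive preconditions (migration vs. not, width == 1) before delegating to one internal helper. A hedged sketch of that guard-then-delegate shape with stand-in types; the kernel uses xe_gt_assert(), which only bites on debug builds, where this sketch uses assert():

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct job { uint64_t addr; };
struct queue { int width; bool is_migration; };

/* Shared internal constructor; a static object stands in for allocation. */
static struct job *create_job(struct queue *q, uint64_t addr)
{
        static struct job j;
        (void)q;
        j.addr = addr;
        return &j;
}

struct job *create_migration_job(struct queue *q, uint64_t addr)
{
        assert(q->is_migration);        /* mirrors xe_sched_job_is_migration(q) */
        assert(q->width == 1);
        return create_job(q, addr);
}

struct job *create_normal_job(struct queue *q, uint64_t addr)
{
        assert(!q->is_migration);       /* the two paths exclude each other */
        assert(q->width == 1);
        return create_job(q, addr);
}

int main(void)
{
        struct queue mig = { .width = 1, .is_migration = true };
        return create_migration_job(&mig, 0x1000) ? 0 : 1;
}
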
drivers/gpu/drm/xe/xe_devcoredump.c
324
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_devcoredump.c
328
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_devcoredump.c
335
if (q->vm && q->vm->xef) {
drivers/gpu/drm/xe/xe_devcoredump.c
336
process_name = q->vm->xef->process_name;
drivers/gpu/drm/xe/xe_devcoredump.c
337
ss->pid = q->vm->xef->pid;
drivers/gpu/drm/xe/xe_devcoredump.c
342
ss->gt = q->gt;
drivers/gpu/drm/xe/xe_devcoredump.c
346
CLASS(xe_force_wake, fw_ref)(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
drivers/gpu/drm/xe/xe_devcoredump.c
352
ss->ge = xe_guc_exec_queue_snapshot_capture(q);
drivers/gpu/drm/xe/xe_devcoredump.c
355
ss->vm = xe_vm_snapshot_capture(q->vm);
drivers/gpu/drm/xe/xe_devcoredump.c
357
xe_engine_snapshot_capture_for_queue(q);
drivers/gpu/drm/xe/xe_devcoredump.c
375
void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...)
drivers/gpu/drm/xe/xe_devcoredump.c
377
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_devcoredump.c
395
devcoredump_snapshot(coredump, q, job);
drivers/gpu/drm/xe/xe_devcoredump.c
77
static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_devcoredump.c
79
return &q->gt->uc.guc;
drivers/gpu/drm/xe/xe_devcoredump.h
17
void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...);
drivers/gpu/drm/xe/xe_devcoredump.h
20
static inline void xe_devcoredump(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_device.c
167
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_device.c
178
xa_for_each(&xef->exec_queue.xa, idx, q) {
drivers/gpu/drm/xe/xe_device.c
179
if (q->vm && q->hwe->hw_engine_group)
drivers/gpu/drm/xe/xe_device.c
180
xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
drivers/gpu/drm/xe/xe_device.c
181
xe_exec_queue_kill(q);
drivers/gpu/drm/xe/xe_device.c
182
xe_exec_queue_put(q);
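
The xe_device.c lines above walk the per-file xarray of exec queues at file close, detaching each queue from its hw engine group, killing it, and dropping the lookup reference. A userspace analogue of that iterate/kill/put loop; a plain array stands in for the xarray and all helpers are stand-ins:

#include <stdio.h>

struct queue { int id; int refs; };

static void queue_kill(struct queue *q) { printf("kill queue %d\n", q->id); }
static void queue_put(struct queue *q)  { q->refs--; }

int main(void)
{
        struct queue qs[] = { { 0, 1 }, { 1, 1 } };

        /* xa_for_each analogue: visit every live entry, tear it down,
         * then drop the reference the container held. */
        for (unsigned long idx = 0; idx < 2; idx++) {
                queue_kill(&qs[idx]);
                queue_put(&qs[idx]);
        }
        return 0;
}
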
drivers/gpu/drm/xe/xe_drm_client.c
322
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_drm_client.c
347
xa_for_each(&xef->exec_queue.xa, i, q) {
drivers/gpu/drm/xe/xe_drm_client.c
348
xe_exec_queue_get(q);
drivers/gpu/drm/xe/xe_drm_client.c
351
xe_exec_queue_update_run_ticks(q);
drivers/gpu/drm/xe/xe_drm_client.c
354
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_exec.c
118
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec.c
138
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
drivers/gpu/drm/xe/xe_exec.c
139
if (XE_IOCTL_DBG(xe, !q))
drivers/gpu/drm/xe/xe_exec.c
142
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
drivers/gpu/drm/xe/xe_exec.c
148
q->width != args->num_batch_buffer)) {
drivers/gpu/drm/xe/xe_exec.c
153
if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
drivers/gpu/drm/xe/xe_exec.c
158
if (atomic_read(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE) {
drivers/gpu/drm/xe/xe_exec.c
159
trace_xe_exec_queue_reach_max_job_count(q, XE_MAX_JOB_COUNT_PER_EXEC_QUEUE);
drivers/gpu/drm/xe/xe_exec.c
172
vm = q->vm;
drivers/gpu/drm/xe/xe_exec.c
195
if (args->num_batch_buffer && xe_exec_queue_is_parallel(q)) {
drivers/gpu/drm/xe/xe_exec.c
197
sizeof(u64) * q->width);
drivers/gpu/drm/xe/xe_exec.c
204
group = q->hwe->hw_engine_group;
drivers/gpu/drm/xe/xe_exec.c
205
mode = xe_hw_engine_group_find_exec_mode(q);
drivers/gpu/drm/xe/xe_exec.c
243
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
drivers/gpu/drm/xe/xe_exec.c
251
xe_exec_queue_last_fence_set(q, vm, fence);
drivers/gpu/drm/xe/xe_exec.c
276
if (xe_vm_is_closed_or_banned(q->vm)) {
drivers/gpu/drm/xe/xe_exec.c
282
if (xe_exec_queue_uses_pxp(q)) {
drivers/gpu/drm/xe/xe_exec.c
283
err = xe_vm_validate_protected(q->vm);
drivers/gpu/drm/xe/xe_exec.c
288
job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
drivers/gpu/drm/xe/xe_exec.c
335
xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
drivers/gpu/drm/xe/xe_exec.c
369
xe_exec_queue_put(q);
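
xe_exec.c follows the usual ioctl discipline visible above: look the exec queue up by id (taking a reference), fail early on each validation check, and drop the reference on every exit path. A compressed sketch of that flow; the checks mirror the listing (VM-only flag, width vs. batch-buffer count, reset status), while types, helpers, and the exact error codes are stand-ins:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct queue { int width; bool vm_only; bool was_reset; int refs; };

static struct queue *queue_lookup(int id)
{
        static struct queue q = { .width = 1, .refs = 1 };
        (void)id;
        return &q;
}

static void queue_put(struct queue *q) { q->refs--; }

static int do_exec(int id, int num_bb)
{
        struct queue *q = queue_lookup(id);
        int err = 0;

        if (!q)
                return -ENOENT;
        if (q->vm_only || q->width != num_bb || q->was_reset) {
                err = -EINVAL;  /* the driver uses a distinct code per check */
                goto out_put;
        }
        printf("submit %d batch buffer(s)\n", num_bb);
out_put:
        queue_put(q);           /* every exit path drops the lookup reference */
        return err;
}

int main(void) { return do_exec(0, 1) ? 1 : 0; }
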
drivers/gpu/drm/xe/xe_exec_queue.c
1001
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
1007
err = __exec_queue_user_extensions(xe, q, extensions, 0, &properties);
drivers/gpu/drm/xe/xe_exec_queue.c
1011
err = exec_queue_user_ext_check_final(q, properties);
drivers/gpu/drm/xe/xe_exec_queue.c
1015
if (xe_exec_queue_is_multi_queue_primary(q)) {
drivers/gpu/drm/xe/xe_exec_queue.c
1016
err = xe_exec_queue_group_init(xe, q);
drivers/gpu/drm/xe/xe_exec_queue.c
1098
struct xe_exec_queue *q = NULL;
drivers/gpu/drm/xe/xe_exec_queue.c
110
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
113
static void xe_exec_queue_group_cleanup(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
115
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_exec_queue.c
1159
if (q)
drivers/gpu/drm/xe/xe_exec_queue.c
1164
q = new;
drivers/gpu/drm/xe/xe_exec_queue.c
1167
&q->multi_gt_link);
drivers/gpu/drm/xe/xe_exec_queue.c
119
if (xe_exec_queue_is_multi_queue_secondary(q)) {
drivers/gpu/drm/xe/xe_exec_queue.c
1205
q = xe_exec_queue_create(xe, vm, logical_mask,
drivers/gpu/drm/xe/xe_exec_queue.c
1210
if (IS_ERR(q))
drivers/gpu/drm/xe/xe_exec_queue.c
1211
return PTR_ERR(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1213
if (xe_exec_queue_is_multi_queue_secondary(q)) {
drivers/gpu/drm/xe/xe_exec_queue.c
1214
err = xe_exec_queue_group_add(xe, q);
drivers/gpu/drm/xe/xe_exec_queue.c
1220
q->lr.context = dma_fence_context_alloc(1);
drivers/gpu/drm/xe/xe_exec_queue.c
1222
err = xe_vm_add_compute_exec_queue(vm, q);
drivers/gpu/drm/xe/xe_exec_queue.c
1227
if (q->vm && q->hwe->hw_engine_group) {
drivers/gpu/drm/xe/xe_exec_queue.c
1228
err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
drivers/gpu/drm/xe/xe_exec_queue.c
1234
q->xef = xe_file_get(xef);
drivers/gpu/drm/xe/xe_exec_queue.c
1237
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
drivers/gpu/drm/xe/xe_exec_queue.c
124
xe_exec_queue_put(xe_exec_queue_multi_queue_primary(q));
drivers/gpu/drm/xe/xe_exec_queue.c
1246
xe_exec_queue_kill(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1248
if (xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_exec_queue.c
1249
xe_exec_queue_group_delete(xe, q);
drivers/gpu/drm/xe/xe_exec_queue.c
1251
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1261
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec_queue.c
1267
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
drivers/gpu/drm/xe/xe_exec_queue.c
1268
if (XE_IOCTL_DBG(xe, !q))
drivers/gpu/drm/xe/xe_exec_queue.c
1273
args->value = q->ops->reset_status(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1280
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1295
struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1297
return q->lrc[0];
drivers/gpu/drm/xe/xe_exec_queue.c
1306
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1308
return q->vm && xe_vm_in_lr_mode(q->vm) &&
drivers/gpu/drm/xe/xe_exec_queue.c
1309
!(q->flags & EXEC_QUEUE_FLAG_VM);
drivers/gpu/drm/xe/xe_exec_queue.c
1326
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1328
if (xe_exec_queue_is_parallel(q)) {
drivers/gpu/drm/xe/xe_exec_queue.c
1331
for (i = 0; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_exec_queue.c
1332
if (xe_lrc_seqno(q->lrc[i]) !=
drivers/gpu/drm/xe/xe_exec_queue.c
1333
q->lrc[i]->fence_ctx.next_seqno - 1)
drivers/gpu/drm/xe/xe_exec_queue.c
1340
return xe_lrc_seqno(q->lrc[0]) ==
drivers/gpu/drm/xe/xe_exec_queue.c
1341
q->lrc[0]->fence_ctx.next_seqno - 1;
drivers/gpu/drm/xe/xe_exec_queue.c
1352
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1354
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_exec_queue.c
1363
if (!q->xef)
drivers/gpu/drm/xe/xe_exec_queue.c
1377
lrc = q->lrc[0];
drivers/gpu/drm/xe/xe_exec_queue.c
1379
q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
drivers/gpu/drm/xe/xe_exec_queue.c
1393
void xe_exec_queue_kill(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1395
struct xe_exec_queue *eq = q, *next;
drivers/gpu/drm/xe/xe_exec_queue.c
1399
q->ops->kill(eq);
drivers/gpu/drm/xe/xe_exec_queue.c
1400
xe_vm_remove_compute_exec_queue(q->vm, eq);
drivers/gpu/drm/xe/xe_exec_queue.c
1403
q->ops->kill(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1404
xe_vm_remove_compute_exec_queue(q->vm, q);
drivers/gpu/drm/xe/xe_exec_queue.c
141
static void __xe_exec_queue_free(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1413
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec_queue.c
1420
q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
drivers/gpu/drm/xe/xe_exec_queue.c
1421
if (q)
drivers/gpu/drm/xe/xe_exec_queue.c
1425
if (XE_IOCTL_DBG(xe, !q))
drivers/gpu/drm/xe/xe_exec_queue.c
1428
if (q->vm && q->hwe->hw_engine_group)
drivers/gpu/drm/xe/xe_exec_queue.c
1429
xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
drivers/gpu/drm/xe/xe_exec_queue.c
1431
xe_exec_queue_kill(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1433
trace_xe_exec_queue_close(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1434
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1439
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
1442
if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
drivers/gpu/drm/xe/xe_exec_queue.c
1443
xe_migrate_job_lock_assert(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1444
} else if (q->flags & EXEC_QUEUE_FLAG_VM) {
drivers/gpu/drm/xe/xe_exec_queue.c
1448
lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
drivers/gpu/drm/xe/xe_exec_queue.c
1457
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
drivers/gpu/drm/xe/xe_exec_queue.c
1459
xe_exec_queue_last_fence_lockdep_assert(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
146
if (q->tlb_inval[i].dep_scheduler)
drivers/gpu/drm/xe/xe_exec_queue.c
1461
xe_exec_queue_last_fence_put_unlocked(q);
drivers/gpu/drm/xe/xe_exec_queue.c
147
xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);
drivers/gpu/drm/xe/xe_exec_queue.c
1470
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1472
if (q->last_fence) {
drivers/gpu/drm/xe/xe_exec_queue.c
1473
dma_fence_put(q->last_fence);
drivers/gpu/drm/xe/xe_exec_queue.c
1474
q->last_fence = NULL;
drivers/gpu/drm/xe/xe_exec_queue.c
1487
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
149
if (xe_exec_queue_uses_pxp(q))
drivers/gpu/drm/xe/xe_exec_queue.c
1492
xe_exec_queue_last_fence_lockdep_assert(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
1494
if (q->last_fence &&
drivers/gpu/drm/xe/xe_exec_queue.c
1495
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
drivers/gpu/drm/xe/xe_exec_queue.c
1496
xe_exec_queue_last_fence_put(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
1498
fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
drivers/gpu/drm/xe/xe_exec_queue.c
150
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
drivers/gpu/drm/xe/xe_exec_queue.c
1514
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
1519
lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
drivers/gpu/drm/xe/xe_exec_queue.c
152
if (xe_exec_queue_is_multi_queue(q))
drivers/gpu/drm/xe/xe_exec_queue.c
1521
if (q->last_fence &&
drivers/gpu/drm/xe/xe_exec_queue.c
1522
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
drivers/gpu/drm/xe/xe_exec_queue.c
1523
xe_exec_queue_last_fence_put_unlocked(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1525
fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
drivers/gpu/drm/xe/xe_exec_queue.c
153
xe_exec_queue_group_cleanup(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1539
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
drivers/gpu/drm/xe/xe_exec_queue.c
1542
xe_exec_queue_last_fence_lockdep_assert(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
1545
xe_exec_queue_last_fence_put(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
1546
q->last_fence = dma_fence_get(fence);
drivers/gpu/drm/xe/xe_exec_queue.c
155
if (q->vm)
drivers/gpu/drm/xe/xe_exec_queue.c
1555
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
1559
xe_exec_queue_last_fence_lockdep_assert(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
156
xe_vm_put(q->vm);
drivers/gpu/drm/xe/xe_exec_queue.c
1563
xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
drivers/gpu/drm/xe/xe_exec_queue.c
1574
void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
1577
xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
drivers/gpu/drm/xe/xe_exec_queue.c
158
if (q->xef)
drivers/gpu/drm/xe/xe_exec_queue.c
1580
dma_fence_put(q->tlb_inval[type].last_fence);
drivers/gpu/drm/xe/xe_exec_queue.c
1581
q->tlb_inval[type].last_fence = NULL;
drivers/gpu/drm/xe/xe_exec_queue.c
159
xe_file_put(q->xef);
drivers/gpu/drm/xe/xe_exec_queue.c
1594
struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
1600
xe_exec_queue_last_fence_lockdep_assert(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
1603
xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
drivers/gpu/drm/xe/xe_exec_queue.c
1606
if (q->tlb_inval[type].last_fence &&
drivers/gpu/drm/xe/xe_exec_queue.c
1608
&q->tlb_inval[type].last_fence->flags))
drivers/gpu/drm/xe/xe_exec_queue.c
1609
xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
drivers/gpu/drm/xe/xe_exec_queue.c
161
kvfree(q->replay_state);
drivers/gpu/drm/xe/xe_exec_queue.c
1611
fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();
drivers/gpu/drm/xe/xe_exec_queue.c
162
kfree(q);
drivers/gpu/drm/xe/xe_exec_queue.c
1627
void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
1632
xe_exec_queue_last_fence_lockdep_assert(q, vm);
drivers/gpu/drm/xe/xe_exec_queue.c
1635
xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
drivers/gpu/drm/xe/xe_exec_queue.c
1639
xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
drivers/gpu/drm/xe/xe_exec_queue.c
1640
q->tlb_inval[type].last_fence = dma_fence_get(fence);
drivers/gpu/drm/xe/xe_exec_queue.c
165
static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
1651
int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
drivers/gpu/drm/xe/xe_exec_queue.c
1656
for (i = 0; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_exec_queue.c
1660
lrc = READ_ONCE(q->lrc[i]);
drivers/gpu/drm/xe/xe_exec_queue.c
1664
xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
drivers/gpu/drm/xe/xe_exec_queue.c
1666
err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
drivers/gpu/drm/xe/xe_exec_queue.c
167
struct xe_tile *tile = gt_to_tile(q->gt);
drivers/gpu/drm/xe/xe_exec_queue.c
186
dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
drivers/gpu/drm/xe/xe_exec_queue.c
191
q->tlb_inval[i].dep_scheduler = dep_scheduler;
drivers/gpu/drm/xe/xe_exec_queue.c
204
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec_queue.c
211
q = kzalloc_flex(*q, lrc, width);
drivers/gpu/drm/xe/xe_exec_queue.c
212
if (!q)
drivers/gpu/drm/xe/xe_exec_queue.c
215
kref_init(&q->refcount);
drivers/gpu/drm/xe/xe_exec_queue.c
216
q->flags = flags;
drivers/gpu/drm/xe/xe_exec_queue.c
217
q->hwe = hwe;
drivers/gpu/drm/xe/xe_exec_queue.c
218
q->gt = gt;
drivers/gpu/drm/xe/xe_exec_queue.c
219
q->class = hwe->class;
drivers/gpu/drm/xe/xe_exec_queue.c
220
q->width = width;
drivers/gpu/drm/xe/xe_exec_queue.c
221
q->msix_vec = XE_IRQ_DEFAULT_MSIX;
drivers/gpu/drm/xe/xe_exec_queue.c
222
q->logical_mask = logical_mask;
drivers/gpu/drm/xe/xe_exec_queue.c
223
q->fence_irq = &gt->fence_irq[hwe->class];
drivers/gpu/drm/xe/xe_exec_queue.c
224
q->ring_ops = gt->ring_ops[hwe->class];
drivers/gpu/drm/xe/xe_exec_queue.c
225
q->ops = gt->exec_queue_ops;
drivers/gpu/drm/xe/xe_exec_queue.c
226
INIT_LIST_HEAD(&q->lr.link);
drivers/gpu/drm/xe/xe_exec_queue.c
227
INIT_LIST_HEAD(&q->multi_gt_link);
drivers/gpu/drm/xe/xe_exec_queue.c
228
INIT_LIST_HEAD(&q->hw_engine_group_link);
drivers/gpu/drm/xe/xe_exec_queue.c
229
INIT_LIST_HEAD(&q->pxp.link);
drivers/gpu/drm/xe/xe_exec_queue.c
230
q->multi_queue.priority = XE_MULTI_QUEUE_PRIORITY_NORMAL;
drivers/gpu/drm/xe/xe_exec_queue.c
232
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
drivers/gpu/drm/xe/xe_exec_queue.c
233
q->sched_props.preempt_timeout_us =
drivers/gpu/drm/xe/xe_exec_queue.c
235
q->sched_props.job_timeout_ms =
drivers/gpu/drm/xe/xe_exec_queue.c
237
if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
drivers/gpu/drm/xe/xe_exec_queue.c
238
q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
drivers/gpu/drm/xe/xe_exec_queue.c
239
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
drivers/gpu/drm/xe/xe_exec_queue.c
241
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
drivers/gpu/drm/xe/xe_exec_queue.c
243
if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
drivers/gpu/drm/xe/xe_exec_queue.c
244
err = alloc_dep_schedulers(xe, q);
drivers/gpu/drm/xe/xe_exec_queue.c
246
__xe_exec_queue_free(q);
drivers/gpu/drm/xe/xe_exec_queue.c
252
q->vm = xe_vm_get(vm);
drivers/gpu/drm/xe/xe_exec_queue.c
259
err = exec_queue_user_extensions(xe, q, extensions);
drivers/gpu/drm/xe/xe_exec_queue.c
261
__xe_exec_queue_free(q);
drivers/gpu/drm/xe/xe_exec_queue.c
266
return q;
drivers/gpu/drm/xe/xe_exec_queue.c
269
static void __xe_exec_queue_fini(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
273
q->ops->fini(q);
drivers/gpu/drm/xe/xe_exec_queue.c
275
for (i = 0; i < q->width; ++i)
drivers/gpu/drm/xe/xe_exec_queue.c
276
xe_lrc_put(q->lrc[i]);
drivers/gpu/drm/xe/xe_exec_queue.c
279
static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
drivers/gpu/drm/xe/xe_exec_queue.c
290
if (xe_exec_queue_uses_pxp(q) &&
drivers/gpu/drm/xe/xe_exec_queue.c
291
(q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
drivers/gpu/drm/xe/xe_exec_queue.c
292
if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
drivers/gpu/drm/xe/xe_exec_queue.c
301
err = q->ops->init(q);
drivers/gpu/drm/xe/xe_exec_queue.c
315
for (i = 0; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_exec_queue.c
318
xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
drivers/gpu/drm/xe/xe_exec_queue.c
319
lrc = xe_lrc_create(q->hwe, q->vm, q->replay_state,
drivers/gpu/drm/xe/xe_exec_queue.c
320
xe_lrc_ring_size(), q->msix_vec, flags);
drivers/gpu/drm/xe/xe_exec_queue.c
327
WRITE_ONCE(q->lrc[i], lrc);
drivers/gpu/drm/xe/xe_exec_queue.c
333
__xe_exec_queue_fini(q);
drivers/gpu/drm/xe/xe_exec_queue.c
342
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec_queue.c
348
q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
drivers/gpu/drm/xe/xe_exec_queue.c
350
if (IS_ERR(q))
drivers/gpu/drm/xe/xe_exec_queue.c
351
return q;
drivers/gpu/drm/xe/xe_exec_queue.c
353
err = __xe_exec_queue_init(q, flags);
drivers/gpu/drm/xe/xe_exec_queue.c
364
if (xe_exec_queue_uses_pxp(q)) {
drivers/gpu/drm/xe/xe_exec_queue.c
365
err = xe_pxp_exec_queue_add(xe->pxp, q);
drivers/gpu/drm/xe/xe_exec_queue.c
370
return q;
drivers/gpu/drm/xe/xe_exec_queue.c
373
__xe_exec_queue_fini(q);
drivers/gpu/drm/xe/xe_exec_queue.c
375
__xe_exec_queue_free(q);
drivers/gpu/drm/xe/xe_exec_queue.c
429
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec_queue.c
444
q = xe_exec_queue_create(xe, migrate_vm,
drivers/gpu/drm/xe/xe_exec_queue.c
448
q = xe_exec_queue_create_class(xe, gt, migrate_vm,
drivers/gpu/drm/xe/xe_exec_queue.c
454
if (!IS_ERR(q)) {
drivers/gpu/drm/xe/xe_exec_queue.c
455
int err = drm_syncobj_create(&q->ufence_syncobj,
drivers/gpu/drm/xe/xe_exec_queue.c
459
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_exec_queue.c
464
q->user_vm = xe_vm_get(user_vm);
drivers/gpu/drm/xe/xe_exec_queue.c
467
return q;
drivers/gpu/drm/xe/xe_exec_queue.c
473
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
drivers/gpu/drm/xe/xe_exec_queue.c
477
xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
drivers/gpu/drm/xe/xe_exec_queue.c
479
if (q->ufence_syncobj)
drivers/gpu/drm/xe/xe_exec_queue.c
480
drm_syncobj_put(q->ufence_syncobj);
drivers/gpu/drm/xe/xe_exec_queue.c
482
if (xe_exec_queue_uses_pxp(q))
drivers/gpu/drm/xe/xe_exec_queue.c
483
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
drivers/gpu/drm/xe/xe_exec_queue.c
485
xe_exec_queue_last_fence_put_unlocked(q);
drivers/gpu/drm/xe/xe_exec_queue.c
487
xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);
drivers/gpu/drm/xe/xe_exec_queue.c
489
if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
drivers/gpu/drm/xe/xe_exec_queue.c
490
list_for_each_entry_safe(eq, next, &q->multi_gt_list,
drivers/gpu/drm/xe/xe_exec_queue.c
495
if (q->user_vm) {
drivers/gpu/drm/xe/xe_exec_queue.c
496
xe_vm_put(q->user_vm);
drivers/gpu/drm/xe/xe_exec_queue.c
497
q->user_vm = NULL;
drivers/gpu/drm/xe/xe_exec_queue.c
500
q->ops->destroy(q);
drivers/gpu/drm/xe/xe_exec_queue.c
503
void xe_exec_queue_fini(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
509
xe_exec_queue_update_run_ticks(q);
drivers/gpu/drm/xe/xe_exec_queue.c
510
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
drivers/gpu/drm/xe/xe_exec_queue.c
511
wake_up_var(&q->xef->exec_queue.pending_removal);
drivers/gpu/drm/xe/xe_exec_queue.c
513
__xe_exec_queue_fini(q);
drivers/gpu/drm/xe/xe_exec_queue.c
514
__xe_exec_queue_free(q);
drivers/gpu/drm/xe/xe_exec_queue.c
517
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
drivers/gpu/drm/xe/xe_exec_queue.c
519
switch (q->class) {
drivers/gpu/drm/xe/xe_exec_queue.c
521
snprintf(q->name, sizeof(q->name), "rcs%d", instance);
drivers/gpu/drm/xe/xe_exec_queue.c
524
snprintf(q->name, sizeof(q->name), "vcs%d", instance);
drivers/gpu/drm/xe/xe_exec_queue.c
527
snprintf(q->name, sizeof(q->name), "vecs%d", instance);
drivers/gpu/drm/xe/xe_exec_queue.c
530
snprintf(q->name, sizeof(q->name), "bcs%d", instance);
drivers/gpu/drm/xe/xe_exec_queue.c
533
snprintf(q->name, sizeof(q->name), "ccs%d", instance);
drivers/gpu/drm/xe/xe_exec_queue.c
536
snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
drivers/gpu/drm/xe/xe_exec_queue.c
539
XE_WARN_ON(q->class);
drivers/gpu/drm/xe/xe_exec_queue.c
545
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec_queue.c
548
q = xa_load(&xef->exec_queue.xa, id);
drivers/gpu/drm/xe/xe_exec_queue.c
549
if (q)
drivers/gpu/drm/xe/xe_exec_queue.c
550
xe_exec_queue_get(q);
drivers/gpu/drm/xe/xe_exec_queue.c
553
return q;
drivers/gpu/drm/xe/xe_exec_queue.c
563
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
572
q->sched_props.priority = value;
drivers/gpu/drm/xe/xe_exec_queue.c
628
static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
633
xe_exec_queue_get_prop_minmax(q->hwe->eclass,
drivers/gpu/drm/xe/xe_exec_queue.c
640
q->sched_props.timeslice_us = value;
drivers/gpu/drm/xe/xe_exec_queue.c
645
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
drivers/gpu/drm/xe/xe_exec_queue.c
657
return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
drivers/gpu/drm/xe/xe_exec_queue.c
661
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
664
size_t size = xe_gt_lrc_hang_replay_size(q->gt, q->class);
drivers/gpu/drm/xe/xe_exec_queue.c
672
q->replay_state = ptr;
drivers/gpu/drm/xe/xe_exec_queue.c
677
static int xe_exec_queue_group_init(struct xe_device *xe, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
679
struct xe_tile *tile = gt_to_tile(q->gt);
drivers/gpu/drm/xe/xe_exec_queue.c
702
group->primary = q;
drivers/gpu/drm/xe/xe_exec_queue.c
707
q->multi_queue.group = group;
drivers/gpu/drm/xe/xe_exec_queue.c
719
static inline bool xe_exec_queue_supports_multi_queue(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
721
return q->gt->info.multi_queue_engine_class_mask & BIT(q->class);
drivers/gpu/drm/xe/xe_exec_queue.c
724
static int xe_exec_queue_group_validate(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
735
primary = xe_exec_queue_lookup(q->vm->xef, primary_id);
drivers/gpu/drm/xe/xe_exec_queue.c
740
XE_IOCTL_DBG(xe, q->vm != primary->vm) ||
drivers/gpu/drm/xe/xe_exec_queue.c
741
XE_IOCTL_DBG(xe, q->logical_mask != primary->logical_mask)) {
drivers/gpu/drm/xe/xe_exec_queue.c
747
q->multi_queue.valid = true;
drivers/gpu/drm/xe/xe_exec_queue.c
748
q->multi_queue.group = group;
drivers/gpu/drm/xe/xe_exec_queue.c
757
static int xe_exec_queue_group_add(struct xe_device *xe, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
759
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_exec_queue.c
763
xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));
drivers/gpu/drm/xe/xe_exec_queue.c
766
err = xa_alloc(&group->xa, &pos, xe_lrc_get(q->lrc[0]),
drivers/gpu/drm/xe/xe_exec_queue.c
769
xe_lrc_put(q->lrc[0]);
drivers/gpu/drm/xe/xe_exec_queue.c
778
q->multi_queue.pos = pos;
drivers/gpu/drm/xe/xe_exec_queue.c
783
static void xe_exec_queue_group_delete(struct xe_device *xe, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.c
785
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_exec_queue.c
788
xe_assert(xe, xe_exec_queue_is_multi_queue_secondary(q));
drivers/gpu/drm/xe/xe_exec_queue.c
790
lrc = xa_erase(&group->xa, q->multi_queue.pos);
drivers/gpu/drm/xe/xe_exec_queue.c
795
static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
798
if (XE_IOCTL_DBG(xe, !xe_exec_queue_supports_multi_queue(q)))
drivers/gpu/drm/xe/xe_exec_queue.c
804
if (XE_IOCTL_DBG(xe, !q->vm->xef))
drivers/gpu/drm/xe/xe_exec_queue.c
807
if (XE_IOCTL_DBG(xe, xe_exec_queue_is_parallel(q)))
drivers/gpu/drm/xe/xe_exec_queue.c
810
if (XE_IOCTL_DBG(xe, xe_exec_queue_is_multi_queue(q)))
drivers/gpu/drm/xe/xe_exec_queue.c
817
q->multi_queue.valid = true;
drivers/gpu/drm/xe/xe_exec_queue.c
818
q->multi_queue.is_primary = true;
drivers/gpu/drm/xe/xe_exec_queue.c
819
q->multi_queue.pos = 0;
drivers/gpu/drm/xe/xe_exec_queue.c
827
return xe_exec_queue_group_validate(xe, q, value);
drivers/gpu/drm/xe/xe_exec_queue.c
830
static int exec_queue_set_multi_queue_priority(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
837
if (!q->xef) {
drivers/gpu/drm/xe/xe_exec_queue.c
838
q->multi_queue.priority = value;
drivers/gpu/drm/xe/xe_exec_queue.c
842
if (!xe_exec_queue_is_multi_queue(q))
drivers/gpu/drm/xe/xe_exec_queue.c
845
return q->ops->set_multi_queue_priority(q, value);
drivers/gpu/drm/xe/xe_exec_queue.c
849
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
868
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_exec_queue.c
879
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
drivers/gpu/drm/xe/xe_exec_queue.c
880
if (XE_IOCTL_DBG(xe, !q))
drivers/gpu/drm/xe/xe_exec_queue.c
885
ret = exec_queue_set_property_funcs[idx](xe, q, args->value);
drivers/gpu/drm/xe/xe_exec_queue.c
889
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_exec_queue.c
893
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_exec_queue.c
897
static int exec_queue_user_ext_check(struct xe_exec_queue *q, u64 properties)
drivers/gpu/drm/xe/xe_exec_queue.c
906
if (xe_exec_queue_is_multi_queue_secondary(q) &&
drivers/gpu/drm/xe/xe_exec_queue.c
913
static int exec_queue_user_ext_check_final(struct xe_exec_queue *q, u64 properties)
drivers/gpu/drm/xe/xe_exec_queue.c
924
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
952
err = exec_queue_user_ext_check(q, *properties);
drivers/gpu/drm/xe/xe_exec_queue.c
956
return exec_queue_set_property_funcs[idx](xe, q, ext.value);
drivers/gpu/drm/xe/xe_exec_queue.c
960
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
968
static int __exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.c
990
err = exec_queue_user_extension_funcs[idx](xe, q, extensions, properties);
drivers/gpu/drm/xe/xe_exec_queue.c
995
return __exec_queue_user_extensions(xe, q, ext.next_extension,
drivers/gpu/drm/xe/xe_exec_queue.h
100
static inline bool xe_exec_queue_is_multi_queue_secondary(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
102
return xe_exec_queue_is_multi_queue(q) && !xe_exec_queue_is_multi_queue_primary(q);
drivers/gpu/drm/xe/xe_exec_queue.h
112
static inline struct xe_exec_queue *xe_exec_queue_multi_queue_primary(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
114
return xe_exec_queue_is_multi_queue(q) ? q->multi_queue.group->primary : q;
drivers/gpu/drm/xe/xe_exec_queue.h
117
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue.h
119
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue.h
121
void xe_exec_queue_kill(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue.h
142
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.h
146
void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.h
149
struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.h
153
void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue.h
158
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue.h
160
int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
drivers/gpu/drm/xe/xe_exec_queue.h
162
struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue.h
176
static inline bool xe_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
178
return !xe_exec_queue_is_parallel(q) && xe_exec_queue_is_idle(q);
drivers/gpu/drm/xe/xe_exec_queue.h
34
void xe_exec_queue_fini(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue.h
36
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);
drivers/gpu/drm/xe/xe_exec_queue.h
39
xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
41
if (kref_get_unless_zero(&q->refcount))
drivers/gpu/drm/xe/xe_exec_queue.h
42
return q;
drivers/gpu/drm/xe/xe_exec_queue.h
49
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
51
kref_get(&q->refcount);
drivers/gpu/drm/xe/xe_exec_queue.h
52
return q;
drivers/gpu/drm/xe/xe_exec_queue.h
55
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
57
kref_put(&q->refcount, xe_exec_queue_destroy);
drivers/gpu/drm/xe/xe_exec_queue.h
60
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
62
return q->width > 1;
drivers/gpu/drm/xe/xe_exec_queue.h
65
static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
67
return q->pxp.type;
drivers/gpu/drm/xe/xe_exec_queue.h
76
static inline bool xe_exec_queue_is_multi_queue(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
78
return q->multi_queue.valid;
drivers/gpu/drm/xe/xe_exec_queue.h
88
static inline bool xe_exec_queue_is_multi_queue_primary(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_exec_queue.h
90
return q->multi_queue.is_primary;
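
The xe_exec_queue.h entries pin the queue lifetime on a kref: xe_exec_queue_get() takes an unconditional reference, xe_exec_queue_get_unless_zero() makes lookups safe against a queue already being torn down, and xe_exec_queue_put() runs the destructor on the last drop. A userspace analogue with C11 atomics (kref itself is kernel-only; the release callback here just prints):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct queue { atomic_int refcount; };

static void queue_release(struct queue *q) { (void)q; printf("released\n"); }

static struct queue *queue_get(struct queue *q)
{
        atomic_fetch_add(&q->refcount, 1);      /* kref_get() analogue */
        return q;
}

static struct queue *queue_get_unless_zero(struct queue *q)
{
        int old = atomic_load(&q->refcount);

        while (old != 0)                        /* kref_get_unless_zero() */
                if (atomic_compare_exchange_weak(&q->refcount, &old, old + 1))
                        return q;
        return NULL;                            /* already dying: lookup fails */
}

static void queue_put(struct queue *q)
{
        if (atomic_fetch_sub(&q->refcount, 1) == 1)  /* kref_put() analogue */
                queue_release(q);
}

int main(void)
{
        struct queue q = { 1 };

        queue_get(&q);
        queue_get_unless_zero(&q);
        queue_put(&q);
        queue_put(&q);
        queue_put(&q);                          /* last ref: release fires */
        return 0;
}
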
drivers/gpu/drm/xe/xe_exec_queue_types.h
259
int (*init)(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
261
void (*kill)(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
263
void (*fini)(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
269
void (*destroy)(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
271
int (*set_priority)(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue_types.h
274
int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
drivers/gpu/drm/xe/xe_exec_queue_types.h
276
int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
drivers/gpu/drm/xe/xe_exec_queue_types.h
278
int (*set_multi_queue_priority)(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_exec_queue_types.h
285
int (*suspend)(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
295
int (*suspend_wait)(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
301
void (*resume)(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_exec_queue_types.h
303
bool (*reset_status)(struct xe_exec_queue *q);
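
xe_exec_queue_types.h routes every lifecycle operation through a per-backend ops table (init/kill/fini/destroy, suspend/resume, the set_* tunables), which is how the GuC and execlist backends share one queue type. A minimal sketch of the pattern with a single toy backend standing in for both:

#include <stdio.h>

struct queue;

struct queue_ops {
        int  (*init)(struct queue *q);
        void (*kill)(struct queue *q);
        void (*fini)(struct queue *q);
};

struct queue { const struct queue_ops *ops; };

static int  toy_init(struct queue *q) { (void)q; puts("init"); return 0; }
static void toy_kill(struct queue *q) { (void)q; puts("kill"); }
static void toy_fini(struct queue *q) { (void)q; puts("fini"); }

static const struct queue_ops toy_ops = {
        .init = toy_init, .kill = toy_kill, .fini = toy_fini,
};

int main(void)
{
        struct queue q = { .ops = &toy_ops };

        q.ops->init(&q);        /* mirrors q->ops->init(q) in the listing */
        q.ops->kill(&q);
        q.ops->fini(&q);
        return 0;
}
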
drivers/gpu/drm/xe/xe_execlist.c
116
__start_lrc(port->hwe, exl->q->lrc[0], port->last_ctx_id);
drivers/gpu/drm/xe/xe_execlist.c
137
struct xe_lrc *lrc = exl->q->lrc[0];
drivers/gpu/drm/xe/xe_execlist.c
218
enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
drivers/gpu/drm/xe/xe_execlist.c
313
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_execlist.c
314
struct xe_execlist_exec_queue *exl = job->q->execlist;
drivers/gpu/drm/xe/xe_execlist.c
316
q->ring_ops->emit_job(job);
drivers/gpu/drm/xe/xe_execlist.c
326
xe_exec_queue_update_run_ticks(job->q);
drivers/gpu/drm/xe/xe_execlist.c
335
static int execlist_exec_queue_init(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist.c
344
.name = q->hwe->name,
drivers/gpu/drm/xe/xe_execlist.c
345
.dev = gt_to_xe(q->gt)->drm.dev,
drivers/gpu/drm/xe/xe_execlist.c
348
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_execlist.c
359
exl->q = q;
drivers/gpu/drm/xe/xe_execlist.c
370
exl->port = q->hwe->exl_port;
drivers/gpu/drm/xe/xe_execlist.c
373
q->execlist = exl;
drivers/gpu/drm/xe/xe_execlist.c
374
q->entity = &exl->entity;
drivers/gpu/drm/xe/xe_execlist.c
376
xe_exec_queue_assign_name(q, ffs(q->logical_mask) - 1);
drivers/gpu/drm/xe/xe_execlist.c
387
static void execlist_exec_queue_fini(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist.c
389
struct xe_execlist_exec_queue *exl = q->execlist;
drivers/gpu/drm/xe/xe_execlist.c
401
struct xe_exec_queue *q = ee->q;
drivers/gpu/drm/xe/xe_execlist.c
402
struct xe_execlist_exec_queue *exl = q->execlist;
drivers/gpu/drm/xe/xe_execlist.c
403
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_execlist.c
413
xe_exec_queue_fini(q);
drivers/gpu/drm/xe/xe_execlist.c
416
static void execlist_exec_queue_kill(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist.c
421
static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist.c
423
INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
drivers/gpu/drm/xe/xe_execlist.c
424
queue_work(system_unbound_wq, &q->execlist->destroy_async);
drivers/gpu/drm/xe/xe_execlist.c
427
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_execlist.c
434
static int execlist_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
drivers/gpu/drm/xe/xe_execlist.c
440
static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_execlist.c
447
static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist.c
453
static int execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist.c
460
static void execlist_exec_queue_resume(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist.c
465
static bool execlist_exec_queue_reset_status(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_execlist_types.h
35
struct xe_exec_queue *q;
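
execlist_exec_queue_destroy() above does no teardown itself: it initializes a work item and queues it on system_unbound_wq, so the real fini runs later in another context. A userspace analogue using a thread in place of the workqueue, stand-in names throughout:

#include <pthread.h>
#include <stdio.h>

struct queue { int id; };

static void *destroy_async(void *arg)
{
        struct queue *q = arg;

        printf("deferred teardown of queue %d\n", q->id);
        return NULL;
}

static void queue_destroy(struct queue *q, pthread_t *worker)
{
        /* INIT_WORK + queue_work analogue: schedule, don't run inline */
        pthread_create(worker, NULL, destroy_async, q);
}

int main(void)
{
        struct queue q = { 7 };
        pthread_t worker;

        queue_destroy(&q, &worker);
        pthread_join(worker, NULL);     /* the kernel lets the workqueue drain */
        return 0;
}
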
drivers/gpu/drm/xe/xe_gsc.c
209
xe_assert(xe, xe_uc_fw_is_loadable(&gsc->fw) && gsc->q);
drivers/gpu/drm/xe/xe_gsc.c
450
if (gsc->q) {
drivers/gpu/drm/xe/xe_gsc.c
451
xe_exec_queue_put(gsc->q);
drivers/gpu/drm/xe/xe_gsc.c
452
gsc->q = NULL;
drivers/gpu/drm/xe/xe_gsc.c
462
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_gsc.c
479
q = xe_exec_queue_create(xe, NULL,
drivers/gpu/drm/xe/xe_gsc.c
483
if (IS_ERR(q)) {
drivers/gpu/drm/xe/xe_gsc.c
485
err = PTR_ERR(q);
drivers/gpu/drm/xe/xe_gsc.c
496
gsc->q = q;
drivers/gpu/drm/xe/xe_gsc.c
508
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_gsc.c
519
if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
drivers/gpu/drm/xe/xe_gsc.c
89
job = xe_bb_create_job(gsc->q, bb);
drivers/gpu/drm/xe/xe_gsc_submit.c
199
job = xe_bb_create_job(gsc->q, bb);
drivers/gpu/drm/xe/xe_gsc_types.h
36
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_gt.c
173
static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
drivers/gpu/drm/xe/xe_gt.c
180
job = xe_bb_create_job(q, bb);
drivers/gpu/drm/xe/xe_gt.c
198
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_gt.c
207
ret = emit_job_sync(q, bb, HZ);
drivers/gpu/drm/xe/xe_gt.c
216
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_gt.c
218
struct xe_hw_engine *hwe = q->hwe;
drivers/gpu/drm/xe/xe_gt.c
363
*cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(q->hwe->mmio_base).addr;
drivers/gpu/drm/xe/xe_gt.c
368
cs = xe_lrc_emit_hwe_state_instructions(q, cs);
drivers/gpu/drm/xe/xe_gt.c
372
ret = emit_job_sync(q, bb, HZ);
drivers/gpu/drm/xe/xe_gt.c
387
struct xe_exec_queue *q, *nop_q;
drivers/gpu/drm/xe/xe_gt.c
404
q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
drivers/gpu/drm/xe/xe_gt.c
406
if (IS_ERR(q)) {
drivers/gpu/drm/xe/xe_gt.c
407
err = PTR_ERR(q);
drivers/gpu/drm/xe/xe_gt.c
409
hwe->name, q);
drivers/gpu/drm/xe/xe_gt.c
414
err = emit_wa_job(gt, q);
drivers/gpu/drm/xe/xe_gt.c
417
hwe->name, ERR_PTR(err), q->guc->id);
drivers/gpu/drm/xe/xe_gt.c
439
&q->lrc[0]->bo->vmap,
drivers/gpu/drm/xe/xe_gt.c
440
xe_lrc_pphwsp_offset(q->lrc[0]),
drivers/gpu/drm/xe/xe_gt.c
447
xe_exec_queue_put(q);
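
emit_job_sync() in xe_gt.c packages a batch buffer into a job, submits it, and blocks on the job's fence with a timeout (HZ, about one second, in the callers above). A sketch of that create/submit/wait shape; every helper here is a stand-in for the kernel's job and dma_fence machinery:

#include <errno.h>
#include <stdio.h>

struct bb { int len; };
struct job { long fence; };

static struct job *create_job(struct bb *bb) { (void)bb; static struct job j; return &j; }
static void job_arm_and_push(struct job *job) { job->fence = 1; }
static long fence_wait_timeout(long fence, long timeout)
{
        (void)fence;
        return timeout;                 /* pretend the fence signalled in time */
}

static int emit_job_sync(struct bb *bb, long timeout)
{
        struct job *job = create_job(bb);
        long timeleft;

        if (!job)
                return -ENOMEM;
        job_arm_and_push(job);
        timeleft = fence_wait_timeout(job->fence, timeout);
        if (timeleft <= 0)
                return -ETIMEDOUT;      /* mirrors the kernel's timeout handling */
        return 0;
}

int main(void)
{
        struct bb bb = { 4 };

        return emit_job_sync(&bb, 1000);
}
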
drivers/gpu/drm/xe/xe_guc_capture.c
1877
xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_capture.c
1885
if (!q || !q->gt)
drivers/gpu/drm/xe/xe_guc_capture.c
1888
xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_guc_capture.c
1904
for_each_hw_engine(hwe, q->gt, id) {
drivers/gpu/drm/xe/xe_guc_capture.c
1905
if (hwe != q->hwe)
drivers/gpu/drm/xe/xe_guc_capture.c
1913
struct xe_guc *guc = &q->gt->uc.guc;
drivers/gpu/drm/xe/xe_guc_capture.c
1914
u16 guc_id = q->guc->id;
drivers/gpu/drm/xe/xe_guc_capture.c
1915
u32 lrca = xe_lrc_ggtt_addr(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_capture.c
1943
xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_capture.c
1945
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_guc_capture.c
1949
u32 adj_logical_mask = q->logical_mask;
drivers/gpu/drm/xe/xe_guc_capture.c
1954
for_each_hw_engine(hwe, q->gt, id) {
drivers/gpu/drm/xe/xe_guc_capture.c
1955
if (hwe->class != q->hwe->class ||
drivers/gpu/drm/xe/xe_guc_capture.c
1963
xe_hw_engine_snapshot_capture(hwe, q);
drivers/gpu/drm/xe/xe_guc_capture.c
1967
new = xe_guc_capture_get_matching_and_lock(q);
drivers/gpu/drm/xe/xe_guc_capture.c
1969
struct xe_guc *guc = &q->gt->uc.guc;
drivers/gpu/drm/xe/xe_guc_capture.h
53
struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_guc_capture.h
56
void xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
22
struct xe_exec_queue *q;
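
The xe_guc_submit.c entries below keep each queue's lifecycle in one atomic state word, one bit per condition (ENABLED, PENDING_ENABLE, PENDING_DISABLE, DESTROYED, BANNED, SUSPENDED): set with atomic_or(), cleared with atomic_and() of the complement, tested with atomic_read(). A C11 analogue of three of those helpers:

#include <stdatomic.h>
#include <stdio.h>

#define STATE_ENABLED           (1u << 0)
#define STATE_PENDING_ENABLE    (1u << 1)
#define STATE_BANNED            (1u << 2)

static atomic_uint state;

static void set_enabled(void)   { atomic_fetch_or(&state, STATE_ENABLED); }
static void clear_enabled(void) { atomic_fetch_and(&state, ~STATE_ENABLED); }
static int  is_banned(void)     { return atomic_load(&state) & STATE_BANNED; }

int main(void)
{
        set_enabled();
        printf("enabled=%u banned=%d\n",
               atomic_load(&state) & STATE_ENABLED, is_banned());
        clear_enabled();
        return 0;
}
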
drivers/gpu/drm/xe/xe_guc_submit.c
1002
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
drivers/gpu/drm/xe/xe_guc_submit.c
1004
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1006
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1010
CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
drivers/gpu/drm/xe/xe_guc_submit.c
1013
q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
drivers/gpu/drm/xe/xe_guc_submit.c
1016
xe_gt_reset_async(q->gt);
drivers/gpu/drm/xe/xe_guc_submit.c
102
static void clear_exec_queue_enabled(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1032
static int wq_noop_append(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1034
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1036
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1037
u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
drivers/gpu/drm/xe/xe_guc_submit.c
1039
if (wq_wait_for_space(q, wq_space_until_wrap(q)))
drivers/gpu/drm/xe/xe_guc_submit.c
104
atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1044
parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
drivers/gpu/drm/xe/xe_guc_submit.c
1047
q->guc->wqi_tail = 0;
drivers/gpu/drm/xe/xe_guc_submit.c
1052
static void wq_item_append(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1054
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1056
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1059
u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
drivers/gpu/drm/xe/xe_guc_submit.c
1063
if (wqi_size > wq_space_until_wrap(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1064
if (wq_noop_append(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1067
if (wq_wait_for_space(q, wqi_size))
drivers/gpu/drm/xe/xe_guc_submit.c
107
static bool exec_queue_pending_enable(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1072
wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1073
wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
drivers/gpu/drm/xe/xe_guc_submit.c
1074
FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
drivers/gpu/drm/xe/xe_guc_submit.c
1076
for (j = 1; j < q->width; ++j) {
drivers/gpu/drm/xe/xe_guc_submit.c
1077
struct xe_lrc *lrc = q->lrc[j];
drivers/gpu/drm/xe/xe_guc_submit.c
1085
wq[q->guc->wqi_tail / sizeof(u32)]));
drivers/gpu/drm/xe/xe_guc_submit.c
1087
q->guc->wqi_tail += wqi_size;
drivers/gpu/drm/xe/xe_guc_submit.c
1088
xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE);
drivers/gpu/drm/xe/xe_guc_submit.c
109
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
drivers/gpu/drm/xe/xe_guc_submit.c
1092
map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
1093
parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
drivers/gpu/drm/xe/xe_guc_submit.c
1097
static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
drivers/gpu/drm/xe/xe_guc_submit.c
1099
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1100
struct xe_lrc *lrc = q->lrc[0];
drivers/gpu/drm/xe/xe_guc_submit.c
1107
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1110
if (xe_exec_queue_is_parallel(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1111
wq_item_append(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1112
else if (!exec_queue_idle_skip_suspend(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1117
if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
drivers/gpu/drm/xe/xe_guc_submit.c
112
static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1124
q = xe_exec_queue_multi_queue_primary(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1126
if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1128
action[len++] = q->guc->id;
drivers/gpu/drm/xe/xe_guc_submit.c
1132
if (xe_exec_queue_is_parallel(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1135
q->guc->resume_time = RESUME_PENDING;
drivers/gpu/drm/xe/xe_guc_submit.c
1136
set_exec_queue_pending_enable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1137
set_exec_queue_enabled(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1138
trace_xe_exec_queue_scheduling_enable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
114
atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1141
action[len++] = q->guc->id;
drivers/gpu/drm/xe/xe_guc_submit.c
1142
trace_xe_exec_queue_submit(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1150
action[len++] = q->guc->id;
drivers/gpu/drm/xe/xe_guc_submit.c
1151
trace_xe_exec_queue_submit(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1161
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_guc_submit.c
1162
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1164
exec_queue_killed_or_banned_or_wedged(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1166
xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
drivers/gpu/drm/xe/xe_guc_submit.c
1167
exec_queue_banned(q) || exec_queue_suspended(q));
drivers/gpu/drm/xe/xe_guc_submit.c
117
static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1172
if (xe_exec_queue_is_multi_queue_secondary(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1173
struct xe_exec_queue *primary = xe_exec_queue_multi_queue_primary(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1184
if (!exec_queue_registered(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1185
register_exec_queue(q, GUC_CONTEXT_NORMAL);
drivers/gpu/drm/xe/xe_guc_submit.c
1187
q->ring_ops->emit_job(job);
drivers/gpu/drm/xe/xe_guc_submit.c
1188
submit_exec_queue(q, job);
drivers/gpu/drm/xe/xe_guc_submit.c
119
atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1211
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
1213
static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_guc_submit.c
1215
#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \
drivers/gpu/drm/xe/xe_guc_submit.c
1218
q->guc->id, \
drivers/gpu/drm/xe/xe_guc_submit.c
122
static bool exec_queue_pending_disable(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1223
struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1225
MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
drivers/gpu/drm/xe/xe_guc_submit.c
1228
if (!xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1229
set_min_preemption_timeout(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
1233
(!exec_queue_pending_enable(q) &&
drivers/gpu/drm/xe/xe_guc_submit.c
1234
!exec_queue_pending_disable(q)) ||
drivers/gpu/drm/xe/xe_guc_submit.c
1239
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
124
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
drivers/gpu/drm/xe/xe_guc_submit.c
1241
xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
drivers/gpu/drm/xe/xe_guc_submit.c
1243
xe_gt_reset_async(q->gt);
drivers/gpu/drm/xe/xe_guc_submit.c
1248
clear_exec_queue_enabled(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1249
set_exec_queue_pending_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1250
set_exec_queue_destroyed(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1251
trace_xe_exec_queue_scheduling_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1257
if (xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1258
handle_multi_queue_secondary_sched_done(guc, q, 0);
drivers/gpu/drm/xe/xe_guc_submit.c
127
static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1276
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
129
atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1299
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
drivers/gpu/drm/xe/xe_guc_submit.c
1300
if (xe_exec_queue_get_unless_zero(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1301
set_exec_queue_wedged(q);
drivers/gpu/drm/xe/xe_guc_submit.c
132
static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1330
static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
drivers/gpu/drm/xe/xe_guc_submit.c
1332
struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1334
u32 timeout_ms = q->sched_props.job_timeout_ms;
drivers/gpu/drm/xe/xe_guc_submit.c
134
atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1341
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
1346
ctx_timestamp = lower_32_bits(xe_lrc_timestamp(q->lrc[0]));
drivers/gpu/drm/xe/xe_guc_submit.c
1351
xe_sched_job_lrc_seqno(job), q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
1355
xe_sched_job_lrc_seqno(job), q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
1361
ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
137
static bool exec_queue_destroyed(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1380
q->guc->id, running_time_ms, timeout_ms, diff);
drivers/gpu/drm/xe/xe_guc_submit.c
1385
static void enable_scheduling(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1387
MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
drivers/gpu/drm/xe/xe_guc_submit.c
1388
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
139
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
drivers/gpu/drm/xe/xe_guc_submit.c
1391
xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1392
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1393
xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1394
xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1396
set_exec_queue_pending_enable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1397
set_exec_queue_enabled(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1398
trace_xe_exec_queue_scheduling_enable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1400
if (xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1401
handle_multi_queue_secondary_sched_done(guc, q, 1);
drivers/gpu/drm/xe/xe_guc_submit.c
1407
!exec_queue_pending_enable(q) ||
drivers/gpu/drm/xe/xe_guc_submit.c
1412
set_exec_queue_banned(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1413
xe_gt_reset_async(q->gt);
drivers/gpu/drm/xe/xe_guc_submit.c
1414
xe_sched_tdr_queue_imm(&q->guc->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
1418
static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
drivers/gpu/drm/xe/xe_guc_submit.c
142
static void set_exec_queue_destroyed(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1420
MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
drivers/gpu/drm/xe/xe_guc_submit.c
1421
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1423
xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1424
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1425
xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1427
if (immediate && !xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1428
set_min_preemption_timeout(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
1429
clear_exec_queue_enabled(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1430
set_exec_queue_pending_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1431
trace_xe_exec_queue_scheduling_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1433
if (xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1434
handle_multi_queue_secondary_sched_done(guc, q, 0);
drivers/gpu/drm/xe/xe_guc_submit.c
144
atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1445
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_guc_submit.c
1446
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
1447
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1454
xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
drivers/gpu/drm/xe/xe_guc_submit.c
147
static void clear_exec_queue_destroyed(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1470
skip_timeout_check = exec_queue_reset(q) ||
drivers/gpu/drm/xe/xe_guc_submit.c
1471
exec_queue_killed_or_banned_or_wedged(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1474
if (xe_exec_queue_is_multi_queue(q) &&
drivers/gpu/drm/xe/xe_guc_submit.c
1475
READ_ONCE(q->multi_queue.group->banned))
drivers/gpu/drm/xe/xe_guc_submit.c
1479
if (xe_exec_queue_is_lr(q))
drivers/gpu/drm/xe/xe_guc_submit.c
149
atomic_and(~EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1494
if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
drivers/gpu/drm/xe/xe_guc_submit.c
1495
!xe_guc_capture_get_matching_and_lock(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1497
CLASS(xe_force_wake, fw_ref)(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
drivers/gpu/drm/xe/xe_guc_submit.c
1499
xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
drivers/gpu/drm/xe/xe_guc_submit.c
1501
xe_engine_snapshot_capture_for_queue(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1507
if (!skip_timeout_check && !check_timeout(q, job))
drivers/gpu/drm/xe/xe_guc_submit.c
1510
if (!exec_queue_killed(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1511
wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1513
set_exec_queue_banned(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1516
if (!wedged && (exec_queue_enabled(q) || exec_queue_pending_disable(q))) {
drivers/gpu/drm/xe/xe_guc_submit.c
1519
if (exec_queue_reset(q))
drivers/gpu/drm/xe/xe_guc_submit.c
152
static bool exec_queue_banned(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1528
(!exec_queue_pending_enable(q) &&
drivers/gpu/drm/xe/xe_guc_submit.c
1529
!exec_queue_pending_disable(q)) ||
drivers/gpu/drm/xe/xe_guc_submit.c
1537
disable_scheduling(q, skip_timeout_check);
drivers/gpu/drm/xe/xe_guc_submit.c
154
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
drivers/gpu/drm/xe/xe_guc_submit.c
1551
!exec_queue_pending_disable(q) ||
drivers/gpu/drm/xe/xe_guc_submit.c
1561
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
1562
xe_devcoredump(q, job,
drivers/gpu/drm/xe/xe_guc_submit.c
1564
q->guc->id, ret, xe_guc_read_stopped(guc));
drivers/gpu/drm/xe/xe_guc_submit.c
1565
xe_gt_reset_async(q->gt);
drivers/gpu/drm/xe/xe_guc_submit.c
157
static void set_exec_queue_banned(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1571
if (q->vm && q->vm->xef) {
drivers/gpu/drm/xe/xe_guc_submit.c
1572
process_name = q->vm->xef->process_name;
drivers/gpu/drm/xe/xe_guc_submit.c
1573
pid = q->vm->xef->pid;
drivers/gpu/drm/xe/xe_guc_submit.c
1576
if (!exec_queue_killed(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1580
q->guc->id, q->flags, process_name, pid);
drivers/gpu/drm/xe/xe_guc_submit.c
1584
if (!exec_queue_killed(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1585
xe_devcoredump(q, job,
drivers/gpu/drm/xe/xe_guc_submit.c
1588
q->guc->id, q->flags);
drivers/gpu/drm/xe/xe_guc_submit.c
159
atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1594
xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
drivers/gpu/drm/xe/xe_guc_submit.c
1596
xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
drivers/gpu/drm/xe/xe_guc_submit.c
1598
if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
drivers/gpu/drm/xe/xe_guc_submit.c
1599
(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
drivers/gpu/drm/xe/xe_guc_submit.c
1601
xe_gt_reset_async(q->gt);
drivers/gpu/drm/xe/xe_guc_submit.c
1613
if (xe_exec_queue_is_multi_queue(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1614
xe_guc_exec_queue_group_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1616
xe_guc_exec_queue_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
162
static bool exec_queue_suspended(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1635
static void guc_exec_queue_fini(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1637
struct xe_guc_exec_queue *ge = q->guc;
drivers/gpu/drm/xe/xe_guc_submit.c
1638
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
164
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
drivers/gpu/drm/xe/xe_guc_submit.c
1640
release_guc_id(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
1655
struct xe_exec_queue *q = ge->q;
drivers/gpu/drm/xe/xe_guc_submit.c
1656
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1659
trace_xe_exec_queue_destroy(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1661
if (xe_exec_queue_is_multi_queue_secondary(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1662
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
1665
list_del(&q->multi_queue.link);
drivers/gpu/drm/xe/xe_guc_submit.c
167
static void set_exec_queue_suspended(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1672
xe_exec_queue_fini(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1675
static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1677
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1680
INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
drivers/gpu/drm/xe/xe_guc_submit.c
1683
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1684
__guc_exec_queue_destroy_async(&q->guc->destroy_async);
drivers/gpu/drm/xe/xe_guc_submit.c
1686
queue_work(xe->destroy_wq, &q->guc->destroy_async);
drivers/gpu/drm/xe/xe_guc_submit.c
1689
static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
169
atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1698
guc_exec_queue_destroy_async(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1703
struct xe_exec_queue *q = msg->private_data;
drivers/gpu/drm/xe/xe_guc_submit.c
1704
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1706
xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
drivers/gpu/drm/xe/xe_guc_submit.c
1707
trace_xe_exec_queue_cleanup_entity(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1719
if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw))
drivers/gpu/drm/xe/xe_guc_submit.c
172
static void clear_exec_queue_suspended(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1720
disable_scheduling_deregister(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
1722
__guc_exec_queue_destroy(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
1725
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1727
return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1732
struct xe_exec_queue *q = msg->private_data;
drivers/gpu/drm/xe/xe_guc_submit.c
1733
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1735
if (guc_exec_queue_allowed_to_change_state(q))
drivers/gpu/drm/xe/xe_guc_submit.c
1736
init_policies(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
174
atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1740
static void __suspend_fence_signal(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1742
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1745
if (!q->guc->suspend_pending)
drivers/gpu/drm/xe/xe_guc_submit.c
1748
WRITE_ONCE(q->guc->suspend_pending, false);
drivers/gpu/drm/xe/xe_guc_submit.c
1759
wake_up(&q->guc->suspend_wait);
drivers/gpu/drm/xe/xe_guc_submit.c
1762
static void suspend_fence_signal(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1764
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1766
xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) ||
drivers/gpu/drm/xe/xe_guc_submit.c
1768
xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending);
drivers/gpu/drm/xe/xe_guc_submit.c
177
static bool exec_queue_reset(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1770
__suspend_fence_signal(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1775
struct xe_exec_queue *q = msg->private_data;
drivers/gpu/drm/xe/xe_guc_submit.c
1776
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1777
bool idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1779
if (!idle_skip_suspend && guc_exec_queue_allowed_to_change_state(q) &&
drivers/gpu/drm/xe/xe_guc_submit.c
1780
!exec_queue_suspended(q) && exec_queue_enabled(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1782
((q->guc->resume_time != RESUME_PENDING ||
drivers/gpu/drm/xe/xe_guc_submit.c
1783
xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)));
drivers/gpu/drm/xe/xe_guc_submit.c
1788
q->guc->resume_time);
drivers/gpu/drm/xe/xe_guc_submit.c
1789
s64 wait_ms = q->vm->preempt.min_run_period_ms -
drivers/gpu/drm/xe/xe_guc_submit.c
179
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
drivers/gpu/drm/xe/xe_guc_submit.c
1792
if (wait_ms > 0 && q->guc->resume_time)
drivers/gpu/drm/xe/xe_guc_submit.c
1795
set_exec_queue_suspended(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1796
disable_scheduling(q, false);
drivers/gpu/drm/xe/xe_guc_submit.c
1798
} else if (q->guc->suspend_pending) {
drivers/gpu/drm/xe/xe_guc_submit.c
1800
set_exec_queue_idle_skip_suspend(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1801
set_exec_queue_suspended(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1802
suspend_fence_signal(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1806
static void sched_context(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1808
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1809
struct xe_lrc *lrc = q->lrc[0];
drivers/gpu/drm/xe/xe_guc_submit.c
1812
q->guc->id,
drivers/gpu/drm/xe/xe_guc_submit.c
1815
xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_parallel(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1816
xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1817
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
drivers/gpu/drm/xe/xe_guc_submit.c
1818
xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
182
static void set_exec_queue_reset(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1820
trace_xe_exec_queue_submit(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1828
struct xe_exec_queue *q = msg->private_data;
drivers/gpu/drm/xe/xe_guc_submit.c
1830
if (guc_exec_queue_allowed_to_change_state(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1831
clear_exec_queue_suspended(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1832
if (!exec_queue_enabled(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1833
if (exec_queue_idle_skip_suspend(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1834
struct xe_lrc *lrc = q->lrc[0];
drivers/gpu/drm/xe/xe_guc_submit.c
1836
clear_exec_queue_idle_skip_suspend(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1839
q->guc->resume_time = RESUME_PENDING;
drivers/gpu/drm/xe/xe_guc_submit.c
184
atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1840
set_exec_queue_pending_resume(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1841
enable_scheduling(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1842
} else if (exec_queue_idle_skip_suspend(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1843
clear_exec_queue_idle_skip_suspend(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1844
sched_context(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1847
clear_exec_queue_suspended(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1848
clear_exec_queue_idle_skip_suspend(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1854
struct xe_exec_queue *q = msg->private_data;
drivers/gpu/drm/xe/xe_guc_submit.c
1856
if (guc_exec_queue_allowed_to_change_state(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1858
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1859
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
1869
xe_guc_exec_queue_group_cgp_sync(guc, q, action, len);
drivers/gpu/drm/xe/xe_guc_submit.c
187
static bool exec_queue_killed(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
189
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
drivers/gpu/drm/xe/xe_guc_submit.c
192
static void set_exec_queue_killed(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1923
static int guc_exec_queue_init(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1926
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1938
q->guc = ge;
drivers/gpu/drm/xe/xe_guc_submit.c
1939
ge->q = q;
drivers/gpu/drm/xe/xe_guc_submit.c
194
atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
1946
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
drivers/gpu/drm/xe/xe_guc_submit.c
1947
msecs_to_jiffies(q->sched_props.job_timeout_ms);
drivers/gpu/drm/xe/xe_guc_submit.c
1954
if (xe_exec_queue_is_multi_queue_secondary(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1955
struct xe_exec_queue *primary = xe_exec_queue_multi_queue_primary(q);
drivers/gpu/drm/xe/xe_guc_submit.c
1963
q->name, gt_to_xe(q->gt)->drm.dev);
drivers/gpu/drm/xe/xe_guc_submit.c
197
static bool exec_queue_wedged(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
1974
err = alloc_guc_id(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
1978
q->entity = &ge->entity;
drivers/gpu/drm/xe/xe_guc_submit.c
1985
xe_exec_queue_assign_name(q, q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
199
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED;
drivers/gpu/drm/xe/xe_guc_submit.c
1991
if (xe_exec_queue_is_multi_queue_secondary(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
1992
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
1994
INIT_LIST_HEAD(&q->multi_queue.link);
drivers/gpu/drm/xe/xe_guc_submit.c
1996
list_add_tail(&q->multi_queue.link, &group->list);
drivers/gpu/drm/xe/xe_guc_submit.c
2000
if (xe_exec_queue_is_multi_queue(q))
drivers/gpu/drm/xe/xe_guc_submit.c
2001
trace_xe_exec_queue_create_multi_queue(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2003
trace_xe_exec_queue_create(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2018
static void guc_exec_queue_kill(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
202
static void set_exec_queue_wedged(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2020
trace_xe_exec_queue_kill(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2021
set_exec_queue_killed(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2022
__suspend_fence_signal(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2023
xe_guc_exec_queue_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2026
static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
drivers/gpu/drm/xe/xe_guc_submit.c
2029
xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
drivers/gpu/drm/xe/xe_guc_submit.c
2033
msg->private_data = q;
drivers/gpu/drm/xe/xe_guc_submit.c
2037
xe_sched_add_msg_head(&q->guc->sched, msg);
drivers/gpu/drm/xe/xe_guc_submit.c
2039
xe_sched_add_msg_locked(&q->guc->sched, msg);
drivers/gpu/drm/xe/xe_guc_submit.c
204
atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
2041
xe_sched_add_msg(&q->guc->sched, msg);
drivers/gpu/drm/xe/xe_guc_submit.c
2044
static void guc_exec_queue_try_add_msg_head(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
2051
guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED | MSG_HEAD);
drivers/gpu/drm/xe/xe_guc_submit.c
2054
static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
2061
guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED);
drivers/gpu/drm/xe/xe_guc_submit.c
2069
static void guc_exec_queue_destroy(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
207
static bool exec_queue_pending_resume(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2071
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
drivers/gpu/drm/xe/xe_guc_submit.c
2073
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
drivers/gpu/drm/xe/xe_guc_submit.c
2074
guc_exec_queue_add_msg(q, msg, CLEANUP);
drivers/gpu/drm/xe/xe_guc_submit.c
2076
__guc_exec_queue_destroy(exec_queue_to_guc(q), q);
drivers/gpu/drm/xe/xe_guc_submit.c
2079
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
2084
if (q->sched_props.priority == priority ||
drivers/gpu/drm/xe/xe_guc_submit.c
2085
exec_queue_killed_or_banned_or_wedged(q))
drivers/gpu/drm/xe/xe_guc_submit.c
209
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
drivers/gpu/drm/xe/xe_guc_submit.c
2092
q->sched_props.priority = priority;
drivers/gpu/drm/xe/xe_guc_submit.c
2093
guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
drivers/gpu/drm/xe/xe_guc_submit.c
2098
static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
drivers/gpu/drm/xe/xe_guc_submit.c
2102
if (q->sched_props.timeslice_us == timeslice_us ||
drivers/gpu/drm/xe/xe_guc_submit.c
2103
exec_queue_killed_or_banned_or_wedged(q))
drivers/gpu/drm/xe/xe_guc_submit.c
2110
q->sched_props.timeslice_us = timeslice_us;
drivers/gpu/drm/xe/xe_guc_submit.c
2111
guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
drivers/gpu/drm/xe/xe_guc_submit.c
2116
static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
212
static void set_exec_queue_pending_resume(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2121
if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
drivers/gpu/drm/xe/xe_guc_submit.c
2122
exec_queue_killed_or_banned_or_wedged(q))
drivers/gpu/drm/xe/xe_guc_submit.c
2129
q->sched_props.preempt_timeout_us = preempt_timeout_us;
drivers/gpu/drm/xe/xe_guc_submit.c
2130
guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
drivers/gpu/drm/xe/xe_guc_submit.c
2135
static int guc_exec_queue_set_multi_queue_priority(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
214
atomic_or(EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
2140
xe_gt_assert(guc_to_gt(exec_queue_to_guc(q)), xe_exec_queue_is_multi_queue(q));
drivers/gpu/drm/xe/xe_guc_submit.c
2142
if (q->multi_queue.priority == priority ||
drivers/gpu/drm/xe/xe_guc_submit.c
2143
exec_queue_killed_or_banned_or_wedged(q))
drivers/gpu/drm/xe/xe_guc_submit.c
2150
q->multi_queue.priority = priority;
drivers/gpu/drm/xe/xe_guc_submit.c
2151
guc_exec_queue_add_msg(q, msg, SET_MULTI_QUEUE_PRIORITY);
drivers/gpu/drm/xe/xe_guc_submit.c
2156
static int guc_exec_queue_suspend(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2158
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2159
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
drivers/gpu/drm/xe/xe_guc_submit.c
2161
if (exec_queue_killed_or_banned_or_wedged(q))
drivers/gpu/drm/xe/xe_guc_submit.c
2165
if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
drivers/gpu/drm/xe/xe_guc_submit.c
2166
q->guc->suspend_pending = true;
drivers/gpu/drm/xe/xe_guc_submit.c
217
static void clear_exec_queue_pending_resume(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2172
static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2174
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2184
(!READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || \
drivers/gpu/drm/xe/xe_guc_submit.c
219
atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
2193
ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
drivers/gpu/drm/xe/xe_guc_submit.c
2202
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2215
static void guc_exec_queue_resume(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2217
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2218
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
drivers/gpu/drm/xe/xe_guc_submit.c
2219
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
222
static bool exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2221
xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
drivers/gpu/drm/xe/xe_guc_submit.c
2224
guc_exec_queue_try_add_msg(q, msg, RESUME);
drivers/gpu/drm/xe/xe_guc_submit.c
2228
static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2230
if (xe_exec_queue_is_multi_queue_secondary(q) &&
drivers/gpu/drm/xe/xe_guc_submit.c
2231
guc_exec_queue_reset_status(xe_exec_queue_multi_queue_primary(q)))
drivers/gpu/drm/xe/xe_guc_submit.c
2234
return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
drivers/gpu/drm/xe/xe_guc_submit.c
224
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND;
drivers/gpu/drm/xe/xe_guc_submit.c
2258
static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2260
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2267
if (exec_queue_registered(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2268
if (exec_queue_destroyed(q))
drivers/gpu/drm/xe/xe_guc_submit.c
227
static void set_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2271
if (q->guc->suspend_pending) {
drivers/gpu/drm/xe/xe_guc_submit.c
2272
set_exec_queue_suspended(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2273
suspend_fence_signal(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2278
&q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
2279
q->guc->resume_time = 0;
drivers/gpu/drm/xe/xe_guc_submit.c
2280
trace_xe_exec_queue_stop(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2287
if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
drivers/gpu/drm/xe/xe_guc_submit.c
229
atomic_or(EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
2301
set_exec_queue_banned(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2302
xe_guc_exec_queue_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2307
__guc_exec_queue_destroy(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
232
static void clear_exec_queue_idle_skip_suspend(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
234
atomic_and(~EXEC_QUEUE_STATE_IDLE_SKIP_SUSPEND, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
2347
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2354
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
2356
if (q->guc->id != index)
drivers/gpu/drm/xe/xe_guc_submit.c
2359
guc_exec_queue_stop(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
237
static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2372
struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2376
pending_enable = exec_queue_pending_enable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2377
pending_resume = exec_queue_pending_resume(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2380
q->guc->needs_resume = true;
drivers/gpu/drm/xe/xe_guc_submit.c
2382
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2386
clear_exec_queue_registered(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2388
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
239
return (atomic_read(&q->guc->state) &
drivers/gpu/drm/xe/xe_guc_submit.c
2392
clear_exec_queue_enabled(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2393
clear_exec_queue_pending_resume(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2394
clear_exec_queue_pending_enable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2396
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2399
if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2400
clear_exec_queue_destroyed(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2401
q->guc->needs_cleanup = true;
drivers/gpu/drm/xe/xe_guc_submit.c
2403
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2406
pending_disable = exec_queue_pending_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2408
if (pending_disable && exec_queue_suspended(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2409
clear_exec_queue_suspended(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2410
q->guc->needs_suspend = true;
drivers/gpu/drm/xe/xe_guc_submit.c
2412
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2417
set_exec_queue_enabled(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2418
clear_exec_queue_pending_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2420
q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2423
q->guc->resume_time = 0;
drivers/gpu/drm/xe/xe_guc_submit.c
2443
static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2445
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2455
guc_exec_queue_revert_pending_state_change(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
2457
if (xe_exec_queue_is_parallel(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2459
struct xe_lrc *lrc = READ_ONCE(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
2479
for (i = 0; i < q->width; ++i)
drivers/gpu/drm/xe/xe_guc_submit.c
2480
q->lrc[i]->ring.tail = job->ptrs[i].head;
drivers/gpu/drm/xe/xe_guc_submit.c
2490
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2494
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
drivers/gpu/drm/xe/xe_guc_submit.c
2495
xe_sched_submission_stop(&q->guc->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2505
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2512
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
2514
if (q->guc->id != index)
drivers/gpu/drm/xe/xe_guc_submit.c
2517
guc_exec_queue_pause(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
2522
static void guc_exec_queue_start(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2524
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2526
if (!exec_queue_killed_or_banned_or_wedged(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2530
trace_xe_exec_queue_resubmit(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2532
for (i = 0; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_guc_submit.c
2541
q->lrc[i]->ring.tail = job->ptrs[i].head;
drivers/gpu/drm/xe/xe_guc_submit.c
2542
xe_lrc_set_ring_tail(q->lrc[i],
drivers/gpu/drm/xe/xe_guc_submit.c
2543
xe_lrc_ring_head(q->lrc[i]));
drivers/gpu/drm/xe/xe_guc_submit.c
2555
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2562
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
2564
if (q->guc->id != index)
drivers/gpu/drm/xe/xe_guc_submit.c
2567
guc_exec_queue_start(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2577
struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2579
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2589
q->guc->id, xe_sched_job_seqno(job));
drivers/gpu/drm/xe/xe_guc_submit.c
2591
q->ring_ops->emit_job(job);
drivers/gpu/drm/xe/xe_guc_submit.c
2606
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2613
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
2615
if (q->guc->id != index)
drivers/gpu/drm/xe/xe_guc_submit.c
2618
guc_exec_queue_unpause_prepare(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
2623
static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2625
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2628
if (q->guc->needs_cleanup) {
drivers/gpu/drm/xe/xe_guc_submit.c
2629
msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
drivers/gpu/drm/xe/xe_guc_submit.c
2631
guc_exec_queue_add_msg(q, msg, CLEANUP);
drivers/gpu/drm/xe/xe_guc_submit.c
2632
q->guc->needs_cleanup = false;
drivers/gpu/drm/xe/xe_guc_submit.c
2635
if (q->guc->needs_suspend) {
drivers/gpu/drm/xe/xe_guc_submit.c
2636
msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
drivers/gpu/drm/xe/xe_guc_submit.c
2639
guc_exec_queue_try_add_msg_head(q, msg, SUSPEND);
drivers/gpu/drm/xe/xe_guc_submit.c
2642
q->guc->needs_suspend = false;
drivers/gpu/drm/xe/xe_guc_submit.c
2650
if (q->guc->needs_resume) {
drivers/gpu/drm/xe/xe_guc_submit.c
2651
msg = q->guc->static_msgs + STATIC_MSG_RESUME;
drivers/gpu/drm/xe/xe_guc_submit.c
2654
guc_exec_queue_try_add_msg_head(q, msg, RESUME);
drivers/gpu/drm/xe/xe_guc_submit.c
2657
q->guc->needs_resume = false;
drivers/gpu/drm/xe/xe_guc_submit.c
2661
static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2663
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2664
bool needs_tdr = exec_queue_killed_or_banned_or_wedged(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2669
guc_exec_queue_replay_pending_state_change(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2672
xe_guc_exec_queue_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2682
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2686
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
drivers/gpu/drm/xe/xe_guc_submit.c
2687
xe_sched_submission_start(&q->guc->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
2697
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2703
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
2708
if (q->guc->id != index ||
drivers/gpu/drm/xe/xe_guc_submit.c
2709
!drm_sched_is_stopped(&q->guc->sched.base))
drivers/gpu/drm/xe/xe_guc_submit.c
2712
guc_exec_queue_unpause(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
2723
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2727
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
2728
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
2731
if (q->guc->id != index)
drivers/gpu/drm/xe/xe_guc_submit.c
2735
guc_exec_queue_kill(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2744
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2751
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
2752
if (unlikely(!q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2757
xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2758
xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width));
drivers/gpu/drm/xe/xe_guc_submit.c
2760
return q;
drivers/gpu/drm/xe/xe_guc_submit.c
2763
static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2767
q->guc->id,
drivers/gpu/drm/xe/xe_guc_submit.c
2770
xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q));
drivers/gpu/drm/xe/xe_guc_submit.c
2771
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
drivers/gpu/drm/xe/xe_guc_submit.c
2772
xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
2773
xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
2775
trace_xe_exec_queue_deregister(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2777
if (xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
2778
handle_deregister_done(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
278
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2784
static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
2787
trace_xe_exec_queue_scheduling_done(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2790
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
2792
q->guc->resume_time = ktime_get();
drivers/gpu/drm/xe/xe_guc_submit.c
2793
clear_exec_queue_pending_resume(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2794
clear_exec_queue_pending_enable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2799
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
drivers/gpu/drm/xe/xe_guc_submit.c
2801
if (q->guc->suspend_pending) {
drivers/gpu/drm/xe/xe_guc_submit.c
2802
suspend_fence_signal(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2803
clear_exec_queue_pending_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2805
if (exec_queue_banned(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2809
if (exec_queue_destroyed(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2819
clear_exec_queue_pending_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
282
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
2820
deregister_exec_queue(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
2822
clear_exec_queue_pending_disable(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2829
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
283
if (exec_queue_wedged(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2834
handle_sched_done(guc, q, runnable_state);
drivers/gpu/drm/xe/xe_guc_submit.c
2840
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2849
q = g2h_exec_queue_lookup(guc, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
285
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2850
if (unlikely(!q))
drivers/gpu/drm/xe/xe_guc_submit.c
2853
if (unlikely(!exec_queue_pending_enable(q) &&
drivers/gpu/drm/xe/xe_guc_submit.c
2854
!exec_queue_pending_disable(q))) {
drivers/gpu/drm/xe/xe_guc_submit.c
2857
atomic_read(&q->guc->state), q->guc->id,
drivers/gpu/drm/xe/xe_guc_submit.c
2862
handle_sched_done(guc, q, runnable_state);
drivers/gpu/drm/xe/xe_guc_submit.c
2867
static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
2869
trace_xe_exec_queue_deregister_done(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2871
clear_exec_queue_registered(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2872
__guc_exec_queue_destroy(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
2877
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2885
q = g2h_exec_queue_lookup(guc, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
2886
if (unlikely(!q))
drivers/gpu/drm/xe/xe_guc_submit.c
2889
if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
drivers/gpu/drm/xe/xe_guc_submit.c
2890
exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
2893
atomic_read(&q->guc->state), q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
2897
handle_deregister_done(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
2905
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2913
q = g2h_exec_queue_lookup(guc, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
2914
if (unlikely(!q))
drivers/gpu/drm/xe/xe_guc_submit.c
2918
xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id,
drivers/gpu/drm/xe/xe_guc_submit.c
2919
atomic_read(&q->guc->state));
drivers/gpu/drm/xe/xe_guc_submit.c
2921
trace_xe_exec_queue_reset(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2929
xe_guc_exec_queue_reset_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
2966
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
2987
q = g2h_exec_queue_lookup(guc, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
2988
if (unlikely(!q))
drivers/gpu/drm/xe/xe_guc_submit.c
2999
type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
3003
xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
3005
trace_xe_exec_queue_memory_cat_error(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3008
xe_guc_exec_queue_reset_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3040
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
3048
q = g2h_exec_queue_lookup(guc, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
3049
if (unlikely(!q))
drivers/gpu/drm/xe/xe_guc_submit.c
3056
trace_xe_exec_queue_cgp_context_error(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3059
xe_guc_exec_queue_reset_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3078
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
3086
q = g2h_exec_queue_lookup(guc, guc_id);
drivers/gpu/drm/xe/xe_guc_submit.c
3087
if (unlikely(!q))
drivers/gpu/drm/xe/xe_guc_submit.c
3090
if (!xe_exec_queue_is_multi_queue_primary(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
3096
WRITE_ONCE(q->multi_queue.group->sync_pending, false);
drivers/gpu/drm/xe/xe_guc_submit.c
3103
guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
3106
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3108
struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
drivers/gpu/drm/xe/xe_guc_submit.c
3111
snapshot->guc.wqi_head = q->guc->wqi_head;
drivers/gpu/drm/xe/xe_guc_submit.c
3112
snapshot->guc.wqi_tail = q->guc->wqi_tail;
drivers/gpu/drm/xe/xe_guc_submit.c
3161
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
3163
struct xe_gpu_scheduler *sched = &q->guc->sched;
drivers/gpu/drm/xe/xe_guc_submit.c
3172
snapshot->guc.id = q->guc->id;
drivers/gpu/drm/xe/xe_guc_submit.c
3173
memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
drivers/gpu/drm/xe/xe_guc_submit.c
3174
snapshot->class = q->class;
drivers/gpu/drm/xe/xe_guc_submit.c
3175
snapshot->logical_mask = q->logical_mask;
drivers/gpu/drm/xe/xe_guc_submit.c
3176
snapshot->width = q->width;
drivers/gpu/drm/xe/xe_guc_submit.c
3177
snapshot->refcount = kref_read(&q->refcount);
drivers/gpu/drm/xe/xe_guc_submit.c
3179
snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
drivers/gpu/drm/xe/xe_guc_submit.c
3181
q->sched_props.preempt_timeout_us;
drivers/gpu/drm/xe/xe_guc_submit.c
3183
snapshot->lrc = kmalloc_objs(struct xe_lrc_snapshot *, q->width,
drivers/gpu/drm/xe/xe_guc_submit.c
3187
for (i = 0; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_guc_submit.c
3188
struct xe_lrc *lrc = q->lrc[i];
drivers/gpu/drm/xe/xe_guc_submit.c
3194
snapshot->schedule_state = atomic_read(&q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
3195
snapshot->exec_queue_flags = q->flags;
drivers/gpu/drm/xe/xe_guc_submit.c
3197
snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3199
guc_exec_queue_wq_snapshot_capture(q, snapshot);
drivers/gpu/drm/xe/xe_guc_submit.c
3201
if (xe_exec_queue_is_multi_queue(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
3203
snapshot->multi_queue.primary = xe_exec_queue_multi_queue_primary(q)->guc->id;
drivers/gpu/drm/xe/xe_guc_submit.c
3204
snapshot->multi_queue.pos = q->multi_queue.pos;
drivers/gpu/drm/xe/xe_guc_submit.c
3294
static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
drivers/gpu/drm/xe/xe_guc_submit.c
3298
snapshot = xe_guc_exec_queue_snapshot_capture(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3315
void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type)
drivers/gpu/drm/xe/xe_guc_submit.c
3317
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3327
register_exec_queue(q, ctx_type);
drivers/gpu/drm/xe/xe_guc_submit.c
3328
enable_scheduling(q);
drivers/gpu/drm/xe/xe_guc_submit.c
3340
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
3347
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
drivers/gpu/drm/xe/xe_guc_submit.c
3348
guc_exec_queue_print(q, p);
drivers/gpu/drm/xe/xe_guc_submit.c
3361
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
3366
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
drivers/gpu/drm/xe/xe_guc_submit.c
3367
if (q->width > 1)
drivers/gpu/drm/xe/xe_guc_submit.c
3383
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_guc_submit.c
3388
xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
drivers/gpu/drm/xe/xe_guc_submit.c
3390
if (q->guc->id != index)
drivers/gpu/drm/xe/xe_guc_submit.c
3393
err = xe_exec_queue_contexts_hwsp_rebase(q, scratch);
drivers/gpu/drm/xe/xe_guc_submit.c
416
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
drivers/gpu/drm/xe/xe_guc_submit.c
423
xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
drivers/gpu/drm/xe/xe_guc_submit.c
426
q->guc->id, q->width);
drivers/gpu/drm/xe/xe_guc_submit.c
432
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
447
q->width);
drivers/gpu/drm/xe/xe_guc_submit.c
451
q->guc->id = ret;
drivers/gpu/drm/xe/xe_guc_submit.c
453
for (i = 0; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_guc_submit.c
455
q->guc->id + i, q, GFP_NOWAIT));
drivers/gpu/drm/xe/xe_guc_submit.c
463
__release_guc_id(guc, q, i);
drivers/gpu/drm/xe/xe_guc_submit.c
468
static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
471
__release_guc_id(guc, q, q->width);
drivers/gpu/drm/xe/xe_guc_submit.c
524
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
527
enum xe_exec_queue_priority prio = q->sched_props.priority;
drivers/gpu/drm/xe/xe_guc_submit.c
528
u32 timeslice_us = q->sched_props.timeslice_us;
drivers/gpu/drm/xe/xe_guc_submit.c
530
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
drivers/gpu/drm/xe/xe_guc_submit.c
532
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q) &&
drivers/gpu/drm/xe/xe_guc_submit.c
533
!xe_exec_queue_is_multi_queue_secondary(q));
drivers/gpu/drm/xe/xe_guc_submit.c
535
if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY)
drivers/gpu/drm/xe/xe_guc_submit.c
538
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
54
exec_queue_to_guc(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
549
static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
553
xe_assert(guc_to_xe(guc), !xe_exec_queue_is_multi_queue_secondary(q));
drivers/gpu/drm/xe/xe_guc_submit.c
555
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
drivers/gpu/drm/xe/xe_guc_submit.c
56
return &q->gt->uc.guc;
drivers/gpu/drm/xe/xe_guc_submit.c
567
static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
569
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
575
xe_sched_tdr_queue_imm(&q->guc->sched);
drivers/gpu/drm/xe/xe_guc_submit.c
578
static void xe_guc_exec_queue_group_trigger_cleanup(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
580
struct xe_exec_queue *primary = xe_exec_queue_multi_queue_primary(q);
drivers/gpu/drm/xe/xe_guc_submit.c
581
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
584
xe_gt_assert(guc_to_gt(exec_queue_to_guc(q)),
drivers/gpu/drm/xe/xe_guc_submit.c
585
xe_exec_queue_is_multi_queue(q));
drivers/gpu/drm/xe/xe_guc_submit.c
597
static void xe_guc_exec_queue_reset_trigger_cleanup(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
599
if (xe_exec_queue_is_multi_queue(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
600
struct xe_exec_queue *primary = xe_exec_queue_multi_queue_primary(q);
drivers/gpu/drm/xe/xe_guc_submit.c
601
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
619
set_exec_queue_reset(q);
drivers/gpu/drm/xe/xe_guc_submit.c
620
if (!exec_queue_banned(q))
drivers/gpu/drm/xe/xe_guc_submit.c
621
xe_guc_exec_queue_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
625
static void set_exec_queue_group_banned(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
627
struct xe_exec_queue *primary = xe_exec_queue_multi_queue_primary(q);
drivers/gpu/drm/xe/xe_guc_submit.c
628
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
632
xe_gt_assert(guc_to_gt(exec_queue_to_guc(q)),
drivers/gpu/drm/xe/xe_guc_submit.c
633
xe_exec_queue_is_multi_queue(q));
drivers/gpu/drm/xe/xe_guc_submit.c
726
struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
728
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
736
(32 + q->multi_queue.pos * 2) * sizeof(u32),
drivers/gpu/drm/xe/xe_guc_submit.c
737
u32, lower_32_bits(xe_lrc_descriptor(q->lrc[0])));
drivers/gpu/drm/xe/xe_guc_submit.c
740
(33 + q->multi_queue.pos * 2) * sizeof(u32),
drivers/gpu/drm/xe/xe_guc_submit.c
743
if (q->multi_queue.pos / 32) {
drivers/gpu/drm/xe/xe_guc_submit.c
745
u32, BIT(q->multi_queue.pos % 32));
drivers/gpu/drm/xe/xe_guc_submit.c
749
u32, BIT(q->multi_queue.pos));
drivers/gpu/drm/xe/xe_guc_submit.c
755
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
758
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
77
static bool exec_queue_registered(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
777
set_exec_queue_group_banned(q);
drivers/gpu/drm/xe/xe_guc_submit.c
778
xe_gt_reset_async(q->gt);
drivers/gpu/drm/xe/xe_guc_submit.c
779
xe_guc_exec_queue_group_trigger_cleanup(q);
drivers/gpu/drm/xe/xe_guc_submit.c
783
xe_lrc_set_multi_queue_priority(q->lrc[0], q->multi_queue.priority);
drivers/gpu/drm/xe/xe_guc_submit.c
784
xe_guc_exec_queue_group_cgp_update(xe, q);
drivers/gpu/drm/xe/xe_guc_submit.c
79
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
drivers/gpu/drm/xe/xe_guc_submit.c
791
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
815
xe_guc_exec_queue_group_cgp_sync(guc, q, action, len);
drivers/gpu/drm/xe/xe_guc_submit.c
819
struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
82
static void set_exec_queue_registered(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
825
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_multi_queue_secondary(q));
drivers/gpu/drm/xe/xe_guc_submit.c
828
action[len++] = q->multi_queue.group->primary->guc->id;
drivers/gpu/drm/xe/xe_guc_submit.c
838
xe_guc_exec_queue_group_cgp_sync(guc, q, action, len);
drivers/gpu/drm/xe/xe_guc_submit.c
84
atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
842
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_guc_submit.c
850
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q));
drivers/gpu/drm/xe/xe_guc_submit.c
862
action[len++] = q->width;
drivers/gpu/drm/xe/xe_guc_submit.c
866
for (i = 1; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_guc_submit.c
867
struct xe_lrc *lrc = q->lrc[i];
drivers/gpu/drm/xe/xe_guc_submit.c
87
static void clear_exec_queue_registered(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
878
xe_gt_assert(guc_to_gt(guc), q->width ==
drivers/gpu/drm/xe/xe_guc_submit.c
89
atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.c
917
static void register_exec_queue(struct xe_exec_queue *q, int ctx_type)
drivers/gpu/drm/xe/xe_guc_submit.c
919
struct xe_guc *guc = exec_queue_to_guc(q);
drivers/gpu/drm/xe/xe_guc_submit.c
92
static bool exec_queue_enabled(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
921
struct xe_lrc *lrc = q->lrc[0];
drivers/gpu/drm/xe/xe_guc_submit.c
924
xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
drivers/gpu/drm/xe/xe_guc_submit.c
928
info.context_idx = q->guc->id;
drivers/gpu/drm/xe/xe_guc_submit.c
929
info.engine_class = xe_engine_class_to_guc_class(q->class);
drivers/gpu/drm/xe/xe_guc_submit.c
930
info.engine_submit_mask = q->logical_mask;
drivers/gpu/drm/xe/xe_guc_submit.c
936
if (xe_exec_queue_is_multi_queue(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
937
struct xe_exec_queue_group *group = q->multi_queue.group;
drivers/gpu/drm/xe/xe_guc_submit.c
94
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
drivers/gpu/drm/xe/xe_guc_submit.c
943
if (xe_exec_queue_is_parallel(q)) {
drivers/gpu/drm/xe/xe_guc_submit.c
957
q->guc->wqi_head = 0;
drivers/gpu/drm/xe/xe_guc_submit.c
958
q->guc->wqi_tail = 0;
drivers/gpu/drm/xe/xe_guc_submit.c
963
set_exec_queue_registered(q);
drivers/gpu/drm/xe/xe_guc_submit.c
964
trace_xe_exec_queue_register(q);
drivers/gpu/drm/xe/xe_guc_submit.c
965
if (xe_exec_queue_is_multi_queue_primary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
966
__register_exec_queue_group(guc, q, &info);
drivers/gpu/drm/xe/xe_guc_submit.c
967
else if (xe_exec_queue_is_parallel(q))
drivers/gpu/drm/xe/xe_guc_submit.c
968
__register_mlrc_exec_queue(guc, q, &info);
drivers/gpu/drm/xe/xe_guc_submit.c
969
else if (!xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
97
static void set_exec_queue_enabled(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
972
if (!xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
973
init_policies(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
975
if (xe_exec_queue_is_multi_queue_secondary(q))
drivers/gpu/drm/xe/xe_guc_submit.c
976
xe_guc_exec_queue_group_add(guc, q);
drivers/gpu/drm/xe/xe_guc_submit.c
979
static u32 wq_space_until_wrap(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_guc_submit.c
981
return (WQ_SIZE - q->guc->wqi_tail);
drivers/gpu/drm/xe/xe_guc_submit.c
99
atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
drivers/gpu/drm/xe/xe_guc_submit.h
44
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_guc_submit.h
53
void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type);
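
The xe_guc_submit.c entries above repeatedly pair an exec_queue_FOO() predicate with set_/clear_ helpers that OR and AND-NOT one EXEC_QUEUE_STATE_* bit into the atomic q->guc->state word. Below is a minimal user-space sketch of that triplet pattern using C11 atomics; the STATE_* values and type names are illustrative assumptions, not the driver's.

    /*
     * Model of the exec_queue_*()/set_exec_queue_*()/clear_exec_queue_*()
     * triplets listed above. Bit values are invented; the real driver
     * keeps its flags in q->guc->state.
     */
    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define STATE_REGISTERED (1u << 0) /* illustrative value */
    #define STATE_ENABLED    (1u << 1)
    #define STATE_BANNED     (1u << 2)

    struct exec_queue { atomic_uint state; };

    static bool queue_banned(struct exec_queue *q)
    {
        return atomic_load(&q->state) & STATE_BANNED;
    }

    static void set_queue_banned(struct exec_queue *q)
    {
        atomic_fetch_or(&q->state, STATE_BANNED);
    }

    static void clear_queue_banned(struct exec_queue *q)
    {
        atomic_fetch_and(&q->state, ~STATE_BANNED);
    }

    int main(void)
    {
        struct exec_queue q = { .state = 0 };

        set_queue_banned(&q);
        assert(queue_banned(&q));
        clear_queue_banned(&q);
        assert(!queue_banned(&q));
        return 0;
    }

Because each flag is a distinct bit in one atomic word, readers such as exec_queue_killed_or_banned_or_wedged() above can test several conditions with a single atomic_read and a mask.
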
drivers/gpu/drm/xe/xe_hw_engine.c
922
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_hw_engine.c
948
if (q) {
drivers/gpu/drm/xe/xe_hw_engine.c
950
node = xe_guc_capture_get_matching_and_lock(q);
drivers/gpu/drm/xe/xe_hw_engine.h
59
xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_hw_engine_group.c
125
int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_hw_engine_group.c
128
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_hw_engine_group.c
131
xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM));
drivers/gpu/drm/xe/xe_hw_engine_group.c
132
xe_assert(xe, q->vm);
drivers/gpu/drm/xe/xe_hw_engine_group.c
134
if (xe_vm_in_preempt_fence_mode(q->vm))
drivers/gpu/drm/xe/xe_hw_engine_group.c
141
if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) {
drivers/gpu/drm/xe/xe_hw_engine_group.c
142
q->ops->suspend(q);
drivers/gpu/drm/xe/xe_hw_engine_group.c
143
err = q->ops->suspend_wait(q);
drivers/gpu/drm/xe/xe_hw_engine_group.c
150
list_add(&q->hw_engine_group_link, &group->exec_queue_list);
drivers/gpu/drm/xe/xe_hw_engine_group.c
166
void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_hw_engine_group.c
168
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_hw_engine_group.c
171
xe_assert(xe, q->vm);
drivers/gpu/drm/xe/xe_hw_engine_group.c
175
if (!list_empty(&q->hw_engine_group_link))
drivers/gpu/drm/xe/xe_hw_engine_group.c
176
list_del(&q->hw_engine_group_link);
drivers/gpu/drm/xe/xe_hw_engine_group.c
20
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_hw_engine_group.c
202
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_hw_engine_group.c
209
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
drivers/gpu/drm/xe/xe_hw_engine_group.c
212
if (!xe_vm_in_fault_mode(q->vm))
drivers/gpu/drm/xe/xe_hw_engine_group.c
215
idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
drivers/gpu/drm/xe/xe_hw_engine_group.c
219
xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
drivers/gpu/drm/xe/xe_hw_engine_group.c
221
xe_gt_stats_incr(q->gt,
drivers/gpu/drm/xe/xe_hw_engine_group.c
225
q->ops->suspend(q);
drivers/gpu/drm/xe/xe_hw_engine_group.c
226
gt = q->gt;
drivers/gpu/drm/xe/xe_hw_engine_group.c
229
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
drivers/gpu/drm/xe/xe_hw_engine_group.c
230
if (!xe_vm_in_fault_mode(q->vm))
drivers/gpu/drm/xe/xe_hw_engine_group.c
233
err = q->ops->suspend_wait(q);
drivers/gpu/drm/xe/xe_hw_engine_group.c
263
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_hw_engine_group.c
270
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
drivers/gpu/drm/xe/xe_hw_engine_group.c
271
if (xe_vm_in_lr_mode(q->vm))
drivers/gpu/drm/xe/xe_hw_engine_group.c
274
xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_WAIT_DMA_QUEUE_COUNT, 1);
drivers/gpu/drm/xe/xe_hw_engine_group.c
275
fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
drivers/gpu/drm/xe/xe_hw_engine_group.c
278
gt = q->gt;
drivers/gpu/drm/xe/xe_hw_engine_group.c
33
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
drivers/gpu/drm/xe/xe_hw_engine_group.c
34
if (!xe_vm_in_fault_mode(q->vm))
drivers/gpu/drm/xe/xe_hw_engine_group.c
37
q->ops->resume(q);
drivers/gpu/drm/xe/xe_hw_engine_group.c
401
xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_hw_engine_group.c
403
if (xe_vm_in_fault_mode(q->vm))
drivers/gpu/drm/xe/xe_hw_engine_group.h
18
int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_hw_engine_group.h
19
void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_hw_engine_group.h
28
xe_hw_engine_group_find_exec_mode(struct xe_exec_queue *q);
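
The xe_hw_engine_group.c entries above show suspension done in two passes over group->exec_queue_list: one loop issues q->ops->suspend(q) for every queue, then a second loop calls q->ops->suspend_wait(q), so the individual suspends overlap rather than run back to back. A compilable sketch of that two-pass shape; struct queue and struct queue_ops are invented stand-ins, not the driver's types.

    #include <stdio.h>

    struct queue;

    struct queue_ops {
        void (*suspend)(struct queue *q);
        int (*suspend_wait)(struct queue *q);
    };

    struct queue {
        const char *name;
        const struct queue_ops *ops;
    };

    static int group_suspend(struct queue **qs, int n)
    {
        int i, err;

        for (i = 0; i < n; i++)      /* pass 1: start every suspend */
            qs[i]->ops->suspend(qs[i]);

        for (i = 0; i < n; i++) {    /* pass 2: wait for completion */
            err = qs[i]->ops->suspend_wait(qs[i]);
            if (err)
                return err;
        }
        return 0;
    }

    static void stub_suspend(struct queue *q) { printf("suspend %s\n", q->name); }
    static int stub_wait(struct queue *q) { printf("waited %s\n", q->name); return 0; }

    int main(void)
    {
        static const struct queue_ops ops = { stub_suspend, stub_wait };
        struct queue a = { "a", &ops }, b = { "b", &ops };
        struct queue *qs[] = { &a, &b };

        return group_suspend(qs, 2);
    }
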
drivers/gpu/drm/xe/xe_lrc.c
2202
u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs)
drivers/gpu/drm/xe/xe_lrc.c
2204
struct xe_gt *gt = q->hwe->gt;
drivers/gpu/drm/xe/xe_lrc.c
2229
if (XE_GT_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
drivers/gpu/drm/xe/xe_lrc.h
138
u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs);
drivers/gpu/drm/xe/xe_migrate.c
105
xe_vm_lock(m->q->vm, false);
drivers/gpu/drm/xe/xe_migrate.c
1052
return migrate->q->lrc[0];
drivers/gpu/drm/xe/xe_migrate.c
107
xe_vm_unlock(m->q->vm);
drivers/gpu/drm/xe/xe_migrate.c
1096
int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_migrate.c
113
xe_vm_close_and_put(m->q->vm);
drivers/gpu/drm/xe/xe_migrate.c
114
xe_exec_queue_put(m->q);
drivers/gpu/drm/xe/xe_migrate.c
1264
return migrate->q;
drivers/gpu/drm/xe/xe_migrate.c
1357
job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
drivers/gpu/drm/xe/xe_migrate.c
1588
job = xe_bb_create_migration_job(m->q, bb,
drivers/gpu/drm/xe/xe_migrate.c
1695
return xe_vm_get(m->q->vm);
drivers/gpu/drm/xe/xe_migrate.c
1775
bool is_migrate = pt_update_ops->q == m->q;
drivers/gpu/drm/xe/xe_migrate.c
1912
job = xe_bb_create_migration_job(pt_update_ops->q, bb,
drivers/gpu/drm/xe/xe_migrate.c
2059
pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
drivers/gpu/drm/xe/xe_migrate.c
2198
job = xe_bb_create_migration_job(m->q, bb,
drivers/gpu/drm/xe/xe_migrate.c
2495
void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_migrate.c
2497
bool is_migrate = q == m->q;
drivers/gpu/drm/xe/xe_migrate.c
2502
xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */
drivers/gpu/drm/xe/xe_migrate.c
2513
void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_migrate.c
2515
bool is_migrate = q == m->q;
drivers/gpu/drm/xe/xe_migrate.c
2520
xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */
drivers/gpu/drm/xe/xe_migrate.c
2528
void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_migrate.c
2530
struct xe_migrate *m = gt_to_tile(q->gt)->migrate;
drivers/gpu/drm/xe/xe_migrate.c
2532
xe_gt_assert(q->gt, q == m->q);
drivers/gpu/drm/xe/xe_migrate.c
471
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
drivers/gpu/drm/xe/xe_migrate.c
478
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
drivers/gpu/drm/xe/xe_migrate.c
484
if (IS_ERR(m->q)) {
drivers/gpu/drm/xe/xe_migrate.c
485
err = PTR_ERR(m->q);
drivers/gpu/drm/xe/xe_migrate.c
50
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_migrate.c
607
struct xe_vm *vm = m->q->vm;
drivers/gpu/drm/xe/xe_migrate.c
992
job = xe_bb_create_migration_job(m->q, bb,
drivers/gpu/drm/xe/xe_migrate.h
135
int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_migrate.h
169
void xe_migrate_job_lock_assert(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_migrate.h
171
static inline void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_migrate.h
176
void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_migrate.h
177
void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q);
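
In the xe_migrate.c entries above, xe_migrate_job_lock()/xe_migrate_job_unlock() branch on is_migrate = q == m->q: jobs on the migrate context's own queue serialize on an internal lock, while user queues are expected to already hold their VM lock (see the xe_vm_assert_held(q->user_vm) lines). A minimal pthreads sketch of that routing; all structures here are invented for illustration.

    #include <pthread.h>
    #include <stdbool.h>

    struct queue { int id; };

    struct migrate {
        struct queue *q;          /* the migrate context's dedicated queue */
        pthread_mutex_t job_lock; /* serializes jobs on m->q only */
    };

    static void migrate_job_lock(struct migrate *m, struct queue *q)
    {
        bool is_migrate = q == m->q;

        if (is_migrate)
            pthread_mutex_lock(&m->job_lock);
        /* else: caller must already hold the queue's VM lock */
    }

    static void migrate_job_unlock(struct migrate *m, struct queue *q)
    {
        if (q == m->q)
            pthread_mutex_unlock(&m->job_lock);
    }

    int main(void)
    {
        struct queue mq = { 1 };
        struct migrate m = { .q = &mq };

        pthread_mutex_init(&m.job_lock, NULL);
        migrate_job_lock(&m, &mq);
        migrate_job_unlock(&m, &mq);
        pthread_mutex_destroy(&m.job_lock);
        return 0;
    }
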
drivers/gpu/drm/xe/xe_oa.c
1470
struct xe_exec_queue *q = stream->exec_q;
drivers/gpu/drm/xe/xe_oa.c
1474
ret1 = q->ops->set_timeslice(q, stream->hwe->eclass->sched_props.timeslice_us);
drivers/gpu/drm/xe/xe_oa.c
1475
ret2 = q->ops->set_preempt_timeout(q, stream->hwe->eclass->sched_props.preempt_timeout_us);
drivers/gpu/drm/xe/xe_oa.c
1486
struct xe_exec_queue *q = stream->exec_q;
drivers/gpu/drm/xe/xe_oa.c
1490
ret = q->ops->set_timeslice(q, 0);
drivers/gpu/drm/xe/xe_oa.c
1494
ret = q->ops->set_preempt_timeout(q, 0);
drivers/gpu/drm/xe/xe_oa.c
613
static void xe_oa_lock_vma(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_oa.c
615
if (q->vm) {
drivers/gpu/drm/xe/xe_oa.c
616
down_read(&q->vm->lock);
drivers/gpu/drm/xe/xe_oa.c
617
xe_vm_lock(q->vm, false);
drivers/gpu/drm/xe/xe_oa.c
621
static void xe_oa_unlock_vma(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_oa.c
623
if (q->vm) {
drivers/gpu/drm/xe/xe_oa.c
624
xe_vm_unlock(q->vm);
drivers/gpu/drm/xe/xe_oa.c
625
up_read(&q->vm->lock);
drivers/gpu/drm/xe/xe_oa.c
632
struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
drivers/gpu/drm/xe/xe_oa.c
637
xe_oa_lock_vma(q);
drivers/gpu/drm/xe/xe_oa.c
639
job = xe_bb_create_job(q, bb);
drivers/gpu/drm/xe/xe_oa.c
659
xe_oa_unlock_vma(q);
drivers/gpu/drm/xe/xe_oa.c
665
xe_oa_unlock_vma(q);
drivers/gpu/drm/xe/xe_preempt_fence.c
143
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_preempt_fence.c
147
pfence->q = xe_exec_queue_get(q);
drivers/gpu/drm/xe/xe_preempt_fence.c
169
xe_preempt_fence_create(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_preempt_fence.c
178
return xe_preempt_fence_arm(pfence, q, context, seqno);
drivers/gpu/drm/xe/xe_preempt_fence.c
20
struct xe_exec_queue *q = pfence->q;
drivers/gpu/drm/xe/xe_preempt_fence.c
24
} else if (!q->ops->reset_status(q)) {
drivers/gpu/drm/xe/xe_preempt_fence.c
25
int err = q->ops->suspend_wait(q);
drivers/gpu/drm/xe/xe_preempt_fence.c
28
xe_gt_dbg(q->gt, "PREEMPT FENCE RETRY guc_id=%d",
drivers/gpu/drm/xe/xe_preempt_fence.c
29
q->guc->id);
drivers/gpu/drm/xe/xe_preempt_fence.c
30
queue_work(q->vm->xe->preempt_fence_wq,
drivers/gpu/drm/xe/xe_preempt_fence.c
53
xe_vm_queue_rebind_worker(q->vm);
drivers/gpu/drm/xe/xe_preempt_fence.c
54
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_preempt_fence.c
74
struct xe_exec_queue *q = pfence->q;
drivers/gpu/drm/xe/xe_preempt_fence.c
76
pfence->error = q->ops->suspend(q);
drivers/gpu/drm/xe/xe_preempt_fence.c
77
queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
drivers/gpu/drm/xe/xe_preempt_fence.h
14
xe_preempt_fence_create(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_preempt_fence.h
22
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_preempt_fence_types.h
25
struct xe_exec_queue *q;
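
The xe_preempt_fence.c entries above outline a lifecycle: arming takes a reference with xe_exec_queue_get(q), the signal path calls q->ops->suspend(q) and defers q->ops->suspend_wait(q) to a work item on preempt_fence_wq, and xe_exec_queue_put(q) drops the reference once the fence completes. A simplified, compilable model of that flow; the callback table and plain refcount here are assumptions standing in for the driver's ops vtable and its kref-based refcount.

    #include <stddef.h>

    struct queue;

    struct queue_ops {
        int (*suspend)(struct queue *q);
        int (*suspend_wait)(struct queue *q);
    };

    struct queue {
        const struct queue_ops *ops;
        int refcount; /* illustrative; the driver uses a kref */
    };

    struct preempt_fence {
        struct queue *q;
        int error;
    };

    static struct queue *queue_get(struct queue *q) { q->refcount++; return q; }
    static void queue_put(struct queue *q) { q->refcount--; }

    /* "enable signaling": kick the suspend and remember any early error */
    static void preempt_fence_arm(struct preempt_fence *pf, struct queue *q)
    {
        pf->q = queue_get(q);
        pf->error = q->ops->suspend(q);
    }

    /* deferred work: wait for the suspend, then drop the queue reference */
    static void preempt_fence_work(struct preempt_fence *pf)
    {
        if (!pf->error)
            pf->error = pf->q->ops->suspend_wait(pf->q);
        queue_put(pf->q);
    }

    static int stub_suspend(struct queue *q) { (void)q; return 0; }
    static int stub_wait(struct queue *q) { (void)q; return 0; }

    int main(void)
    {
        static const struct queue_ops ops = { stub_suspend, stub_wait };
        struct queue q = { &ops, 1 };
        struct preempt_fence pf;

        preempt_fence_arm(&pf, &q);
        preempt_fence_work(&pf);
        return pf.error;
    }
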
drivers/gpu/drm/xe/xe_pt.c
1299
if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
drivers/gpu/drm/xe/xe_pt.c
2497
static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_pt.c
2501
return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;
drivers/gpu/drm/xe/xe_pt.c
2503
return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;
drivers/gpu/drm/xe/xe_pt.c
2523
struct xe_exec_queue *q = pt_update_ops->q;
drivers/gpu/drm/xe/xe_pt.c
2554
to_dep_scheduler(q, tile->primary_gt);
drivers/gpu/drm/xe/xe_pt.c
2556
ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
drivers/gpu/drm/xe/xe_pt.c
2579
dep_scheduler = to_dep_scheduler(q, tile->media_gt);
drivers/gpu/drm/xe/xe_pt.c
2581
mjob = xe_tlb_inval_job_create(q,
drivers/gpu/drm/xe/xe_pt.c
2666
if (!(q->flags & EXEC_QUEUE_FLAG_MIGRATE))
drivers/gpu/drm/xe/xe_pt.c
2667
xe_exec_queue_last_fence_set(q, vm, fence);
drivers/gpu/drm/xe/xe_pt_types.h
101
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_pxp.c
486
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
drivers/gpu/drm/xe/xe_pxp.c
494
q->pxp.type = type;
drivers/gpu/drm/xe/xe_pxp.c
499
static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_pxp.c
511
list_add_tail(&q->pxp.link, &pxp->queues.list);
drivers/gpu/drm/xe/xe_pxp.c
655
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_pxp.c
669
ret = pxp_start(pxp, q->pxp.type);
drivers/gpu/drm/xe/xe_pxp.c
672
ret = __exec_queue_add(pxp, q);
drivers/gpu/drm/xe/xe_pxp.c
688
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
drivers/gpu/drm/xe/xe_pxp.c
698
if (!list_empty(&q->pxp.link)) {
drivers/gpu/drm/xe/xe_pxp.c
699
list_del_init(&q->pxp.link);
drivers/gpu/drm/xe/xe_pxp.c
703
q->pxp.type = DRM_XE_PXP_TYPE_NONE;
drivers/gpu/drm/xe/xe_pxp.c
721
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_pxp.c
723
__pxp_exec_queue_remove(pxp, q, true);
drivers/gpu/drm/xe/xe_pxp.c
728
struct xe_exec_queue *tmp, *q;
drivers/gpu/drm/xe/xe_pxp.c
733
list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
drivers/gpu/drm/xe/xe_pxp.c
734
q = xe_exec_queue_get_unless_zero(q);
drivers/gpu/drm/xe/xe_pxp.c
735
if (!q)
drivers/gpu/drm/xe/xe_pxp.c
738
list_move_tail(&q->pxp.link, &to_clean);
drivers/gpu/drm/xe/xe_pxp.c
742
list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
drivers/gpu/drm/xe/xe_pxp.c
743
xe_exec_queue_kill(q);
drivers/gpu/drm/xe/xe_pxp.c
749
__pxp_exec_queue_remove(pxp, q, false);
drivers/gpu/drm/xe/xe_pxp.c
751
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_pxp.h
27
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type);
drivers/gpu/drm/xe/xe_pxp.h
28
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_pxp.h
29
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_pxp_submit.c
146
q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1, hwe,
drivers/gpu/drm/xe/xe_pxp_submit.c
149
if (IS_ERR(q)) {
drivers/gpu/drm/xe/xe_pxp_submit.c
150
err = PTR_ERR(q);
drivers/gpu/drm/xe/xe_pxp_submit.c
160
gsc_res->q = q;
drivers/gpu/drm/xe/xe_pxp_submit.c
177
if (!gsc_res->q)
drivers/gpu/drm/xe/xe_pxp_submit.c
180
xe_exec_queue_put(gsc_res->q);
drivers/gpu/drm/xe/xe_pxp_submit.c
314
job = xe_sched_job_create(pxp->vcs_exec.q, &addr);
drivers/gpu/drm/xe/xe_pxp_submit.c
367
static int pxp_pkt_submit(struct xe_exec_queue *q, u64 batch_addr)
drivers/gpu/drm/xe/xe_pxp_submit.c
369
struct xe_gt *gt = q->gt;
drivers/gpu/drm/xe/xe_pxp_submit.c
375
xe_assert(xe, q->hwe->engine_id == XE_HW_ENGINE_GSCCS0);
drivers/gpu/drm/xe/xe_pxp_submit.c
377
job = xe_sched_job_create(q, &batch_addr);
drivers/gpu/drm/xe/xe_pxp_submit.c
40
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_pxp_submit.c
465
ret = pxp_pkt_submit(gsc_res->q, 0);
drivers/gpu/drm/xe/xe_pxp_submit.c
48
q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, hwe,
drivers/gpu/drm/xe/xe_pxp_submit.c
50
if (IS_ERR(q))
drivers/gpu/drm/xe/xe_pxp_submit.c
51
return PTR_ERR(q);
drivers/gpu/drm/xe/xe_pxp_submit.c
65
pxp->vcs_exec.q = q;
drivers/gpu/drm/xe/xe_pxp_submit.c
71
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_pxp_submit.c
80
if (pxp->vcs_exec.q)
drivers/gpu/drm/xe/xe_pxp_submit.c
81
xe_exec_queue_put(pxp->vcs_exec.q);
drivers/gpu/drm/xe/xe_pxp_submit.c
96
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_pxp_types.h
46
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_pxp_types.h
80
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_ring_ops.c
136
static int emit_pipe_invalidate(struct xe_exec_queue *q, u32 mask_flags,
drivers/gpu/drm/xe/xe_ring_ops.c
152
if (xe_exec_queue_is_multi_queue(q))
drivers/gpu/drm/xe/xe_ring_ops.c
180
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_ring_ops.c
181
struct xe_gt *gt = q->gt;
drivers/gpu/drm/xe/xe_ring_ops.c
201
else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
drivers/gpu/drm/xe/xe_ring_ops.c
204
if (xe_exec_queue_is_multi_queue(q))
drivers/gpu/drm/xe/xe_ring_ops.c
212
static int emit_pipe_imm_ggtt(struct xe_exec_queue *q, u32 addr, u32 value,
drivers/gpu/drm/xe/xe_ring_ops.c
220
if (xe_exec_queue_is_multi_queue(q))
drivers/gpu/drm/xe/xe_ring_ops.c
230
if (job->q->vm && !job->ggtt)
drivers/gpu/drm/xe/xe_ring_ops.c
265
struct xe_gt *gt = job->q->gt;
drivers/gpu/drm/xe/xe_ring_ops.c
321
struct xe_gt *gt = job->q->gt;
drivers/gpu/drm/xe/xe_ring_ops.c
323
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
drivers/gpu/drm/xe/xe_ring_ops.c
377
struct xe_gt *gt = job->q->gt;
drivers/gpu/drm/xe/xe_ring_ops.c
389
else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
drivers/gpu/drm/xe/xe_ring_ops.c
393
i = emit_pipe_invalidate(job->q, mask_flags, job->ring_ops_flush_tlb, dw, i);
drivers/gpu/drm/xe/xe_ring_ops.c
416
i = emit_pipe_imm_ggtt(job->q, xe_lrc_seqno_ggtt_addr(lrc), seqno, lacks_render, dw, i);
drivers/gpu/drm/xe/xe_ring_ops.c
429
struct xe_gt *gt = job->q->gt;
drivers/gpu/drm/xe/xe_ring_ops.c
456
xe_gt_assert(job->q->gt, i <= MAX_JOB_SIZE_DW);
drivers/gpu/drm/xe/xe_ring_ops.c
463
struct xe_gt *gt = job->q->gt;
drivers/gpu/drm/xe/xe_ring_ops.c
465
xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */
drivers/gpu/drm/xe/xe_ring_ops.c
467
__emit_job_gen12_simple(job, job->q->lrc[0],
drivers/gpu/drm/xe/xe_ring_ops.c
477
if (xe_sched_job_is_migration(job->q)) {
drivers/gpu/drm/xe/xe_ring_ops.c
478
emit_migration_job_gen12(job, job->q->lrc[0],
drivers/gpu/drm/xe/xe_ring_ops.c
484
for (i = 0; i < job->q->width; ++i)
drivers/gpu/drm/xe/xe_ring_ops.c
485
__emit_job_gen12_simple(job, job->q->lrc[i],
drivers/gpu/drm/xe/xe_ring_ops.c
496
for (i = 0; i < job->q->width; ++i)
drivers/gpu/drm/xe/xe_ring_ops.c
497
__emit_job_gen12_video(job, job->q->lrc[i],
drivers/gpu/drm/xe/xe_ring_ops.c
507
for (i = 0; i < job->q->width; ++i)
drivers/gpu/drm/xe/xe_ring_ops.c
508
__emit_job_gen12_render_compute(job, job->q->lrc[i],
drivers/gpu/drm/xe/xe_sched_job.c
106
XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
drivers/gpu/drm/xe/xe_sched_job.c
108
job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
drivers/gpu/drm/xe/xe_sched_job.c
112
job->q = q;
drivers/gpu/drm/xe/xe_sched_job.c
115
xe_exec_queue_get(job->q);
drivers/gpu/drm/xe/xe_sched_job.c
117
err = drm_sched_job_init(&job->drm, q->entity, 1, NULL,
drivers/gpu/drm/xe/xe_sched_job.c
118
q->xef ? q->xef->drm->client_id : 0);
drivers/gpu/drm/xe/xe_sched_job.c
122
for (i = 0; i < q->width; ++i) {
drivers/gpu/drm/xe/xe_sched_job.c
132
if (i + 1 == q->width)
drivers/gpu/drm/xe/xe_sched_job.c
143
width = q->width;
drivers/gpu/drm/xe/xe_sched_job.c
150
atomic_inc(&q->job_cnt);
drivers/gpu/drm/xe/xe_sched_job.c
159
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_sched_job.c
176
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_sched_job.c
182
atomic_dec(&q->job_cnt);
drivers/gpu/drm/xe/xe_sched_job.c
183
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_sched_job.c
218
xe_hw_fence_irq_run(job->q->fence_irq);
drivers/gpu/drm/xe/xe_sched_job.c
224
struct xe_lrc *lrc = job->q->lrc[0];
drivers/gpu/drm/xe/xe_sched_job.c
234
struct xe_lrc *lrc = job->q->lrc[0];
drivers/gpu/drm/xe/xe_sched_job.c
248
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_sched_job.c
250
struct xe_vm *vm = q->vm;
drivers/gpu/drm/xe/xe_sched_job.c
256
!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
drivers/gpu/drm/xe/xe_sched_job.c
257
lockdep_assert_held(&q->vm->lock);
drivers/gpu/drm/xe/xe_sched_job.c
258
if (!xe_vm_in_lr_mode(q->vm))
drivers/gpu/drm/xe/xe_sched_job.c
259
xe_vm_assert_held(q->vm);
drivers/gpu/drm/xe/xe_sched_job.c
262
if (vm && !xe_sched_job_is_migration(q) && !xe_vm_in_lr_mode(vm) &&
drivers/gpu/drm/xe/xe_sched_job.c
263
(vm->batch_invalidate_tlb || vm->tlb_flush_seqno != q->tlb_flush_seqno)) {
drivers/gpu/drm/xe/xe_sched_job.c
265
q->tlb_flush_seqno = vm->tlb_flush_seqno;
drivers/gpu/drm/xe/xe_sched_job.c
270
for (i = 0; i < q->width; prev = fence, ++i) {
drivers/gpu/drm/xe/xe_sched_job.c
274
xe_lrc_init_seqno_fence(q->lrc[i], fence);
drivers/gpu/drm/xe/xe_sched_job.c
280
xe_assert(gt_to_xe(q->gt), job->lrc_seqno == fence->seqno);
drivers/gpu/drm/xe/xe_sched_job.c
320
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_sched_job.c
321
struct xe_device *xe = q->gt->tile->xe;
drivers/gpu/drm/xe/xe_sched_job.c
323
size_t len = sizeof(*snapshot) + (sizeof(u64) * q->width);
drivers/gpu/drm/xe/xe_sched_job.c
330
snapshot->batch_addr_len = q->width;
drivers/gpu/drm/xe/xe_sched_job.c
331
for (i = 0; i < q->width; i++)
drivers/gpu/drm/xe/xe_sched_job.c
63
bool xe_sched_job_is_migration(struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_sched_job.c
65
return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
drivers/gpu/drm/xe/xe_sched_job.c
70
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_sched_job.c
71
bool is_migration = xe_sched_job_is_migration(q);
drivers/gpu/drm/xe/xe_sched_job.c
73
kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
drivers/gpu/drm/xe/xe_sched_job.c
79
return gt_to_xe(job->q->gt);
drivers/gpu/drm/xe/xe_sched_job.c
87
for (i = 0; i < job->q->width; ++i) {
drivers/gpu/drm/xe/xe_sched_job.c
96
struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_sched_job.c
99
bool is_migration = xe_sched_job_is_migration(q);
drivers/gpu/drm/xe/xe_sched_job.h
21
struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_sched_job.h
86
bool xe_sched_job_is_migration(struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_sched_job_types.h
41
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
335
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
353
q = xe_exec_queue_create_bind(xe, tile, NULL, flags, 0);
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
354
if (IS_ERR(q)) {
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
355
err = PTR_ERR(q);
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
358
ctx->mig_q = q;
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
382
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_sync.c
333
struct xe_exec_queue *q, struct xe_vm *vm)
drivers/gpu/drm/xe/xe_sync.c
347
if (q->flags & EXEC_QUEUE_FLAG_VM) {
drivers/gpu/drm/xe/xe_sync.c
363
xe_exec_queue_last_fence_get(q, vm);
drivers/gpu/drm/xe/xe_sync.c
366
xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
drivers/gpu/drm/xe/xe_sync.c
367
list_for_each_entry(__q, &q->multi_gt_list,
drivers/gpu/drm/xe/xe_sync.c
386
fence = xe_exec_queue_last_fence_get(q, vm);
drivers/gpu/drm/xe/xe_sync.h
37
struct xe_exec_queue *q, struct xe_vm *vm);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
115
job->q = q;
drivers/gpu/drm/xe/xe_tlb_inval_job.c
125
xe_exec_queue_get(q); /* Pairs with put in xe_tlb_inval_job_destroy */
drivers/gpu/drm/xe/xe_tlb_inval_job.c
136
q->xef ? q->xef->drm->client_id : 0);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
141
xe_pm_runtime_get_noresume(gt_to_xe(q->gt));
drivers/gpu/drm/xe/xe_tlb_inval_job.c
149
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
166
struct xe_device *xe = gt_to_xe(job->q->gt);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
168
xe_gt_WARN_ON(job->q->gt, !xe->info.has_page_reclaim_hw_assist);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
180
struct xe_exec_queue *q = job->q;
drivers/gpu/drm/xe/xe_tlb_inval_job.c
181
struct xe_device *xe = gt_to_xe(q->gt);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
196
xe_exec_queue_put(q); /* Pairs with get from xe_tlb_inval_job_create */
drivers/gpu/drm/xe/xe_tlb_inval_job.c
212
xe_assert(gt_to_xe(job->q->gt), !xa_load(&job->dep.drm.dependencies, 0));
drivers/gpu/drm/xe/xe_tlb_inval_job.c
248
xe_assert(gt_to_xe(job->q->gt),
drivers/gpu/drm/xe/xe_tlb_inval_job.c
255
xe_assert(gt_to_xe(job->q->gt), !xa_is_err(ptr));
drivers/gpu/drm/xe/xe_tlb_inval_job.c
26
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_tlb_inval_job.c
266
xe_migrate_job_lock(m, job->q);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
281
xe_exec_queue_tlb_inval_last_fence_set(job->q, job->vm,
drivers/gpu/drm/xe/xe_tlb_inval_job.c
285
xe_migrate_job_unlock(m, job->q);
drivers/gpu/drm/xe/xe_tlb_inval_job.c
98
xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
drivers/gpu/drm/xe/xe_tlb_inval_job.h
21
xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
drivers/gpu/drm/xe/xe_trace.h
102
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
103
TP_ARGS(q),
drivers/gpu/drm/xe/xe_trace.h
106
__string(dev, __dev_name_eq(q))
drivers/gpu/drm/xe/xe_trace.h
119
__entry->class = q->class;
drivers/gpu/drm/xe/xe_trace.h
120
__entry->logical_mask = q->logical_mask;
drivers/gpu/drm/xe/xe_trace.h
121
__entry->gt_id = q->gt->info.id;
drivers/gpu/drm/xe/xe_trace.h
122
__entry->width = q->width;
drivers/gpu/drm/xe/xe_trace.h
123
__entry->guc_id = q->guc->id;
drivers/gpu/drm/xe/xe_trace.h
124
__entry->guc_state = atomic_read(&q->guc->state);
drivers/gpu/drm/xe/xe_trace.h
125
__entry->flags = q->flags;
drivers/gpu/drm/xe/xe_trace.h
126
__entry->primary = xe_exec_queue_multi_queue_primary(q)->guc->id;
drivers/gpu/drm/xe/xe_trace.h
137
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
138
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
142
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
143
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
147
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
148
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
152
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
153
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
157
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
158
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
162
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
163
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
167
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
168
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
172
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
173
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
177
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
178
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
182
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
183
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
187
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
188
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
192
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
193
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
197
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
198
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
202
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
203
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
207
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
208
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
212
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
213
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
217
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
218
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
222
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
223
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
227
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
228
TP_ARGS(q)
drivers/gpu/drm/xe/xe_trace.h
236
__string(dev, __dev_name_eq(job->q))
drivers/gpu/drm/xe/xe_trace.h
252
__entry->gt_id = job->q->gt->info.id;
drivers/gpu/drm/xe/xe_trace.h
253
__entry->guc_id = job->q->guc->id;
drivers/gpu/drm/xe/xe_trace.h
255
atomic_read(&job->q->guc->state);
drivers/gpu/drm/xe/xe_trace.h
256
__entry->flags = job->q->flags;
drivers/gpu/drm/xe/xe_trace.h
27
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
drivers/gpu/drm/xe/xe_trace.h
486
TP_PROTO(struct xe_exec_queue *q, int max_cnt),
drivers/gpu/drm/xe/xe_trace.h
487
TP_ARGS(q, max_cnt),
drivers/gpu/drm/xe/xe_trace.h
489
TP_STRUCT__entry(__string(dev, __dev_name_eq(q))
drivers/gpu/drm/xe/xe_trace.h
497
__entry->class = q->class;
drivers/gpu/drm/xe/xe_trace.h
498
__entry->logical_mask = q->logical_mask;
drivers/gpu/drm/xe/xe_trace.h
499
__entry->guc_id = q->guc->id;
drivers/gpu/drm/xe/xe_trace.h
70
TP_PROTO(struct xe_exec_queue *q),
drivers/gpu/drm/xe/xe_trace.h
71
TP_ARGS(q),
drivers/gpu/drm/xe/xe_trace.h
74
__string(dev, __dev_name_eq(q))
drivers/gpu/drm/xe/xe_trace.h
86
__entry->class = q->class;
drivers/gpu/drm/xe/xe_trace.h
87
__entry->logical_mask = q->logical_mask;
drivers/gpu/drm/xe/xe_trace.h
88
__entry->gt_id = q->gt->info.id;
drivers/gpu/drm/xe/xe_trace.h
89
__entry->width = q->width;
drivers/gpu/drm/xe/xe_trace.h
90
__entry->guc_id = q->guc->id;
drivers/gpu/drm/xe/xe_trace.h
91
__entry->guc_state = atomic_read(&q->guc->state);
drivers/gpu/drm/xe/xe_trace.h
92
__entry->flags = q->flags;
drivers/gpu/drm/xe/xe_vm.c
112
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
119
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
drivers/gpu/drm/xe/xe_vm.c
120
if (q->lr.pfence) {
drivers/gpu/drm/xe/xe_vm.c
123
timeout = dma_fence_wait_timeout(q->lr.pfence, false,
drivers/gpu/drm/xe/xe_vm.c
131
if (timeout < 0 || q->lr.pfence->error == -ETIME)
drivers/gpu/drm/xe/xe_vm.c
134
dma_fence_put(q->lr.pfence);
drivers/gpu/drm/xe/xe_vm.c
135
q->lr.pfence = NULL;
drivers/gpu/drm/xe/xe_vm.c
144
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
147
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
drivers/gpu/drm/xe/xe_vm.c
148
if (!xe_exec_queue_is_idle(q))
drivers/gpu/drm/xe/xe_vm.c
158
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
160
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
drivers/gpu/drm/xe/xe_vm.c
1635
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
1641
q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0);
drivers/gpu/drm/xe/xe_vm.c
1642
if (IS_ERR(q)) {
drivers/gpu/drm/xe/xe_vm.c
1643
err = PTR_ERR(q);
drivers/gpu/drm/xe/xe_vm.c
1646
vm->q[id] = q;
drivers/gpu/drm/xe/xe_vm.c
167
q, q->lr.context,
drivers/gpu/drm/xe/xe_vm.c
168
++q->lr.seqno);
drivers/gpu/drm/xe/xe_vm.c
169
dma_fence_put(q->lr.pfence);
drivers/gpu/drm/xe/xe_vm.c
170
q->lr.pfence = fence;
drivers/gpu/drm/xe/xe_vm.c
1755
if (vm->q[id]) {
drivers/gpu/drm/xe/xe_vm.c
1758
xe_exec_queue_last_fence_put(vm->q[id], vm);
drivers/gpu/drm/xe/xe_vm.c
176
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
1760
xe_exec_queue_tlb_inval_last_fence_put(vm->q[id], vm, i);
drivers/gpu/drm/xe/xe_vm.c
1766
if (vm->q[id]) {
drivers/gpu/drm/xe/xe_vm.c
1767
xe_exec_queue_kill(vm->q[id]);
drivers/gpu/drm/xe/xe_vm.c
1768
xe_exec_queue_put(vm->q[id]);
drivers/gpu/drm/xe/xe_vm.c
1769
vm->q[id] = NULL;
drivers/gpu/drm/xe/xe_vm.c
188
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
drivers/gpu/drm/xe/xe_vm.c
189
if (q->lr.pfence) {
drivers/gpu/drm/xe/xe_vm.c
1900
to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_vm.c
1902
return q ? q : vm->q[0];
drivers/gpu/drm/xe/xe_vm.c
191
q->lr.pfence,
drivers/gpu/drm/xe/xe_vm.c
201
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
206
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
drivers/gpu/drm/xe/xe_vm.c
207
q->ops->resume(q);
drivers/gpu/drm/xe/xe_vm.c
209
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
drivers/gpu/drm/xe/xe_vm.c
214
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_vm.c
234
pfence = xe_preempt_fence_create(q, q->lr.context,
drivers/gpu/drm/xe/xe_vm.c
235
++q->lr.seqno);
drivers/gpu/drm/xe/xe_vm.c
241
list_add(&q->lr.link, &vm->preempt.exec_queues);
drivers/gpu/drm/xe/xe_vm.c
243
q->lr.pfence = pfence;
drivers/gpu/drm/xe/xe_vm.c
277
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
drivers/gpu/drm/xe/xe_vm.c
283
if (!list_empty(&q->lr.link)) {
drivers/gpu/drm/xe/xe_vm.c
284
list_del_init(&q->lr.link);
drivers/gpu/drm/xe/xe_vm.c
287
if (q->lr.pfence) {
drivers/gpu/drm/xe/xe_vm.c
288
dma_fence_enable_sw_signaling(q->lr.pfence);
drivers/gpu/drm/xe/xe_vm.c
289
dma_fence_put(q->lr.pfence);
drivers/gpu/drm/xe/xe_vm.c
290
q->lr.pfence = NULL;
drivers/gpu/drm/xe/xe_vm.c
307
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
3132
struct xe_exec_queue *q = vops->q;
drivers/gpu/drm/xe/xe_vm.c
3141
if (vops->pt_update_ops[id].q)
drivers/gpu/drm/xe/xe_vm.c
3144
if (q) {
drivers/gpu/drm/xe/xe_vm.c
3145
vops->pt_update_ops[id].q = q;
drivers/gpu/drm/xe/xe_vm.c
3146
if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
drivers/gpu/drm/xe/xe_vm.c
3147
q = list_next_entry(q, multi_gt_list);
drivers/gpu/drm/xe/xe_vm.c
3149
vops->pt_update_ops[id].q = vm->q[id];
drivers/gpu/drm/xe/xe_vm.c
317
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
drivers/gpu/drm/xe/xe_vm.c
318
q->ops->kill(q);
drivers/gpu/drm/xe/xe_vm.c
3204
struct xe_exec_queue *q = vops->pt_update_ops[tile->id].q;
drivers/gpu/drm/xe/xe_vm.c
3219
xe_migrate_job_lock(tile->migrate, q);
drivers/gpu/drm/xe/xe_vm.c
3222
xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
drivers/gpu/drm/xe/xe_vm.c
3223
xe_migrate_job_unlock(tile->migrate, q);
drivers/gpu/drm/xe/xe_vm.c
3508
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_vm.c
3517
to_wait_exec_queue(vm, q), vm);
drivers/gpu/drm/xe/xe_vm.c
3531
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_vm.c
3537
vops->q = q;
drivers/gpu/drm/xe/xe_vm.c
3620
struct xe_exec_queue *q = NULL;
drivers/gpu/drm/xe/xe_vm.c
3638
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
drivers/gpu/drm/xe/xe_vm.c
3639
if (XE_IOCTL_DBG(xe, !q)) {
drivers/gpu/drm/xe/xe_vm.c
3644
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
drivers/gpu/drm/xe/xe_vm.c
3650
if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) {
drivers/gpu/drm/xe/xe_vm.c
3731
struct xe_exec_queue *__q = q ?: vm->q[0];
drivers/gpu/drm/xe/xe_vm.c
3758
xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
drivers/gpu/drm/xe/xe_vm.c
3822
err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
drivers/gpu/drm/xe/xe_vm.c
3837
if (q)
drivers/gpu/drm/xe/xe_vm.c
3838
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_vm.c
3862
struct xe_exec_queue *q, u64 addr,
drivers/gpu/drm/xe/xe_vm.c
3872
if (q)
drivers/gpu/drm/xe/xe_vm.c
3873
xe_exec_queue_get(q);
drivers/gpu/drm/xe/xe_vm.c
3877
xe_vma_ops_init(&vops, vm, q, NULL, 0);
drivers/gpu/drm/xe/xe_vm.c
3911
if (q)
drivers/gpu/drm/xe/xe_vm.c
3912
xe_exec_queue_put(q);
drivers/gpu/drm/xe/xe_vm.c
65
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_vm.c
684
struct xe_exec_queue *q,
drivers/gpu/drm/xe/xe_vm.c
70
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
drivers/gpu/drm/xe/xe_vm.c
71
if (!q->lr.pfence ||
drivers/gpu/drm/xe/xe_vm.c
73
&q->lr.pfence->flags)) {
drivers/gpu/drm/xe/xe_vm.c
759
vops.pt_update_ops[tile->id].q =
drivers/gpu/drm/xe/xe_vm.c
850
vops.pt_update_ops[tile->id].q =
drivers/gpu/drm/xe/xe_vm.c
933
vops.pt_update_ops[tile->id].q =
drivers/gpu/drm/xe/xe_vm.h
223
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_vm.h
224
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
drivers/gpu/drm/xe/xe_vm.h
272
struct xe_exec_queue *q, u64 addr,
drivers/gpu/drm/xe/xe_vm_types.h
212
struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];
drivers/gpu/drm/xe/xe_vm_types.h
475
struct xe_exec_queue *q;
drivers/gpu/drm/xe/xe_wait_user_fence.c
107
struct xe_exec_queue *q = NULL;
drivers/gpu/drm/xe/xe_wait_user_fence.c
128
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
drivers/gpu/drm/xe/xe_wait_user_fence.c
129
if (XE_IOCTL_DBG(xe, !q))
drivers/gpu/drm/xe/xe_wait_user_fence.c
148
if (q) {
drivers/gpu/drm/xe/xe_wait_user_fence.c
149
if (q->ops->reset_status(q)) {
drivers/gpu/drm/xe/xe_wait_user_fence.c
178
if (q)
drivers/gpu/drm/xe/xe_wait_user_fence.c
179
xe_exec_queue_put(q);
drivers/gpu/drm/xen/xen_drm_front.c
54
struct xen_drm_front_dbuf *buf, *q;
drivers/gpu/drm/xen/xen_drm_front.c
56
list_for_each_entry_safe(buf, q, dbuf_list, list)
drivers/gpu/drm/xen/xen_drm_front.c
65
struct xen_drm_front_dbuf *buf, *q;
drivers/gpu/drm/xen/xen_drm_front.c
67
list_for_each_entry_safe(buf, q, dbuf_list, list)
drivers/gpu/drm/xen/xen_drm_front.c
79
struct xen_drm_front_dbuf *buf, *q;
drivers/gpu/drm/xen/xen_drm_front.c
81
list_for_each_entry_safe(buf, q, dbuf_list, list) {
drivers/gpu/ipu-v3/ipu-ic-csc.c
13
#define QUANT_MAP(q) \
drivers/gpu/ipu-v3/ipu-ic-csc.c
14
((q) == V4L2_QUANTIZATION_FULL_RANGE || \
drivers/gpu/ipu-v3/ipu-ic-csc.c
15
(q) == V4L2_QUANTIZATION_DEFAULT ? 0 : 1)
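The QUANT_MAP() macro above collapses the V4L2 quantization enum to a single full/limited-range bit: full-range and the default both map to 0, everything else to 1. A minimal userspace illustration; the enum names below are stand-ins for the real V4L2_QUANTIZATION_* values from <linux/videodev2.h>:

#include <assert.h>

enum { QUANT_DEFAULT, QUANT_FULL_RANGE, QUANT_LIM_RANGE }; /* stand-ins */

#define QUANT_MAP(q) \
        ((q) == QUANT_FULL_RANGE || (q) == QUANT_DEFAULT ? 0 : 1)

int main(void)
{
        assert(QUANT_MAP(QUANT_DEFAULT) == 0);    /* default treated as full range */
        assert(QUANT_MAP(QUANT_FULL_RANGE) == 0);
        assert(QUANT_MAP(QUANT_LIM_RANGE) == 1);  /* anything else: limited */
        return 0;
}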
drivers/gpu/ipu-v3/ipu-image-convert.c
1237
struct list_head *q)
drivers/gpu/ipu-v3/ipu-image-convert.c
1244
list_for_each_entry(run, q, list) {
drivers/hid/hid-quirks.c
1129
struct quirks_list_struct *q;
drivers/hid/hid-quirks.c
1132
list_for_each_entry(q, &dquirks_list, node) {
drivers/hid/hid-quirks.c
1133
if (hid_match_one_id(hdev, &q->hid_bl_item)) {
drivers/hid/hid-quirks.c
1134
bl_entry = &q->hid_bl_item;
drivers/hid/hid-quirks.c
1164
struct quirks_list_struct *q_new, *q;
drivers/hid/hid-quirks.c
1186
list_for_each_entry(q, &dquirks_list, node) {
drivers/hid/hid-quirks.c
1188
if (hid_match_one_id(hdev, &q->hid_bl_item)) {
drivers/hid/hid-quirks.c
1190
list_replace(&q->node, &q_new->node);
drivers/hid/hid-quirks.c
1191
kfree(q);
drivers/hid/hid-quirks.c
1220
struct quirks_list_struct *q, *temp;
drivers/hid/hid-quirks.c
1223
list_for_each_entry_safe(q, temp, &dquirks_list, node) {
drivers/hid/hid-quirks.c
1224
if (bus == HID_BUS_ANY || bus == q->hid_bl_item.bus) {
drivers/hid/hid-quirks.c
1225
list_del(&q->node);
drivers/hid/hid-quirks.c
1226
kfree(q);
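The hid-quirks walkers above use the _safe list iterator precisely because matching nodes are list_del()'d and kfree()'d mid-walk; the non-safe variant would chase a freed next pointer. A condensed userspace model of that pattern, with the list primitives re-derived here and the quirk struct reduced to a stand-in:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Cache the next node before the body runs, so "pos" may be freed. */
#define list_for_each_entry_safe(pos, n, head, member)                   \
        for (pos = container_of((head)->next, typeof(*pos), member),     \
             n = container_of(pos->member.next, typeof(*pos), member);   \
             &pos->member != (head);                                     \
             pos = n, n = container_of(n->member.next, typeof(*n), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

struct quirk { int bus; struct list_head node; };

int main(void)
{
        struct list_head quirks = { &quirks, &quirks };
        struct quirk *q, *tmp;
        int i;

        for (i = 0; i < 4; i++) {
                q = malloc(sizeof(*q));
                q->bus = i & 1;
                list_add_tail(&q->node, &quirks);
        }

        /* Drop every bus-1 entry; safe because tmp already points past q. */
        list_for_each_entry_safe(q, tmp, &quirks, node) {
                if (q->bus == 1) {
                        list_del(&q->node);
                        free(q);
                }
        }

        list_for_each_entry_safe(q, tmp, &quirks, node)
                printf("remaining bus %d\n", q->bus);
        return 0;
}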
drivers/i2c/busses/i2c-au1550.c
119
if (q)
drivers/i2c/busses/i2c-au1550.c
127
return (q) ? wait_controller_done(adap) : 0;
drivers/i2c/busses/i2c-au1550.c
98
do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q)
drivers/i2c/i2c-core-base.c
2157
const struct i2c_adapter_quirks *q = adap->quirks;
drivers/i2c/i2c-core-base.c
2158
int max_num = q->max_num_msgs, i;
drivers/i2c/i2c-core-base.c
2161
if (q->flags & I2C_AQ_COMB) {
drivers/i2c/i2c-core-base.c
2166
if (q->flags & I2C_AQ_COMB_WRITE_FIRST && msgs[0].flags & I2C_M_RD)
drivers/i2c/i2c-core-base.c
2169
if (q->flags & I2C_AQ_COMB_READ_SECOND && !(msgs[1].flags & I2C_M_RD))
drivers/i2c/i2c-core-base.c
2172
if (q->flags & I2C_AQ_COMB_SAME_ADDR && msgs[0].addr != msgs[1].addr)
drivers/i2c/i2c-core-base.c
2175
if (i2c_quirk_exceeded(msgs[0].len, q->max_comb_1st_msg_len))
drivers/i2c/i2c-core-base.c
2178
if (i2c_quirk_exceeded(msgs[1].len, q->max_comb_2nd_msg_len))
drivers/i2c/i2c-core-base.c
2192
if (do_len_check && i2c_quirk_exceeded(len, q->max_read_len))
drivers/i2c/i2c-core-base.c
2195
if (q->flags & I2C_AQ_NO_ZERO_LEN_READ && len == 0)
drivers/i2c/i2c-core-base.c
2198
if (do_len_check && i2c_quirk_exceeded(len, q->max_write_len))
drivers/i2c/i2c-core-base.c
2201
if (q->flags & I2C_AQ_NO_ZERO_LEN_WRITE && len == 0)
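The i2c-core-base checks above gate transfers on the adapter's quirk descriptor. A simplified standalone model of the per-message part; the flag semantics follow the listing, but the bit values, struct layout, and helper names here are illustrative stand-ins, not the kernel definitions:

#include <stdbool.h>
#include <stdint.h>

#define AQ_NO_ZERO_LEN_READ  (1u << 0)  /* stand-in bit positions */
#define M_RD                 (1u << 1)

struct adapter_quirks { uint32_t flags; uint16_t max_read_len, max_write_len; };
struct i2c_msg_model  { uint16_t flags, len; };

/* A zero max_* means "no limit", mirroring i2c_quirk_exceeded(). */
static bool quirk_exceeded(uint16_t len, uint16_t quirk)
{
        return quirk && len > quirk;
}

static bool msg_violates_quirks(const struct adapter_quirks *q,
                                const struct i2c_msg_model *m)
{
        bool rd = m->flags & M_RD;

        if (rd && quirk_exceeded(m->len, q->max_read_len))
                return true;
        if (!rd && quirk_exceeded(m->len, q->max_write_len))
                return true;
        if (rd && (q->flags & AQ_NO_ZERO_LEN_READ) && m->len == 0)
                return true;
        return false;
}

In the real core, as the listing shows, the combined-transfer checks (I2C_AQ_COMB and friends on msgs[0]/msgs[1]) run before these per-message length checks.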
drivers/iio/common/st_sensors/st_sensors_core.c
639
int i, len = 0, q, r;
drivers/iio/common/st_sensors/st_sensors_core.c
647
q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000;
drivers/iio/common/st_sensors/st_sensors_core.c
650
len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r);
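The q/r pair in st_sensors_core.c above splits a gain stored in millionths into integer and fractional parts for "%u.%06u" output. The same arithmetic standalone, with an arbitrary demo value:

#include <stdio.h>

int main(void)
{
        unsigned int gain = 244140;       /* e.g. 0.244140 in micro-units */
        unsigned int q = gain / 1000000;  /* integer part */
        unsigned int r = gain % 1000000;  /* fractional part, six digits */

        printf("%u.%06u\n", q, r);        /* prints 0.244140 */
        return 0;
}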
drivers/iio/industrialio-buffer.c
1012
struct iio_demux_table *p, *q;
drivers/iio/industrialio-buffer.c
1014
list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
drivers/infiniband/hw/hfi1/affinity.c
184
struct list_head *pos, *q;
drivers/infiniband/hw/hfi1/affinity.c
188
list_for_each_safe(pos, q, &node_affinity.list) {
drivers/infiniband/hw/hfi1/ipoib.h
169
void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q);
drivers/infiniband/hw/hfi1/ipoib_tx.c
841
void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
drivers/infiniband/hw/hfi1/ipoib_tx.c
844
struct hfi1_ipoib_txq *txq = &priv->txqs[q];
drivers/infiniband/hw/hfi1/ipoib_tx.c
847
txq, q,
drivers/infiniband/hw/hfi1/mad.c
100
list_for_each_entry_safe(node, q, &trap_list, list) {
drivers/infiniband/hw/hfi1/mad.c
1020
q = (u16 *)data;
drivers/infiniband/hw/hfi1/mad.c
1023
get_pkeys(dd, port, q);
drivers/infiniband/hw/hfi1/mad.c
1025
p[i] = cpu_to_be16(q[i]);
drivers/infiniband/hw/hfi1/mad.c
1686
__be16 *q = (__be16 *)data;
drivers/infiniband/hw/hfi1/mad.c
1718
p[i] = be16_to_cpu(q[i]);
drivers/infiniband/hw/hfi1/mad.c
85
struct trap_node *node, *q;
drivers/infiniband/hw/hfi1/mad.c
987
u16 *q;
drivers/infiniband/hw/hfi1/mad.h
389
#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
drivers/infiniband/hw/ionic/ionic_admin.c
1010
ionic_queue_dbell_init(&eq->q, eq->eqid);
drivers/infiniband/hw/ionic/ionic_admin.c
1013
eq->q.cons = true;
drivers/infiniband/hw/ionic/ionic_admin.c
1029
rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr,
drivers/infiniband/hw/ionic/ionic_admin.c
1045
ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_admin.c
1061
ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_admin.c
126
old_prod = cq->q.prod;
drivers/infiniband/hw/ionic/ionic_admin.c
145
if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
drivers/infiniband/hw/ionic/ionic_admin.c
149
aq->q.cons, qid);
drivers/infiniband/hw/ionic/ionic_admin.c
153
if (unlikely(ionic_queue_empty(&aq->q))) {
drivers/infiniband/hw/ionic/ionic_admin.c
159
wr = aq->q_wr[aq->q.cons].wr;
drivers/infiniband/hw/ionic/ionic_admin.c
161
aq->q_wr[aq->q.cons].wr = NULL;
drivers/infiniband/hw/ionic/ionic_admin.c
169
ionic_queue_consume_entries(&aq->q,
drivers/infiniband/hw/ionic/ionic_admin.c
170
aq->q_wr[aq->q.cons].wqe_strides);
drivers/infiniband/hw/ionic/ionic_admin.c
173
ionic_queue_produce(&cq->q);
drivers/infiniband/hw/ionic/ionic_admin.c
174
cq->color = ionic_color_wrap(cq->q.prod, cq->color);
drivers/infiniband/hw/ionic/ionic_admin.c
177
if (old_prod != cq->q.prod) {
drivers/infiniband/hw/ionic/ionic_admin.c
179
cq->q.cons = cq->q.prod;
drivers/infiniband/hw/ionic/ionic_admin.c
181
ionic_queue_dbell_val(&cq->q));
drivers/infiniband/hw/ionic/ionic_admin.c
185
cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
drivers/infiniband/hw/ionic/ionic_admin.c
187
cq->q.dbell | IONIC_CQ_RING_ARM |
drivers/infiniband/hw/ionic/ionic_admin.c
195
old_prod = aq->q.prod;
drivers/infiniband/hw/ionic/ionic_admin.c
197
if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
drivers/infiniband/hw/ionic/ionic_admin.c
211
(ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
drivers/infiniband/hw/ionic/ionic_admin.c
212
avlbl_strides = ionic_queue_length_remaining(&aq->q);
drivers/infiniband/hw/ionic/ionic_admin.c
218
wr->status = aq->q.prod;
drivers/infiniband/hw/ionic/ionic_admin.c
219
aq->q_wr[aq->q.prod].wr = wr;
drivers/infiniband/hw/ionic/ionic_admin.c
220
aq->q_wr[aq->q.prod].wqe_strides = wr_strides;
drivers/infiniband/hw/ionic/ionic_admin.c
226
memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe,
drivers/infiniband/hw/ionic/ionic_admin.c
231
memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN,
drivers/infiniband/hw/ionic/ionic_admin.c
234
aq->q.prod, wr_strides);
drivers/infiniband/hw/ionic/ionic_admin.c
236
ionic_queue_at_prod(&aq->q),
drivers/infiniband/hw/ionic/ionic_admin.c
237
BIT(aq->q.stride_log2), true);
drivers/infiniband/hw/ionic/ionic_admin.c
238
ionic_queue_produce(&aq->q);
drivers/infiniband/hw/ionic/ionic_admin.c
247
memcpy(ionic_queue_at_prod(&aq->q), src + i,
drivers/infiniband/hw/ionic/ionic_admin.c
250
ionic_queue_at_prod(&aq->q),
drivers/infiniband/hw/ionic/ionic_admin.c
251
BIT(aq->q.stride_log2), true);
drivers/infiniband/hw/ionic/ionic_admin.c
252
ionic_queue_produce(&aq->q);
drivers/infiniband/hw/ionic/ionic_admin.c
256
if (old_prod != aq->q.prod)
drivers/infiniband/hw/ionic/ionic_admin.c
258
ionic_queue_dbell_val(&aq->q));
drivers/infiniband/hw/ionic/ionic_admin.c
277
if (ionic_queue_empty(&aq->q))
drivers/infiniband/hw/ionic/ionic_admin.c
288
pos = aq->q.cons;
drivers/infiniband/hw/ionic/ionic_admin.c
290
if (pos != aq->q.cons) {
drivers/infiniband/hw/ionic/ionic_admin.c
38
if (ionic_queue_empty(&aq->q))
drivers/infiniband/hw/ionic/ionic_admin.c
472
struct ionic_queue *q,
drivers/infiniband/hw/ionic/ionic_admin.c
48
pos = (aq->q.prod - 1) & aq->q.mask;
drivers/infiniband/hw/ionic/ionic_admin.c
483
.depth_log2 = q->depth_log2,
drivers/infiniband/hw/ionic/ionic_admin.c
484
.stride_log2 = q->stride_log2,
drivers/infiniband/hw/ionic/ionic_admin.c
485
.dma_addr = cpu_to_le64(q->dma),
drivers/infiniband/hw/ionic/ionic_admin.c
49
if (pos == aq->q.cons)
drivers/infiniband/hw/ionic/ionic_admin.c
54
ionic_queue_at(&aq->q, pos),
drivers/infiniband/hw/ionic/ionic_admin.c
540
rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid,
drivers/infiniband/hw/ionic/ionic_admin.c
55
BIT(aq->q.stride_log2), true);
drivers/infiniband/hw/ionic/ionic_admin.c
571
rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
drivers/infiniband/hw/ionic/ionic_admin.c
576
ionic_queue_dbell_init(&aq->q, aq->aqid);
drivers/infiniband/hw/ionic/ionic_admin.c
578
aq->q_wr = kzalloc_objs(*aq->q_wr, (u32)aq->q.mask + 1);
drivers/infiniband/hw/ionic/ionic_admin.c
593
ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_admin.c
604
ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_admin.c
618
rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid,
drivers/infiniband/hw/ionic/ionic_admin.c
771
qeqe = ionic_queue_at_prod(&eq->q);
drivers/infiniband/hw/ionic/ionic_admin.c
775
if (eq->q.cons != color)
drivers/infiniband/hw/ionic/ionic_admin.c
781
ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod);
drivers/infiniband/hw/ionic/ionic_admin.c
783
qeqe, BIT(eq->q.stride_log2), true);
drivers/infiniband/hw/ionic/ionic_admin.c
82
struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
drivers/infiniband/hw/ionic/ionic_admin.c
905
ionic_queue_produce(&eq->q);
drivers/infiniband/hw/ionic/ionic_admin.c
908
eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons);
drivers/infiniband/hw/ionic/ionic_admin.c
992
rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
drivers/infiniband/hw/ionic/ionic_controlpath.c
1173
.depth_log2 = cq->q.depth_log2,
drivers/infiniband/hw/ionic/ionic_controlpath.c
1174
.stride_log2 = cq->q.stride_log2,
drivers/infiniband/hw/ionic/ionic_controlpath.c
120
cq->q.ptr = NULL;
drivers/infiniband/hw/ionic/ionic_controlpath.c
121
cq->q.size = req_cq->size;
drivers/infiniband/hw/ionic/ionic_controlpath.c
122
cq->q.mask = req_cq->mask;
drivers/infiniband/hw/ionic/ionic_controlpath.c
123
cq->q.depth_log2 = req_cq->depth_log2;
drivers/infiniband/hw/ionic/ionic_controlpath.c
124
cq->q.stride_log2 = req_cq->stride_log2;
drivers/infiniband/hw/ionic/ionic_controlpath.c
128
rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
drivers/infiniband/hw/ionic/ionic_controlpath.c
134
ionic_queue_dbell_init(&cq->q, cq->cqid);
drivers/infiniband/hw/ionic/ionic_controlpath.c
136
cq->credit = cq->q.mask;
drivers/infiniband/hw/ionic/ionic_controlpath.c
139
rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
drivers/infiniband/hw/ionic/ionic_controlpath.c
162
ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_controlpath.c
17
static int ionic_validate_qdesc(struct ionic_qdesc *q)
drivers/infiniband/hw/ionic/ionic_controlpath.c
187
ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
drivers/infiniband/hw/ionic/ionic_controlpath.c
19
if (!q->addr || !q->size || !q->mask ||
drivers/infiniband/hw/ionic/ionic_controlpath.c
194
static int ionic_validate_qdesc_zero(struct ionic_qdesc *q)
drivers/infiniband/hw/ionic/ionic_controlpath.c
196
if (q->addr || q->size || q->mask || q->depth_log2 || q->stride_log2)
drivers/infiniband/hw/ionic/ionic_controlpath.c
20
!q->depth_log2 || !q->stride_log2)
drivers/infiniband/hw/ionic/ionic_controlpath.c
23
if (q->addr & (PAGE_SIZE - 1))
drivers/infiniband/hw/ionic/ionic_controlpath.c
2452
if (!cq->q.ptr)
drivers/infiniband/hw/ionic/ionic_controlpath.c
2456
prod = cq->q.prod;
drivers/infiniband/hw/ionic/ionic_controlpath.c
2457
qcqe = ionic_queue_at(&cq->q, prod);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2467
prod = ionic_queue_next(&cq->q, prod);
drivers/infiniband/hw/ionic/ionic_controlpath.c
2468
qcqe = ionic_queue_at(&cq->q, prod);
drivers/infiniband/hw/ionic/ionic_controlpath.c
26
if (q->mask != BIT(q->depth_log2) - 1)
drivers/infiniband/hw/ionic/ionic_controlpath.c
29
if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
drivers/infiniband/hw/ionic/ionic_datapath.c
18
struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
drivers/infiniband/hw/ionic/ionic_datapath.c
200
cqe, BIT(cq->q.stride_log2), true);
drivers/infiniband/hw/ionic/ionic_datapath.c
452
if (!ionic_queue_empty(&cq->q)) {
drivers/infiniband/hw/ionic/ionic_datapath.c
453
cq->credit += ionic_queue_length(&cq->q);
drivers/infiniband/hw/ionic/ionic_datapath.c
454
cq->q.cons = cq->q.prod;
drivers/infiniband/hw/ionic/ionic_datapath.c
457
ionic_queue_dbell_val(&cq->q));
drivers/infiniband/hw/ionic/ionic_datapath.c
587
ionic_queue_produce(&cq->q);
drivers/infiniband/hw/ionic/ionic_datapath.c
588
cq->color = ionic_color_wrap(cq->q.prod, cq->color);
drivers/infiniband/hw/ionic/ionic_datapath.c
673
u64 dbell_val = cq->q.dbell;
drivers/infiniband/hw/ionic/ionic_datapath.c
676
cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod);
drivers/infiniband/hw/ionic/ionic_datapath.c
679
cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
drivers/infiniband/hw/ionic/ionic_datapath.c
699
cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q));
drivers/infiniband/hw/ionic/ionic_ibdev.h
127
struct ionic_queue q;
drivers/infiniband/hw/ionic/ionic_ibdev.h
166
struct ionic_queue q;
drivers/infiniband/hw/ionic/ionic_ibdev.h
207
struct ionic_queue q;
drivers/infiniband/hw/ionic/ionic_queue.c
20
q->depth_log2 = order_base_2(depth + 1);
drivers/infiniband/hw/ionic/ionic_queue.c
21
q->stride_log2 = order_base_2(stride);
drivers/infiniband/hw/ionic/ionic_queue.c
23
if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT)
drivers/infiniband/hw/ionic/ionic_queue.c
24
q->depth_log2 = PAGE_SHIFT - q->stride_log2;
drivers/infiniband/hw/ionic/ionic_queue.c
26
if (q->depth_log2 > 16 || q->stride_log2 > 16)
drivers/infiniband/hw/ionic/ionic_queue.c
29
q->size = BIT_ULL(q->depth_log2 + q->stride_log2);
drivers/infiniband/hw/ionic/ionic_queue.c
30
q->mask = BIT(q->depth_log2) - 1;
drivers/infiniband/hw/ionic/ionic_queue.c
32
q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL);
drivers/infiniband/hw/ionic/ionic_queue.c
33
if (!q->ptr)
drivers/infiniband/hw/ionic/ionic_queue.c
37
if (!PAGE_ALIGNED(q->ptr)) {
drivers/infiniband/hw/ionic/ionic_queue.c
38
dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
drivers/infiniband/hw/ionic/ionic_queue.c
42
q->prod = 0;
drivers/infiniband/hw/ionic/ionic_queue.c
43
q->cons = 0;
drivers/infiniband/hw/ionic/ionic_queue.c
44
q->dbell = 0;
drivers/infiniband/hw/ionic/ionic_queue.c
49
void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev)
drivers/infiniband/hw/ionic/ionic_queue.c
51
dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
drivers/infiniband/hw/ionic/ionic_queue.c
8
int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
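ionic_queue_init() above derives the ring geometry from the requested depth: order_base_2(depth + 1) rounds up so the requested number of entries stays usable after one slot is reserved, and mask = 2^depth_log2 - 1. A standalone check of that arithmetic, with order_base_2() re-derived for userspace (the depth value is an arbitrary stand-in):

#include <assert.h>

/* Smallest b such that 2^b >= n, like the kernel's order_base_2(). */
static unsigned int order_base_2(unsigned int n)
{
        unsigned int b = 0;

        while ((1u << b) < n)
                b++;
        return b;
}

int main(void)
{
        unsigned int depth = 63;                        /* requested usable slots */
        unsigned int depth_log2 = order_base_2(depth + 1);
        unsigned int mask = (1u << depth_log2) - 1;

        assert(depth_log2 == 6);                        /* a 64-entry ring */
        assert(mask == 63);                             /* 63 usable + 1 reserved */
        return 0;
}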
drivers/infiniband/hw/ionic/ionic_queue.h
108
static inline bool ionic_queue_full(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
110
return q->mask == ionic_queue_length(q);
drivers/infiniband/hw/ionic/ionic_queue.h
135
static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx)
drivers/infiniband/hw/ionic/ionic_queue.h
137
return q->ptr + ((unsigned long)idx << q->stride_log2);
drivers/infiniband/hw/ionic/ionic_queue.h
146
static inline void *ionic_queue_at_prod(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
148
return ionic_queue_at(q, q->prod);
drivers/infiniband/hw/ionic/ionic_queue.h
157
static inline void *ionic_queue_at_cons(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
159
return ionic_queue_at(q, q->cons);
drivers/infiniband/hw/ionic/ionic_queue.h
169
static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx)
drivers/infiniband/hw/ionic/ionic_queue.h
171
return (idx + 1) & q->mask;
drivers/infiniband/hw/ionic/ionic_queue.h
180
static inline void ionic_queue_produce(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
182
q->prod = ionic_queue_next(q, q->prod);
drivers/infiniband/hw/ionic/ionic_queue.h
193
static inline void ionic_queue_consume(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
195
q->cons = ionic_queue_next(q, q->cons);
drivers/infiniband/hw/ionic/ionic_queue.h
207
static inline void ionic_queue_consume_entries(struct ionic_queue *q,
drivers/infiniband/hw/ionic/ionic_queue.h
210
q->cons = (q->cons + entries) & q->mask;
drivers/infiniband/hw/ionic/ionic_queue.h
218
static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
drivers/infiniband/hw/ionic/ionic_queue.h
220
q->dbell = IONIC_DBELL_QID(qid);
drivers/infiniband/hw/ionic/ionic_queue.h
229
static inline u64 ionic_queue_dbell_val(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
231
return q->dbell | q->prod;
drivers/infiniband/hw/ionic/ionic_queue.h
49
int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
drivers/infiniband/hw/ionic/ionic_queue.h
59
void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);
drivers/infiniband/hw/ionic/ionic_queue.h
69
static inline bool ionic_queue_empty(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
71
return q->prod == q->cons;
drivers/infiniband/hw/ionic/ionic_queue.h
82
static inline u16 ionic_queue_length(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
84
return (q->prod - q->cons) & q->mask;
drivers/infiniband/hw/ionic/ionic_queue.h
95
static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
drivers/infiniband/hw/ionic/ionic_queue.h
97
return q->mask - ionic_queue_length(q);
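The ionic_queue.h helpers above implement a power-of-two ring whose prod and cons indices are kept in range by masking every increment, with occupancy computed as (prod - cons) & mask. A condensed sketch of that math, mirroring ionic_queue_empty()/ionic_queue_length()/ionic_queue_full(); field names follow the driver, the demo values are arbitrary:

#include <assert.h>
#include <stdint.h>

struct ring { uint16_t prod, cons, mask; };     /* mask = depth - 1, depth = 2^n */

static uint16_t ring_length(const struct ring *q)
{
        return (q->prod - q->cons) & q->mask;
}

int main(void)
{
        struct ring q = { .prod = 0, .cons = 0, .mask = 7 };    /* depth 8 */

        assert(ring_length(&q) == 0);           /* empty: prod == cons */
        q.prod = (q.prod + 5) & q.mask;         /* produce five entries */
        assert(ring_length(&q) == 5);
        q.cons = q.prod;                        /* consume them all */
        assert(ring_length(&q) == 0);
        q.prod = (q.cons + q.mask) & q.mask;    /* fill to capacity */
        assert(ring_length(&q) == q.mask);      /* full leaves one slot unused */
        return 0;
}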
drivers/infiniband/hw/irdma/uk.c
1776
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
drivers/infiniband/hw/irdma/uk.c
1800
if ((void *)(unsigned long)comp_ctx == q)
drivers/infiniband/hw/irdma/user.h
585
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
drivers/infiniband/hw/irdma/verbs.c
4728
struct list_head *pos, *q;
drivers/infiniband/hw/irdma/verbs.c
4730
list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
drivers/infiniband/hw/mana/main.c
752
mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
drivers/infiniband/hw/mlx4/mad.c
1026
int p, q;
drivers/infiniband/hw/mlx4/mad.c
1032
for (q = 0; q <= 1; ++q) {
drivers/infiniband/hw/mlx4/mad.c
1035
q ? IB_QPT_GSI : IB_QPT_SMI,
drivers/infiniband/hw/mlx4/mad.c
1042
dev->send_agent[p][q] = agent;
drivers/infiniband/hw/mlx4/mad.c
1044
dev->send_agent[p][q] = NULL;
drivers/infiniband/hw/mlx4/mad.c
1052
for (q = 0; q <= 1; ++q)
drivers/infiniband/hw/mlx4/mad.c
1053
if (dev->send_agent[p][q])
drivers/infiniband/hw/mlx4/mad.c
1054
ib_unregister_mad_agent(dev->send_agent[p][q]);
drivers/infiniband/hw/mlx4/mad.c
1062
int p, q;
drivers/infiniband/hw/mlx4/mad.c
1065
for (q = 0; q <= 1; ++q) {
drivers/infiniband/hw/mlx4/mad.c
1066
agent = dev->send_agent[p][q];
drivers/infiniband/hw/mlx4/mad.c
1068
dev->send_agent[p][q] = NULL;
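In the mlx4 (and, below, mthca) send_agent tables, the inner index q selects the special QP type: q == 0 registers the SMI (QP0) agent and q == 1 the GSI (QP1) agent, via the "q ? IB_QPT_GSI : IB_QPT_SMI" ternary shown. Made explicit with stand-in names:

enum qp_type { QPT_SMI /* QP0 */, QPT_GSI /* QP1 */ };  /* stand-ins */

static enum qp_type agent_qp_type(int q)
{
        return q ? QPT_GSI : QPT_SMI;
}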
drivers/infiniband/hw/mthca/mthca_mad.c
286
int p, q;
drivers/infiniband/hw/mthca/mthca_mad.c
292
for (q = 0; q <= 1; ++q) {
drivers/infiniband/hw/mthca/mthca_mad.c
294
q ? IB_QPT_GSI : IB_QPT_SMI,
drivers/infiniband/hw/mthca/mthca_mad.c
301
dev->send_agent[p][q] = agent;
drivers/infiniband/hw/mthca/mthca_mad.c
318
for (q = 0; q <= 1; ++q)
drivers/infiniband/hw/mthca/mthca_mad.c
319
if (dev->send_agent[p][q])
drivers/infiniband/hw/mthca/mthca_mad.c
320
ib_unregister_mad_agent(dev->send_agent[p][q]);
drivers/infiniband/hw/mthca/mthca_mad.c
328
int p, q;
drivers/infiniband/hw/mthca/mthca_mad.c
331
for (q = 0; q <= 1; ++q) {
drivers/infiniband/hw/mthca/mthca_mad.c
332
agent = dev->send_agent[p][q];
drivers/infiniband/hw/mthca/mthca_mad.c
333
dev->send_agent[p][q] = NULL;
drivers/infiniband/hw/ocrdma/ocrdma.h
146
struct ocrdma_queue_info q;
drivers/infiniband/hw/ocrdma/ocrdma.h
557
if (dev->eq_tbl[indx].q.id == eqid)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1012
eq->q.id, eqe.id_valid);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1018
ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1035
ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
113
return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
118
eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1750
eq_id = dev->eq_tbl[0].q.id;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
1757
eq_id = dev->eq_tbl[i].q.id;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
3127
cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
371
static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
373
dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
377
struct ocrdma_queue_info *q, u16 len, u16 entry_size)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
379
memset(q, 0, sizeof(*q));
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
380
q->len = len;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
381
q->entry_size = entry_size;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
382
q->size = len * entry_size;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
383
q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
385
if (!q->va)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
403
struct ocrdma_queue_info *q, int queue_type)
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
424
cmd->id = q->id;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
429
q->created = false;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
448
ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
453
eq->q.id = rsp->vector_eqid & 0xffff;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
455
eq->q.created = true;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
465
status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
474
ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
478
ocrdma_free_q(dev, &eq->q);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
495
if (eq->q.created) {
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
496
ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
497
ocrdma_free_q(dev, &eq->q);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
508
ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
615
status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1554
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1556
return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1569
static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1571
return q->va + (q->head * q->entry_size);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1574
static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1577
return q->va + (idx * q->entry_size);
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1580
static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1582
q->head = (q->head + 1) & q->max_wqe_idx;
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1585
static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
1587
q->tail = (q->tail + 1) & q->max_wqe_idx;
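ocrdma_hwq_free_cnt() above computes the free WQE slots of a ring whose head (producer) and tail (consumer) wrap via max_wqe_idx; assuming the usual power-of-two setup where max_wqe_idx = max_cnt - 1, one slot always stays reserved so head == tail means empty. A quick standalone check of that formula (the ring size is an arbitrary stand-in):

#include <assert.h>

int main(void)
{
        unsigned int max_cnt = 8, max_wqe_idx = max_cnt - 1;
        unsigned int head = 0, tail = 0;

        /* empty ring: all slots free except the reserved one */
        assert(((max_wqe_idx - head) + tail) % max_cnt == max_cnt - 1);

        head = (head + 3) & max_wqe_idx;        /* post three WQEs */
        assert(((max_wqe_idx - head) + tail) % max_cnt == max_cnt - 1 - 3);

        tail = (tail + 3) & max_wqe_idx;        /* complete them */
        assert(((max_wqe_idx - head) + tail) % max_cnt == max_cnt - 1);
        return 0;
}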
drivers/infiniband/hw/qedr/qedr.h
288
struct qedr_userq q;
drivers/infiniband/hw/qedr/verbs.c
1007
rc = qedr_db_recovery_add(dev, cq->q.db_addr,
drivers/infiniband/hw/qedr/verbs.c
1008
&cq->q.db_rec_data->db_data,
drivers/infiniband/hw/qedr/verbs.c
1046
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
drivers/infiniband/hw/qedr/verbs.c
1047
ib_umem_release(cq->q.umem);
drivers/infiniband/hw/qedr/verbs.c
1048
if (cq->q.db_mmap_entry)
drivers/infiniband/hw/qedr/verbs.c
1049
rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
drivers/infiniband/hw/qedr/verbs.c
1083
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
drivers/infiniband/hw/qedr/verbs.c
1084
ib_umem_release(cq->q.umem);
drivers/infiniband/hw/qedr/verbs.c
1086
if (cq->q.db_rec_data) {
drivers/infiniband/hw/qedr/verbs.c
1087
qedr_db_recovery_del(dev, cq->q.db_addr,
drivers/infiniband/hw/qedr/verbs.c
1088
&cq->q.db_rec_data->db_data);
drivers/infiniband/hw/qedr/verbs.c
1089
rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
drivers/infiniband/hw/qedr/verbs.c
713
if (cq->q.db_mmap_entry)
drivers/infiniband/hw/qedr/verbs.c
715
rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
drivers/infiniband/hw/qedr/verbs.c
744
struct qedr_dev *dev, struct qedr_userq *q,
drivers/infiniband/hw/qedr/verbs.c
758
q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
drivers/infiniband/hw/qedr/verbs.c
759
if (!q->db_rec_data) {
drivers/infiniband/hw/qedr/verbs.c
768
entry->address = q->db_rec_data;
drivers/infiniband/hw/qedr/verbs.c
777
q->db_mmap_entry = &entry->rdma_entry;
drivers/infiniband/hw/qedr/verbs.c
785
free_page((unsigned long)q->db_rec_data);
drivers/infiniband/hw/qedr/verbs.c
786
q->db_rec_data = NULL;
drivers/infiniband/hw/qedr/verbs.c
792
struct qedr_userq *q, u64 buf_addr,
drivers/infiniband/hw/qedr/verbs.c
800
q->buf_addr = buf_addr;
drivers/infiniband/hw/qedr/verbs.c
801
q->buf_len = buf_len;
drivers/infiniband/hw/qedr/verbs.c
802
q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
drivers/infiniband/hw/qedr/verbs.c
803
if (IS_ERR(q->umem)) {
drivers/infiniband/hw/qedr/verbs.c
805
PTR_ERR(q->umem));
drivers/infiniband/hw/qedr/verbs.c
806
return PTR_ERR(q->umem);
drivers/infiniband/hw/qedr/verbs.c
809
fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
drivers/infiniband/hw/qedr/verbs.c
810
rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
drivers/infiniband/hw/qedr/verbs.c
815
q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
drivers/infiniband/hw/qedr/verbs.c
816
if (IS_ERR(q->pbl_tbl)) {
drivers/infiniband/hw/qedr/verbs.c
817
rc = PTR_ERR(q->pbl_tbl);
drivers/infiniband/hw/qedr/verbs.c
820
qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
drivers/infiniband/hw/qedr/verbs.c
823
q->pbl_tbl = kzalloc_obj(*q->pbl_tbl);
drivers/infiniband/hw/qedr/verbs.c
824
if (!q->pbl_tbl) {
drivers/infiniband/hw/qedr/verbs.c
831
return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
drivers/infiniband/hw/qedr/verbs.c
834
ib_umem_release(q->umem);
drivers/infiniband/hw/qedr/verbs.c
835
q->umem = NULL;
drivers/infiniband/hw/qedr/verbs.c
967
rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
drivers/infiniband/hw/qedr/verbs.c
973
pbl_ptr = cq->q.pbl_tbl->pa;
drivers/infiniband/hw/qedr/verbs.c
974
page_cnt = cq->q.pbl_info.num_pbes;
drivers/infiniband/hw/qedr/verbs.c
977
cq->q.db_addr = ctx->dpi_addr + db_offset;
drivers/infiniband/sw/rdmavt/qp.c
744
struct rvt_qp *q;
drivers/infiniband/sw/rdmavt/qp.c
749
for (; (q = rcu_dereference_protected(*qpp,
drivers/infiniband/sw/rdmavt/qp.c
751
qpp = &q->next) {
drivers/infiniband/sw/rdmavt/qp.c
752
if (q == qp) {
drivers/infiniband/sw/rxe/rxe_comp.c
591
struct rxe_queue *q = qp->sq.queue;
drivers/infiniband/sw/rxe/rxe_comp.c
598
while ((wqe = queue_head(q, q->type))) {
drivers/infiniband/sw/rxe/rxe_comp.c
604
queue_advance_consumer(q, q->type);
drivers/infiniband/sw/rxe/rxe_queue.c
102
kfree(q);
drivers/infiniband/sw/rxe/rxe_queue.c
110
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
drivers/infiniband/sw/rxe/rxe_queue.c
113
enum queue_type type = q->type;
drivers/infiniband/sw/rxe/rxe_queue.c
118
if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
drivers/infiniband/sw/rxe/rxe_queue.c
122
prod = queue_get_producer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.c
123
cons = queue_get_consumer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.c
125
while ((prod - cons) & q->index_mask) {
drivers/infiniband/sw/rxe/rxe_queue.c
127
queue_addr_from_index(q, cons), new_q->elem_size);
drivers/infiniband/sw/rxe/rxe_queue.c
129
cons = queue_next_index(q, cons);
drivers/infiniband/sw/rxe/rxe_queue.c
133
q->buf->consumer_index = cons;
drivers/infiniband/sw/rxe/rxe_queue.c
139
q->index = q->buf->consumer_index;
drivers/infiniband/sw/rxe/rxe_queue.c
142
swap(*q, *new_q);
drivers/infiniband/sw/rxe/rxe_queue.c
147
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
drivers/infiniband/sw/rxe/rxe_queue.c
158
new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
drivers/infiniband/sw/rxe/rxe_queue.c
174
err = resize_finish(q, new_q, num_elem);
drivers/infiniband/sw/rxe/rxe_queue.c
177
err = resize_finish(q, new_q, num_elem);
drivers/infiniband/sw/rxe/rxe_queue.c
193
void rxe_queue_cleanup(struct rxe_queue *q)
drivers/infiniband/sw/rxe/rxe_queue.c
195
if (q->ip)
drivers/infiniband/sw/rxe/rxe_queue.c
196
kref_put(&q->ip->ref, rxe_mmap_release);
drivers/infiniband/sw/rxe/rxe_queue.c
198
vfree(q->buf);
drivers/infiniband/sw/rxe/rxe_queue.c
200
kfree(q);
drivers/infiniband/sw/rxe/rxe_queue.c
46
inline void rxe_queue_reset(struct rxe_queue *q)
drivers/infiniband/sw/rxe/rxe_queue.c
52
memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
drivers/infiniband/sw/rxe/rxe_queue.c
58
struct rxe_queue *q;
drivers/infiniband/sw/rxe/rxe_queue.c
66
q = kzalloc_obj(*q);
drivers/infiniband/sw/rxe/rxe_queue.c
67
if (!q)
drivers/infiniband/sw/rxe/rxe_queue.c
70
q->rxe = rxe;
drivers/infiniband/sw/rxe/rxe_queue.c
71
q->type = type;
drivers/infiniband/sw/rxe/rxe_queue.c
74
q->elem_size = elem_size;
drivers/infiniband/sw/rxe/rxe_queue.c
81
q->log2_elem_size = order_base_2(elem_size);
drivers/infiniband/sw/rxe/rxe_queue.c
85
q->index_mask = num_slots - 1;
drivers/infiniband/sw/rxe/rxe_queue.c
89
q->buf = vmalloc_user(buf_size);
drivers/infiniband/sw/rxe/rxe_queue.c
90
if (!q->buf)
drivers/infiniband/sw/rxe/rxe_queue.c
93
q->buf->log2_elem_size = q->log2_elem_size;
drivers/infiniband/sw/rxe/rxe_queue.c
94
q->buf->index_mask = q->index_mask;
drivers/infiniband/sw/rxe/rxe_queue.c
96
q->buf_size = buf_size;
drivers/infiniband/sw/rxe/rxe_queue.c
99
return q;
drivers/infiniband/sw/rxe/rxe_queue.h
100
static inline u32 queue_get_producer(const struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
108
prod = smp_load_acquire(&q->buf->producer_index);
drivers/infiniband/sw/rxe/rxe_queue.h
112
prod = q->index;
drivers/infiniband/sw/rxe/rxe_queue.h
116
prod = q->buf->producer_index;
drivers/infiniband/sw/rxe/rxe_queue.h
120
prod = smp_load_acquire(&q->buf->producer_index);
drivers/infiniband/sw/rxe/rxe_queue.h
127
static inline u32 queue_get_consumer(const struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
135
cons = q->index;
drivers/infiniband/sw/rxe/rxe_queue.h
139
cons = smp_load_acquire(&q->buf->consumer_index);
drivers/infiniband/sw/rxe/rxe_queue.h
143
cons = smp_load_acquire(&q->buf->consumer_index);
drivers/infiniband/sw/rxe/rxe_queue.h
147
cons = q->buf->consumer_index;
drivers/infiniband/sw/rxe/rxe_queue.h
154
static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
drivers/infiniband/sw/rxe/rxe_queue.h
156
u32 prod = queue_get_producer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
157
u32 cons = queue_get_consumer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
159
return ((prod - cons) & q->index_mask) == 0;
drivers/infiniband/sw/rxe/rxe_queue.h
162
static inline int queue_full(struct rxe_queue *q, enum queue_type type)
drivers/infiniband/sw/rxe/rxe_queue.h
164
u32 prod = queue_get_producer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
165
u32 cons = queue_get_consumer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
167
return ((prod + 1 - cons) & q->index_mask) == 0;
drivers/infiniband/sw/rxe/rxe_queue.h
170
static inline u32 queue_count(const struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
173
u32 prod = queue_get_producer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
174
u32 cons = queue_get_consumer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
176
return (prod - cons) & q->index_mask;
drivers/infiniband/sw/rxe/rxe_queue.h
179
static inline void queue_advance_producer(struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
193
prod = q->index;
drivers/infiniband/sw/rxe/rxe_queue.h
194
prod = (prod + 1) & q->index_mask;
drivers/infiniband/sw/rxe/rxe_queue.h
195
q->index = prod;
drivers/infiniband/sw/rxe/rxe_queue.h
197
smp_store_release(&q->buf->producer_index, prod);
drivers/infiniband/sw/rxe/rxe_queue.h
201
prod = q->buf->producer_index;
drivers/infiniband/sw/rxe/rxe_queue.h
202
prod = (prod + 1) & q->index_mask;
drivers/infiniband/sw/rxe/rxe_queue.h
204
smp_store_release(&q->buf->producer_index, prod);
drivers/infiniband/sw/rxe/rxe_queue.h
215
static inline void queue_advance_consumer(struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
223
cons = (q->index + 1) & q->index_mask;
drivers/infiniband/sw/rxe/rxe_queue.h
224
q->index = cons;
drivers/infiniband/sw/rxe/rxe_queue.h
226
smp_store_release(&q->buf->consumer_index, cons);
drivers/infiniband/sw/rxe/rxe_queue.h
242
cons = q->buf->consumer_index;
drivers/infiniband/sw/rxe/rxe_queue.h
243
cons = (cons + 1) & q->index_mask;
drivers/infiniband/sw/rxe/rxe_queue.h
245
smp_store_release(&q->buf->consumer_index, cons);
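The pairing above is deliberate: the advancing side publishes its index with smp_store_release() only after writing the element, and the peer reads it with smp_load_acquire(), so the element data is guaranteed visible once the new index is observed, with no further locking. A hedged C11-atomics analogue of the same pairing (layout and names invented for illustration):

#include <stdatomic.h>
#include <stdint.h>

#define SLOTS 8u

static _Atomic uint32_t producer_index;
static uint32_t slots[SLOTS];

/* Producer: write the element first, then publish the index with
 * release ordering (the analogue of smp_store_release() above). */
static void produce(uint32_t val)
{
	uint32_t prod = atomic_load_explicit(&producer_index,
					     memory_order_relaxed);

	slots[prod & (SLOTS - 1)] = val;
	atomic_store_explicit(&producer_index, prod + 1,
			      memory_order_release);
}

/* Consumer: the acquire load (smp_load_acquire()) makes the producer's
 * element write visible before the slot is read. */
static int consume(uint32_t *cons, uint32_t *val)
{
	uint32_t prod = atomic_load_explicit(&producer_index,
					     memory_order_acquire);

	if (*cons == prod)
		return 0;			/* empty */
	*val = slots[*cons & (SLOTS - 1)];
	(*cons)++;
	return 1;
}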
drivers/infiniband/sw/rxe/rxe_queue.h
250
static inline void *queue_producer_addr(struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
253
u32 prod = queue_get_producer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
255
return q->buf->data + (prod << q->log2_elem_size);
drivers/infiniband/sw/rxe/rxe_queue.h
258
static inline void *queue_consumer_addr(struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
261
u32 cons = queue_get_consumer(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
263
return q->buf->data + (cons << q->log2_elem_size);
drivers/infiniband/sw/rxe/rxe_queue.h
266
static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
drivers/infiniband/sw/rxe/rxe_queue.h
268
return q->buf->data + ((index & q->index_mask)
drivers/infiniband/sw/rxe/rxe_queue.h
269
<< q->log2_elem_size);
drivers/infiniband/sw/rxe/rxe_queue.h
272
static inline u32 queue_index_from_addr(const struct rxe_queue *q,
drivers/infiniband/sw/rxe/rxe_queue.h
275
return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
drivers/infiniband/sw/rxe/rxe_queue.h
276
& q->index_mask;
drivers/infiniband/sw/rxe/rxe_queue.h
279
static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
drivers/infiniband/sw/rxe/rxe_queue.h
281
return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
drivers/infiniband/sw/rxe/rxe_queue.h
83
void rxe_queue_reset(struct rxe_queue *q);
drivers/infiniband/sw/rxe/rxe_queue.h
88
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
drivers/infiniband/sw/rxe/rxe_queue.h
95
static inline u32 queue_next_index(struct rxe_queue *q, int index)
drivers/infiniband/sw/rxe/rxe_queue.h
97
return (index + 1) & q->index_mask;
drivers/infiniband/sw/rxe/rxe_req.c
120
struct rxe_queue *q;
drivers/infiniband/sw/rxe/rxe_req.c
128
q = qp->sq.queue;
drivers/infiniband/sw/rxe/rxe_req.c
130
cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_req.c
131
wqe = queue_addr_from_index(q, cons);
drivers/infiniband/sw/rxe/rxe_req.c
166
struct rxe_queue *q = qp->sq.queue;
drivers/infiniband/sw/rxe/rxe_req.c
170
prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_req.c
174
return queue_addr_from_index(q, index);
drivers/infiniband/sw/rxe/rxe_req.c
44
struct rxe_queue *q = qp->sq.queue;
drivers/infiniband/sw/rxe/rxe_req.c
48
cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_req.c
49
prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_req.c
56
wqe_index = queue_next_index(q, wqe_index)) {
drivers/infiniband/sw/rxe/rxe_req.c
654
struct rxe_queue *q = qp->sq.queue;
drivers/infiniband/sw/rxe/rxe_req.c
677
qp->req.wqe_index = queue_get_consumer(q,
drivers/infiniband/sw/rxe/rxe_resp.c
1474
struct rxe_queue *q = qp->rq.queue;
drivers/infiniband/sw/rxe/rxe_resp.c
1494
while ((wqe = queue_head(q, q->type))) {
drivers/infiniband/sw/rxe/rxe_resp.c
1500
queue_advance_consumer(q, q->type);
drivers/infiniband/sw/rxe/rxe_resp.c
262
struct rxe_queue *q = srq->rq.queue;
drivers/infiniband/sw/rxe/rxe_resp.c
274
wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_resp.c
290
queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_resp.c
291
count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);
drivers/infiniband/sw/rxe/rxe_srq.c
155
struct rxe_queue *q = srq->rq.queue;
drivers/infiniband/sw/rxe/rxe_srq.c
170
err = rxe_queue_resize(q, &attr->max_wr, wqe_size,
drivers/infiniband/sw/rxe/rxe_srq.c
48
struct rxe_queue *q;
drivers/infiniband/sw/rxe/rxe_srq.c
65
q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
drivers/infiniband/sw/rxe/rxe_srq.c
67
if (!q) {
drivers/infiniband/sw/rxe/rxe_srq.c
73
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
drivers/infiniband/sw/rxe/rxe_srq.c
74
q->buf_size, &q->ip);
drivers/infiniband/sw/rxe/rxe_srq.c
83
rxe_queue_cleanup(q);
drivers/infiniband/sw/rxe/rxe_srq.c
88
srq->rq.queue = q;
drivers/infiniband/sw/rxe/rxe_srq.c
94
vfree(q->buf);
drivers/infiniband/sw/rxe/rxe_srq.c
95
kfree(q);
drivers/infiniband/ulp/srp/ib_srp.c
2852
struct request_queue *q = sdev->request_queue;
drivers/infiniband/ulp/srp/ib_srp.c
2857
blk_queue_rq_timeout(q, timeout);
drivers/input/misc/hisi_powerkey.c
29
static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
drivers/input/misc/hisi_powerkey.c
31
struct input_dev *input = q;
drivers/input/misc/hisi_powerkey.c
40
static irqreturn_t hi65xx_power_release_isr(int irq, void *q)
drivers/input/misc/hisi_powerkey.c
42
struct input_dev *input = q;
drivers/input/misc/hisi_powerkey.c
51
static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q)
drivers/input/misc/hisi_powerkey.c
53
struct input_dev *input = q;
drivers/input/misc/hisi_powerkey.c
65
irqreturn_t (*handler)(int irq, void *q);
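All three handlers rely on the same request_irq() convention: the dev_id cookie registered with the interrupt is handed back verbatim as the void *q argument, so each handler only has to cast it to its input_dev. A hedged kernel-context sketch of the shape (input_report_key()/input_sync() are the real input-core helpers; the surrounding driver details are assumed):

static irqreturn_t power_press_isr(int irq, void *q)
{
	struct input_dev *input = q;	/* dev_id cookie from request_irq() */

	input_report_key(input, KEY_POWER, 1);	/* report key-down */
	input_sync(input);
	return IRQ_HANDLED;
}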
drivers/input/rmi4/rmi_f54.c
283
static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
drivers/input/rmi4/rmi_f54.c
287
struct f54_data *f54 = q->drv_priv;
drivers/input/rmi4/rmi_f54.c
363
static void rmi_f54_stop_streaming(struct vb2_queue *q)
drivers/input/rmi4/rmi_f54.c
365
struct f54_data *f54 = vb2_get_drv_priv(q);
drivers/input/touchscreen/atmel_mxt_ts.c
2485
static int mxt_queue_setup(struct vb2_queue *q,
drivers/input/touchscreen/atmel_mxt_ts.c
2489
struct mxt_data *data = q->drv_priv;
drivers/input/touchscreen/sur40.c
845
static int sur40_queue_setup(struct vb2_queue *q,
drivers/input/touchscreen/sur40.c
849
struct sur40_state *sur40 = vb2_get_drv_priv(q);
drivers/input/touchscreen/sur40.c
850
unsigned int q_num_bufs = vb2_get_num_buffers(q);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
126
static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
130
prod = Q_IDX(q, q->prod);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
131
cons = Q_IDX(q, q->cons);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
133
if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
134
space = (1 << q->max_n_shift) - (prod - cons);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
141
static bool queue_full(struct arm_smmu_ll_queue *q)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
143
return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
144
Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
147
static bool queue_empty(struct arm_smmu_ll_queue *q)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
149
return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
150
Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
153
static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
155
return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
156
(Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
157
((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
158
(Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
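Unlike the masked-index rings earlier in this listing, the SMMU queues keep one extra wrap bit directly above the index bits of prod and cons, so a completely full queue is distinguishable from an empty one without sacrificing a slot: equal index with equal wrap bit means empty, equal index with differing wrap bit means full. A stand-alone sketch of the encoding (field layout assumed for illustration):

#include <assert.h>
#include <stdint.h>

#define SHIFT 3u			/* queue depth = 1 << SHIFT = 8 */
#define IDX(v)	((v) & ((1u << SHIFT) - 1))
#define WRP(v)	((v) & (1u << SHIFT))

static int wq_empty(uint32_t prod, uint32_t cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) == WRP(cons);
}

static int wq_full(uint32_t prod, uint32_t cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) != WRP(cons);
}

int main(void)
{
	uint32_t prod = 0, cons = 0;

	assert(wq_empty(prod, cons));
	prod += 1u << SHIFT;		/* producer laps the ring once */
	assert(wq_full(prod, cons));
	return 0;
}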
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
161
static void queue_sync_cons_out(struct arm_smmu_queue *q)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
168
writel_relaxed(q->llq.cons, q->cons_reg);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
171
static void queue_inc_cons(struct arm_smmu_ll_queue *q)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
173
u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
174
q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
177
static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
179
struct arm_smmu_ll_queue *llq = &q->llq;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
186
queue_sync_cons_out(q);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
189
static int queue_sync_prod_in(struct arm_smmu_queue *q)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
199
prod = readl(q->prod_reg);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2007
struct arm_smmu_queue *q = &smmu->evtq.q;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2008
struct arm_smmu_ll_queue *llq = &q->llq;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
201
if (Q_OVF(prod) != Q_OVF(q->llq.prod))
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2013
while (!queue_remove_raw(q, evt)) {
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2026
if (queue_sync_prod_in(q) == -EOVERFLOW)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2031
queue_sync_cons_ovf(q);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
204
q->llq.prod = prod;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2076
struct arm_smmu_queue *q = &smmu->priq.q;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2077
struct arm_smmu_ll_queue *llq = &q->llq;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
208
static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2081
while (!queue_remove_raw(q, evt))
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2084
if (queue_sync_prod_in(q) == -EOVERFLOW)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
2089
queue_sync_cons_ovf(q);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
210
u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
211
return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
257
static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
259
if (queue_empty(&q->llq))
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
262
queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
263
queue_inc_cons(&q->llq);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
264
queue_sync_cons_out(q);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3777
struct arm_smmu_queue *q, void __iomem *page,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3784
qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3785
q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3787
if (q->base || qsz < PAGE_SIZE)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3790
q->llq.max_n_shift--;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3793
if (!q->base) {
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3800
if (!WARN_ON(q->base_dma & (qsz - 1))) {
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3802
1 << q->llq.max_n_shift, name);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3805
q->prod_reg = page + prod_off;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3806
q->cons_reg = page + cons_off;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3807
q->ent_dwords = dwords;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3809
q->q_base = Q_BASE_RWA;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3810
q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3811
q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3813
q->llq.prod = q->llq.cons = 0;
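The init path above sizes the queue optimistically and backs off on allocation failure: if dmam_alloc_coherent() returns NULL and the queue is still at least a page, max_n_shift is decremented and the allocation retried at half the size. A hedged sketch of that back-off loop (alloc_coherent() and PAGE_SZ are placeholders for the real allocator and PAGE_SIZE):

#include <stddef.h>

#define PAGE_SZ 4096u

static void *alloc_queue(unsigned int *max_n_shift, unsigned int dwords,
			 void *(*alloc_coherent)(size_t))
{
	void *base;
	size_t qsz;

	do {
		qsz = ((size_t)1 << *max_n_shift) * dwords * 8;
		base = alloc_coherent(qsz);
		if (base || qsz < PAGE_SZ)
			break;
		(*max_n_shift)--;	/* halve the queue and retry */
	} while (1);

	return base;			/* NULL: even a sub-page queue failed */
}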
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3820
unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3838
ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3849
ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
3866
return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
394
struct arm_smmu_queue *q = &cmdq->q;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
404
ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4045
smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4047
smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
405
q->ent_dwords * 8;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4060
irq = smmu->evtq.q.irq;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4083
irq = smmu->priq.q.irq;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4213
writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4214
writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4215
writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
422
struct arm_smmu_queue *q = &cmdq->q;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4239
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4240
writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4241
writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4253
writeq_relaxed(smmu->priq.q.q_base,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4255
writel_relaxed(smmu->priq.q.llq.prod,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4257
writel_relaxed(smmu->priq.q.llq.cons,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
426
u32 cons = readl_relaxed(q->cons_reg);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4478
smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4480
if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4492
smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4494
smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
458
queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
468
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4881
smmu->evtq.q.irq = irq;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
4885
smmu->priq.q.irq = irq;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
592
.max_n_shift = cmdq->q.llq.max_n_shift,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
661
WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
663
llq->val = READ_ONCE(cmdq->q.llq.val);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
669
llq->val = READ_ONCE(cmdq->q.llq.val);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
689
u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
716
llq->val = READ_ONCE(cmdq->q.llq.val);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
751
llq->cons = readl(cmdq->q.cons_reg);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
773
.max_n_shift = cmdq->q.llq.max_n_shift,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
781
queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
812
llq.max_n_shift = cmdq->q.llq.max_n_shift;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
816
llq.val = READ_ONCE(cmdq->q.llq.val);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
831
old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
849
queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
871
&cmdq->q.llq.atomic.prod);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
885
writel_relaxed(prod, cmdq->q.prod_reg);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
903
readl_relaxed(cmdq->q.prod_reg),
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
904
readl_relaxed(cmdq->q.cons_reg));
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
912
WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
190
#define Q_ENT(q, p) ((q)->base + \
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
191
Q_IDX(&((q)->llq), p) * \
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
192
(q)->ent_dwords)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
632
struct arm_smmu_queue q;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
652
struct arm_smmu_queue q;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
658
struct arm_smmu_queue q;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
982
struct arm_smmu_queue *q, void __iomem *page,
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
1025
writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
1029
vcmdq->cmdq.q.q_base & VCMDQ_ADDR,
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
1030
1UL << (vcmdq->cmdq.q.q_base & VCMDQ_LOG2SIZE));
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
1125
vcmdq->cmdq.q.q_base = base_addr_pa & VCMDQ_ADDR;
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
1126
vcmdq->cmdq.q.q_base |= log2size;
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
41
#define TEGRA241_CMDQV_CMDQ_ALLOC(q) (0x0200 + 0x4*(q))
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
483
writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE));
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
619
struct arm_smmu_queue *q = &cmdq->q;
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
628
q->llq.max_n_shift =
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
632
ret = arm_smmu_init_one_queue(smmu, q, vcmdq->page0,
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
639
q->q_base = q->base_dma & VCMDQ_ADDR;
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
640
q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
67
#define TEGRA241_VCMDQ_PAGE0(q) (TEGRA241_VCMDQ_PAGE0_BASE + 0x80*(q))
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
84
#define TEGRA241_VCMDQ_PAGE1(q) (TEGRA241_VCMDQ_PAGE1_BASE + 0x80*(q))
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
94
#define TEGRA241_VINTFi_LVCMDQ_PAGE0(i, q) \
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
95
(TEGRA241_VINTFi_PAGE0(i) + 0x80*(q))
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
96
#define TEGRA241_VINTFi_LVCMDQ_PAGE1(i, q) \
drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
97
(TEGRA241_VINTFi_PAGE1(i) + 0x80*(q))
drivers/iommu/riscv/iommu.c
109
#define RISCV_IOMMU_QUEUE_INIT(q, name) do { \
drivers/iommu/riscv/iommu.c
110
struct riscv_iommu_queue *_q = q; \
drivers/iommu/riscv/iommu.c
118
#define Q_HEAD(q) ((q)->qbr + (RISCV_IOMMU_REG_CQH - RISCV_IOMMU_REG_CQB))
drivers/iommu/riscv/iommu.c
119
#define Q_TAIL(q) ((q)->qbr + (RISCV_IOMMU_REG_CQT - RISCV_IOMMU_REG_CQB))
drivers/iommu/riscv/iommu.c
120
#define Q_ITEM(q, index) ((q)->mask & (index))
drivers/iommu/riscv/iommu.c
121
#define Q_IPSR(q) BIT((q)->qid)
drivers/isdn/mISDN/dsp_cmx.c
1303
u8 *d, *p, *q, *o_q;
drivers/isdn/mISDN/dsp_cmx.c
1358
q = dsp->rx_buff; /* received data */
drivers/isdn/mISDN/dsp_cmx.c
1430
*d++ = dsp_audio_mix_law[(p[t] << 8) | q[r]];
drivers/isdn/mISDN/dsp_cmx.c
1435
*d++ = q[r]; /* echo */
drivers/isdn/mISDN/dsp_cmx.c
1482
dsp_audio_law_to_s32[q[r]] +
drivers/isdn/mISDN/dsp_cmx.c
1495
*d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]];
drivers/isdn/mISDN/dsp_cmx.c
1512
dsp_audio_law_to_s32[q[r]];
drivers/isdn/mISDN/dsp_cmx.c
1523
sample = *c++ - dsp_audio_law_to_s32[q[r]];
drivers/isdn/mISDN/dsp_cmx.c
1625
u8 *p, *q;
drivers/isdn/mISDN/dsp_cmx.c
1704
q = dsp->rx_buff;
drivers/isdn/mISDN/dsp_cmx.c
1709
*c++ += dsp_audio_law_to_s32[q[r]];
drivers/isdn/mISDN/dsp_cmx.c
1728
q = dsp->tx_buff;
drivers/isdn/mISDN/dsp_cmx.c
1813
q[r] = dsp_silence;
drivers/md/bcache/super.c
1016
struct request_queue *q;
drivers/md/bcache/super.c
1024
q = bdev_get_queue(dc->bdev);
drivers/md/bcache/super.c
1025
if (blk_queue_dying(q))
drivers/md/bcache/super.c
1406
struct request_queue *q = bdev_get_queue(dc->bdev);
drivers/md/bcache/super.c
1427
dc->partial_stripes_expensive = !!(q->limits.features &
drivers/md/bcache/sysfs.c
1059
uint16_t q[31], *p, *cached;
drivers/md/bcache/sysfs.c
1101
for (i = 0; i < ARRAY_SIZE(q); i++)
drivers/md/bcache/sysfs.c
1102
q[i] = INITIAL_PRIO - cached[n * (i + 1) /
drivers/md/bcache/sysfs.c
1103
(ARRAY_SIZE(q) + 1)];
drivers/md/bcache/sysfs.c
1119
n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
drivers/md/bcache/sysfs.c
1121
for (i = 0; i < ARRAY_SIZE(q); i++)
drivers/md/bcache/sysfs.c
1122
ret += sysfs_emit_at(buf, ret, "%u ", q[i]);
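The sysfs code samples ARRAY_SIZE(q) = 31 evenly spaced quantiles out of the sorted per-bucket priorities: entry i is taken from rank n * (i + 1) / (31 + 1). A minimal sketch of the sampling step alone (the INITIAL_PRIO bias and the surrounding sort are omitted):

#include <stddef.h>
#include <stdint.h>

#define NQ 31

/* `sorted` holds n values in ascending order; write NQ quantiles to out. */
static void sample_quantiles(const uint16_t *sorted, size_t n,
			     uint16_t out[NQ])
{
	size_t i;

	for (i = 0; i < NQ; i++)
		out[i] = sorted[n * (i + 1) / (NQ + 1)];
}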
drivers/md/bcache/util.c
100
q = -v;
drivers/md/bcache/util.c
102
q = v;
drivers/md/bcache/util.c
111
t = q & ~(~0 << 10);
drivers/md/bcache/util.c
112
q >>= 10;
drivers/md/bcache/util.c
113
} while (q >= 1000);
drivers/md/bcache/util.c
119
return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
drivers/md/bcache/util.c
121
return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
drivers/md/bcache/util.c
97
uint64_t q;
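Reassembled, these util.c fragments are a human-readable size printer: peel ten bits at a time (divide by 1024), keep the last remainder t so one decimal digit can be shown as t * 10 / 1024, and stop once the quotient fits in three digits. A hedged user-space reconstruction:

#include <stdint.h>
#include <stdio.h>

static int hprint(char *buf, int64_t v)
{
	static const char units[] = "?kMGTPEZY";
	uint64_t q = v < 0 ? -(uint64_t)v : (uint64_t)v;
	uint64_t t = 0;
	int u = 0;

	do {
		u++;
		t = q & ~(~0ULL << 10);	/* low ten bits: remainder mod 1024 */
		q >>= 10;
	} while (q >= 1000);

	return sprintf(buf, "%s%llu.%llu%c", v < 0 ? "-" : "",
		       (unsigned long long)q,
		       (unsigned long long)(t * 10 / 1024), units[u]);
}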
drivers/md/dm-cache-policy-smq.c
270
static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
drivers/md/dm-cache-policy-smq.c
274
q->es = es;
drivers/md/dm-cache-policy-smq.c
275
q->nr_elts = 0;
drivers/md/dm-cache-policy-smq.c
276
q->nr_levels = nr_levels;
drivers/md/dm-cache-policy-smq.c
278
for (i = 0; i < q->nr_levels; i++) {
drivers/md/dm-cache-policy-smq.c
279
l_init(q->qs + i);
drivers/md/dm-cache-policy-smq.c
280
q->target_count[i] = 0u;
drivers/md/dm-cache-policy-smq.c
283
q->last_target_nr_elts = 0u;
drivers/md/dm-cache-policy-smq.c
284
q->nr_top_levels = 0u;
drivers/md/dm-cache-policy-smq.c
285
q->nr_in_top_levels = 0u;
drivers/md/dm-cache-policy-smq.c
288
static unsigned int q_size(struct queue *q)
drivers/md/dm-cache-policy-smq.c
290
return q->nr_elts;
drivers/md/dm-cache-policy-smq.c
296
static void q_push(struct queue *q, struct entry *e)
drivers/md/dm-cache-policy-smq.c
301
q->nr_elts++;
drivers/md/dm-cache-policy-smq.c
303
l_add_tail(q->es, q->qs + e->level, e);
drivers/md/dm-cache-policy-smq.c
306
static void q_push_front(struct queue *q, struct entry *e)
drivers/md/dm-cache-policy-smq.c
311
q->nr_elts++;
drivers/md/dm-cache-policy-smq.c
313
l_add_head(q->es, q->qs + e->level, e);
drivers/md/dm-cache-policy-smq.c
316
static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
drivers/md/dm-cache-policy-smq.c
321
q->nr_elts++;
drivers/md/dm-cache-policy-smq.c
323
l_add_before(q->es, q->qs + e->level, old, e);
drivers/md/dm-cache-policy-smq.c
326
static void q_del(struct queue *q, struct entry *e)
drivers/md/dm-cache-policy-smq.c
328
l_del(q->es, q->qs + e->level, e);
drivers/md/dm-cache-policy-smq.c
330
q->nr_elts--;
drivers/md/dm-cache-policy-smq.c
336
static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
drivers/md/dm-cache-policy-smq.c
341
max_level = min(max_level, q->nr_levels);
drivers/md/dm-cache-policy-smq.c
344
for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
drivers/md/dm-cache-policy-smq.c
358
static struct entry *q_pop(struct queue *q)
drivers/md/dm-cache-policy-smq.c
360
struct entry *e = q_peek(q, q->nr_levels, true);
drivers/md/dm-cache-policy-smq.c
363
q_del(q, e);
drivers/md/dm-cache-policy-smq.c
373
static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
drivers/md/dm-cache-policy-smq.c
377
for (; level < q->nr_levels; level++)
drivers/md/dm-cache-policy-smq.c
378
for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
drivers/md/dm-cache-policy-smq.c
380
l_del(q->es, q->qs + e->level, e);
drivers/md/dm-cache-policy-smq.c
387
static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
drivers/md/dm-cache-policy-smq.c
393
BUG_ON(lend > q->nr_levels);
drivers/md/dm-cache-policy-smq.c
399
q->target_count[level] =
drivers/md/dm-cache-policy-smq.c
407
static void q_set_targets(struct queue *q)
drivers/md/dm-cache-policy-smq.c
409
if (q->last_target_nr_elts == q->nr_elts)
drivers/md/dm-cache-policy-smq.c
412
q->last_target_nr_elts = q->nr_elts;
drivers/md/dm-cache-policy-smq.c
414
if (q->nr_top_levels > q->nr_levels)
drivers/md/dm-cache-policy-smq.c
415
q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);
drivers/md/dm-cache-policy-smq.c
418
q_set_targets_subrange_(q, q->nr_in_top_levels,
drivers/md/dm-cache-policy-smq.c
419
q->nr_levels - q->nr_top_levels, q->nr_levels);
drivers/md/dm-cache-policy-smq.c
421
if (q->nr_in_top_levels < q->nr_elts)
drivers/md/dm-cache-policy-smq.c
422
q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
drivers/md/dm-cache-policy-smq.c
423
0, q->nr_levels - q->nr_top_levels);
drivers/md/dm-cache-policy-smq.c
425
q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
drivers/md/dm-cache-policy-smq.c
429
static void q_redistribute(struct queue *q)
drivers/md/dm-cache-policy-smq.c
435
q_set_targets(q);
drivers/md/dm-cache-policy-smq.c
437
for (level = 0u; level < q->nr_levels - 1u; level++) {
drivers/md/dm-cache-policy-smq.c
438
l = q->qs + level;
drivers/md/dm-cache-policy-smq.c
439
target = q->target_count[level];
drivers/md/dm-cache-policy-smq.c
445
e = __redist_pop_from(q, level + 1u);
drivers/md/dm-cache-policy-smq.c
452
l_add_tail(q->es, l, e);
drivers/md/dm-cache-policy-smq.c
458
l_above = q->qs + level + 1u;
drivers/md/dm-cache-policy-smq.c
460
e = l_pop_tail(q->es, l);
drivers/md/dm-cache-policy-smq.c
467
l_add_tail(q->es, l_above, e);
drivers/md/dm-cache-policy-smq.c
472
static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
drivers/md/dm-cache-policy-smq.c
477
unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);
drivers/md/dm-cache-policy-smq.c
480
if (extra_levels && (e->level < q->nr_levels - 1u)) {
drivers/md/dm-cache-policy-smq.c
481
for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
drivers/md/dm-cache-policy-smq.c
485
q_del(q, de);
drivers/md/dm-cache-policy-smq.c
490
q_push_before(q, s1, de);
drivers/md/dm-cache-policy-smq.c
494
q_push_before(q, s2, de);
drivers/md/dm-cache-policy-smq.c
498
q_push(q, de);
drivers/md/dm-cache-policy-smq.c
501
q_push(q, de);
drivers/md/dm-cache-policy-smq.c
505
q_del(q, e);
drivers/md/dm-cache-policy-smq.c
507
q_push(q, e);
drivers/md/dm-cache-policy-smq.c
889
struct queue *q = &mq->dirty;
drivers/md/dm-cache-policy-smq.c
892
for (level = 0; level < q->nr_levels; level++) {
drivers/md/dm-cache-policy-smq.c
894
q_del(q, sentinel);
drivers/md/dm-cache-policy-smq.c
895
q_push(q, sentinel);
drivers/md/dm-cache-policy-smq.c
902
struct queue *q = &mq->clean;
drivers/md/dm-cache-policy-smq.c
905
for (level = 0; level < q->nr_levels; level++) {
drivers/md/dm-cache-policy-smq.c
907
q_del(q, sentinel);
drivers/md/dm-cache-policy-smq.c
908
q_push(q, sentinel);
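The smq policy's struct queue is a multilevel LRU: one list per level, q_push() files an entry under its e->level, eviction (q_peek()/q_pop()) scans upward from the coldest level, and q_redistribute() shuffles entries between adjacent levels to keep each level near its target count. A much-reduced sketch of the core structure (singly linked LIFO lists instead of the entry_space indirection and ordered lists):

#include <stddef.h>

#define NR_LEVELS 8u

struct entry {
	struct entry *next;
	unsigned int level;		/* 0 = coldest, NR_LEVELS-1 = hottest */
};

struct mlq {
	struct entry *qs[NR_LEVELS];	/* one list per level */
	unsigned int nr_elts;
};

static void mlq_push(struct mlq *q, struct entry *e)
{
	e->next = q->qs[e->level];
	q->qs[e->level] = e;
	q->nr_elts++;
}

/* Evict from the coldest non-empty level, like q_peek()/q_pop() above. */
static struct entry *mlq_pop(struct mlq *q)
{
	unsigned int level;

	for (level = 0; level < NR_LEVELS; level++) {
		struct entry *e = q->qs[level];

		if (e) {
			q->qs[level] = e->next;
			q->nr_elts--;
			return e;
		}
	}
	return NULL;
}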
drivers/md/dm-io.c
316
struct request_queue *q = bdev_get_queue(where->bdev);
drivers/md/dm-io.c
327
special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
drivers/md/dm-mpath.c
1631
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
drivers/md/dm-mpath.c
1633
if (pgpath->is_active && !blk_queue_dying(q))
drivers/md/dm-mpath.c
1634
scsi_dh_activate(q, pg_init_done, pgpath);
drivers/md/dm-mpath.c
2219
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
drivers/md/dm-mpath.c
2221
return blk_lld_busy(q);
drivers/md/dm-mpath.c
515
struct request_queue *q;
drivers/md/dm-mpath.c
538
q = bdev_get_queue(bdev);
drivers/md/dm-mpath.c
539
clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
drivers/md/dm-mpath.c
543
if (blk_queue_dying(q)) {
drivers/md/dm-mpath.c
881
struct request_queue *q = bdev_get_queue(bdev);
drivers/md/dm-mpath.c
907
r = scsi_dh_attach(q, m->hw_handler_name);
drivers/md/dm-mpath.c
914
r = scsi_dh_set_params(q, m->hw_handler_params);
drivers/md/dm-mpath.c
931
struct request_queue *q;
drivers/md/dm-mpath.c
951
q = bdev_get_queue(p->path.dev->bdev);
drivers/md/dm-mpath.c
952
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
drivers/md/dm-rq.c
178
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
drivers/md/dm-rq.c
180
blk_mq_delay_kick_requeue_list(q, msecs);
drivers/md/dm-rq.c
192
__dm_mq_kick_requeue_list(rq->q, msecs);
drivers/md/dm-rq.c
226
!clone->q->limits.max_discard_sectors)
drivers/md/dm-rq.c
229
!clone->q->limits.max_write_zeroes_sectors)
drivers/md/dm-rq.c
64
void dm_start_queue(struct request_queue *q)
drivers/md/dm-rq.c
66
blk_mq_unquiesce_queue(q);
drivers/md/dm-rq.c
67
blk_mq_kick_requeue_list(q);
drivers/md/dm-rq.c
70
void dm_stop_queue(struct request_queue *q)
drivers/md/dm-rq.c
72
blk_mq_quiesce_queue(q);
drivers/md/dm-rq.h
37
void dm_start_queue(struct request_queue *q);
drivers/md/dm-rq.h
38
void dm_stop_queue(struct request_queue *q);
drivers/md/dm-stats.c
965
const char *q;
drivers/md/dm-stats.c
970
for (q = h; *q; q++)
drivers/md/dm-stats.c
971
if (*q == ',')
drivers/md/dm-table.c
1462
static void dm_update_crypto_profile(struct request_queue *q,
drivers/md/dm-table.c
1469
if (!q->crypto_profile) {
drivers/md/dm-table.c
1470
blk_crypto_register(t->crypto_profile, q);
drivers/md/dm-table.c
1472
blk_crypto_update_capabilities(q->crypto_profile,
drivers/md/dm-table.c
1494
static void dm_update_crypto_profile(struct request_queue *q,
drivers/md/dm-table.c
1897
struct request_queue *q = bdev_get_queue(dev->bdev);
drivers/md/dm-table.c
1900
mutex_lock(&q->limits_lock);
drivers/md/dm-table.c
1901
b = !q->limits.max_write_zeroes_sectors;
drivers/md/dm-table.c
1902
mutex_unlock(&q->limits_lock);
drivers/md/dm-table.c
2028
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
drivers/md/dm-table.c
2069
r = dm_set_zones_restrictions(t, q, limits);
drivers/md/dm-table.c
2083
old_limits = queue_limits_start_update(q);
drivers/md/dm-table.c
2084
r = queue_limits_commit_update(q, limits);
drivers/md/dm-table.c
2094
r = dm_revalidate_zones(t, q);
drivers/md/dm-table.c
2096
queue_limits_set(q, &old_limits);
drivers/md/dm-table.c
2110
dm_update_crypto_profile(q, t);
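dm_table_set_restrictions() snapshots the live limits before committing the new ones, so a later failure in dm_revalidate_zones() can be undone with queue_limits_set(q, &old_limits). A hedged sketch of that commit-then-rollback shape (types and helpers simplified, not the real block-layer signatures):

struct limits { unsigned int max_sectors; /* ... */ };
struct queue { struct limits limits; };

static int revalidate_zones(struct queue *q);	/* may fail after commit */

static int set_restrictions(struct queue *q, const struct limits *new_lim)
{
	struct limits old = q->limits;	/* snapshot before committing */
	int r;

	q->limits = *new_lim;		/* commit the new limits */

	r = revalidate_zones(q);
	if (r)
		q->limits = old;	/* roll back to the snapshot */
	return r;
}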
drivers/md/dm-table.c
415
struct request_queue *q = bdev_get_queue(bdev);
drivers/md/dm-table.c
417
if (unlikely(!q)) {
drivers/md/dm-table.c
423
mutex_lock(&q->limits_lock);
drivers/md/dm-table.c
428
limits->features |= (q->limits.features & BLK_FEAT_ATOMIC_WRITES);
drivers/md/dm-table.c
430
if (blk_stack_limits(limits, &q->limits,
drivers/md/dm-table.c
436
q->limits.physical_block_size,
drivers/md/dm-table.c
437
q->limits.logical_block_size,
drivers/md/dm-table.c
438
q->limits.alignment_offset,
drivers/md/dm-table.c
447
mutex_unlock(&q->limits_lock);
drivers/md/dm-table.c
890
struct request_queue *q = bdev_get_queue(bdev);
drivers/md/dm-table.c
896
return !queue_is_mq(q);
drivers/md/dm-zone.c
152
struct request_queue *q = md->queue;
drivers/md/dm-zone.c
154
if (!blk_queue_is_zoned(q))
drivers/md/dm-zone.c
172
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
drivers/md/dm-zone.c
190
queue_emulates_zone_append(q) ? "emulated" : "native");
drivers/md/dm-zone.c
349
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
drivers/md/dm-zone.c
365
WARN_ON_ONCE(queue_is_mq(q));
drivers/md/dm-zone.c
412
if (q->limits.chunk_sectors != lim->chunk_sectors) {
drivers/md/dm.c
2215
static void dm_queue_destroy_crypto_profile(struct request_queue *q)
drivers/md/dm.c
2217
dm_destroy_crypto_profile(q->crypto_profile);
drivers/md/dm.c
2222
static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
drivers/md/dm.h
105
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
drivers/md/dm.h
107
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q);
drivers/md/dm.h
64
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
drivers/md/raid5.c
7133
struct request_queue *q = mddev->gendisk->queue;
drivers/md/raid5.c
7134
struct queue_limits lim = queue_limits_start_update(q);
drivers/md/raid5.c
7141
err = queue_limits_commit_update(q, &lim);
drivers/media/common/saa7146/saa7146_fops.c
102
struct saa7146_dmaqueue *q, int vbi)
drivers/media/common/saa7146/saa7146_fops.c
106
if (WARN_ON(!q))
drivers/media/common/saa7146/saa7146_fops.c
109
DEB_INT("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi);
drivers/media/common/saa7146/saa7146_fops.c
112
if (!list_empty(&q->queue)) {
drivers/media/common/saa7146/saa7146_fops.c
114
buf = list_entry(q->queue.next, struct saa7146_buf, list);
drivers/media/common/saa7146/saa7146_fops.c
116
if (!list_empty(&q->queue))
drivers/media/common/saa7146/saa7146_fops.c
117
next = list_entry(q->queue.next, struct saa7146_buf, list);
drivers/media/common/saa7146/saa7146_fops.c
118
q->curr = buf;
drivers/media/common/saa7146/saa7146_fops.c
120
buf, q->queue.prev, q->queue.next);
drivers/media/common/saa7146/saa7146_fops.c
150
timer_delete(&q->timeout);
drivers/media/common/saa7146/saa7146_fops.c
156
struct saa7146_dmaqueue *q = timer_container_of(q, t, timeout);
drivers/media/common/saa7146/saa7146_fops.c
157
struct saa7146_dev *dev = q->dev;
drivers/media/common/saa7146/saa7146_fops.c
160
DEB_EE("dev:%p, dmaq:%p\n", dev, q);
drivers/media/common/saa7146/saa7146_fops.c
163
if (q->curr) {
drivers/media/common/saa7146/saa7146_fops.c
164
DEB_D("timeout on %p\n", q->curr);
drivers/media/common/saa7146/saa7146_fops.c
165
saa7146_buffer_finish(dev, q, VB2_BUF_STATE_ERROR);
drivers/media/common/saa7146/saa7146_fops.c
175
saa7146_buffer_next(dev, q, 0);
drivers/media/common/saa7146/saa7146_fops.c
346
struct vb2_queue *q;
drivers/media/common/saa7146/saa7146_fops.c
355
q = &dev->vv_data->video_dmaq.q;
drivers/media/common/saa7146/saa7146_fops.c
358
q = &dev->vv_data->vbi_dmaq.q;
drivers/media/common/saa7146/saa7146_fops.c
381
q->type = type == VFL_TYPE_VIDEO ? V4L2_BUF_TYPE_VIDEO_CAPTURE : V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/common/saa7146/saa7146_fops.c
382
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/common/saa7146/saa7146_fops.c
383
q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
drivers/media/common/saa7146/saa7146_fops.c
384
q->ops = type == VFL_TYPE_VIDEO ? &video_qops : &vbi_qops;
drivers/media/common/saa7146/saa7146_fops.c
385
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/common/saa7146/saa7146_fops.c
386
q->drv_priv = dev;
drivers/media/common/saa7146/saa7146_fops.c
387
q->gfp_flags = __GFP_DMA32;
drivers/media/common/saa7146/saa7146_fops.c
388
q->buf_struct_size = sizeof(struct saa7146_buf);
drivers/media/common/saa7146/saa7146_fops.c
389
q->lock = &dev->v4l2_lock;
drivers/media/common/saa7146/saa7146_fops.c
390
q->min_queued_buffers = 2;
drivers/media/common/saa7146/saa7146_fops.c
391
q->dev = &dev->pci->dev;
drivers/media/common/saa7146/saa7146_fops.c
392
err = vb2_queue_init(q);
drivers/media/common/saa7146/saa7146_fops.c
395
vfd->queue = q;
drivers/media/common/saa7146/saa7146_fops.c
49
struct saa7146_dmaqueue *q,
drivers/media/common/saa7146/saa7146_fops.c
53
DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf);
drivers/media/common/saa7146/saa7146_fops.c
55
if (WARN_ON(!q))
drivers/media/common/saa7146/saa7146_fops.c
58
if (NULL == q->curr) {
drivers/media/common/saa7146/saa7146_fops.c
59
q->curr = buf;
drivers/media/common/saa7146/saa7146_fops.c
63
list_add_tail(&buf->list, &q->queue);
drivers/media/common/saa7146/saa7146_fops.c
71
struct saa7146_dmaqueue *q,
drivers/media/common/saa7146/saa7146_fops.c
75
struct saa7146_buf *buf = q->curr;
drivers/media/common/saa7146/saa7146_fops.c
78
DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state);
drivers/media/common/saa7146/saa7146_fops.c
79
DEB_EE("q->curr:%p\n", q->curr);
drivers/media/common/saa7146/saa7146_fops.c
87
q->curr = NULL;
drivers/media/common/saa7146/saa7146_vbi.c
220
static int queue_setup(struct vb2_queue *q,
drivers/media/common/saa7146/saa7146_vbi.c
290
static void return_buffers(struct vb2_queue *q, int state)
drivers/media/common/saa7146/saa7146_vbi.c
292
struct saa7146_dev *dev = vb2_get_drv_priv(q);
drivers/media/common/saa7146/saa7146_vbi.c
380
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/common/saa7146/saa7146_vbi.c
382
struct saa7146_dev *dev = vb2_get_drv_priv(q);
drivers/media/common/saa7146/saa7146_vbi.c
385
if (!vb2_is_streaming(&dev->vv_data->vbi_dmaq.q))
drivers/media/common/saa7146/saa7146_vbi.c
389
return_buffers(q, VB2_BUF_STATE_QUEUED);
drivers/media/common/saa7146/saa7146_vbi.c
393
static void stop_streaming(struct vb2_queue *q)
drivers/media/common/saa7146/saa7146_vbi.c
395
struct saa7146_dev *dev = vb2_get_drv_priv(q);
drivers/media/common/saa7146/saa7146_vbi.c
398
return_buffers(q, VB2_BUF_STATE_ERROR);
drivers/media/common/saa7146/saa7146_video.c
309
if (vb2_is_busy(&vv->video_dmaq.q))
drivers/media/common/saa7146/saa7146_video.c
315
if (vb2_is_busy(&vv->video_dmaq.q))
drivers/media/common/saa7146/saa7146_video.c
427
if (vb2_is_busy(&vv->video_dmaq.q)) {
drivers/media/common/saa7146/saa7146_video.c
474
if (vb2_is_busy(&vv->video_dmaq.q) || vb2_is_busy(&vv->vbi_dmaq.q)) {
drivers/media/common/saa7146/saa7146_video.c
556
static int queue_setup(struct vb2_queue *q,
drivers/media/common/saa7146/saa7146_video.c
560
struct saa7146_dev *dev = vb2_get_drv_priv(q);
drivers/media/common/saa7146/saa7146_video.c
635
static void return_buffers(struct vb2_queue *q, int state)
drivers/media/common/saa7146/saa7146_video.c
637
struct saa7146_dev *dev = vb2_get_drv_priv(q);
drivers/media/common/saa7146/saa7146_video.c
653
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/common/saa7146/saa7146_video.c
655
struct saa7146_dev *dev = vb2_get_drv_priv(q);
drivers/media/common/saa7146/saa7146_video.c
658
if (!vb2_is_streaming(&dev->vv_data->video_dmaq.q))
drivers/media/common/saa7146/saa7146_video.c
662
return_buffers(q, VB2_BUF_STATE_QUEUED);
drivers/media/common/saa7146/saa7146_video.c
666
static void stop_streaming(struct vb2_queue *q)
drivers/media/common/saa7146/saa7146_video.c
668
struct saa7146_dev *dev = vb2_get_drv_priv(q);
drivers/media/common/saa7146/saa7146_video.c
673
return_buffers(q, VB2_BUF_STATE_ERROR);
drivers/media/common/saa7146/saa7146_video.c
707
struct saa7146_dmaqueue *q = &vv->video_dmaq;
drivers/media/common/saa7146/saa7146_video.c
713
if (q->curr)
drivers/media/common/saa7146/saa7146_video.c
714
saa7146_buffer_finish(dev, q, VB2_BUF_STATE_DONE);
drivers/media/common/saa7146/saa7146_video.c
715
saa7146_buffer_next(dev, q, 0);
drivers/media/common/videobuf2/videobuf2-core.c
1007
__vb2_queue_free(q, first_index, allocated_buffers);
drivers/media/common/videobuf2/videobuf2-core.c
1008
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1011
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1018
q->waiting_for_buffers = !q->is_output;
drivers/media/common/videobuf2/videobuf2-core.c
1019
q->is_busy = 1;
drivers/media/common/videobuf2/videobuf2-core.c
1024
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1025
q->memory = VB2_MEMORY_UNKNOWN;
drivers/media/common/videobuf2/videobuf2-core.c
1026
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1027
vb2_core_free_buffers_storage(q);
drivers/media/common/videobuf2/videobuf2-core.c
103
#define log_qop(q, op) \
drivers/media/common/videobuf2/videobuf2-core.c
1032
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
drivers/media/common/videobuf2/videobuf2-core.c
104
dprintk(q, 2, "call_qop(%s)%s\n", #op, \
drivers/media/common/videobuf2/videobuf2-core.c
1041
unsigned int q_num_bufs = vb2_get_num_buffers(q);
drivers/media/common/videobuf2/videobuf2-core.c
1045
if (q_num_bufs == q->max_num_buffers) {
drivers/media/common/videobuf2/videobuf2-core.c
1046
dprintk(q, 1, "maximum number of buffers already allocated\n");
drivers/media/common/videobuf2/videobuf2-core.c
105
(q)->ops->op ? "" : " (nop)")
drivers/media/common/videobuf2/videobuf2-core.c
1051
if (q->waiting_in_dqbuf && *count) {
drivers/media/common/videobuf2/videobuf2-core.c
1052
dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
drivers/media/common/videobuf2/videobuf2-core.c
1055
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
drivers/media/common/videobuf2/videobuf2-core.c
1060
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1061
ret = vb2_core_allocated_buffers_storage(q);
drivers/media/common/videobuf2/videobuf2-core.c
1062
q->memory = memory;
drivers/media/common/videobuf2/videobuf2-core.c
1063
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1066
q->waiting_for_buffers = !q->is_output;
drivers/media/common/videobuf2/videobuf2-core.c
1067
set_queue_coherency(q, non_coherent_mem);
drivers/media/common/videobuf2/videobuf2-core.c
1069
if (q->memory != memory) {
drivers/media/common/videobuf2/videobuf2-core.c
107
#define call_qop(q, op, args...) \
drivers/media/common/videobuf2/videobuf2-core.c
1070
dprintk(q, 1, "memory model mismatch\n");
drivers/media/common/videobuf2/videobuf2-core.c
1073
if (!verify_coherency_flags(q, non_coherent_mem))
drivers/media/common/videobuf2/videobuf2-core.c
1077
num_buffers = min(*count, q->max_num_buffers - q_num_bufs);
drivers/media/common/videobuf2/videobuf2-core.c
1088
ret = call_qop(q, queue_setup, q, &num_buffers,
drivers/media/common/videobuf2/videobuf2-core.c
1089
&num_planes, plane_sizes, q->alloc_devs);
drivers/media/common/videobuf2/videobuf2-core.c
1094
allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
drivers/media/common/videobuf2/videobuf2-core.c
1097
dprintk(q, 1, "memory allocation failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
111
log_qop(q, op); \
drivers/media/common/videobuf2/videobuf2-core.c
1112
ret = call_qop(q, queue_setup, q, &num_buffers,
drivers/media/common/videobuf2/videobuf2-core.c
1113
&num_planes, plane_sizes, q->alloc_devs);
drivers/media/common/videobuf2/videobuf2-core.c
112
err = (q)->ops->op ? (q)->ops->op(args) : 0; \
drivers/media/common/videobuf2/videobuf2-core.c
1124
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1132
__vb2_queue_free(q, *first_index, allocated_buffers);
drivers/media/common/videobuf2/videobuf2-core.c
1133
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1136
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
114
(q)->cnt_ ## op++; \
drivers/media/common/videobuf2/videobuf2-core.c
1143
q->is_busy = 1;
drivers/media/common/videobuf2/videobuf2-core.c
1149
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1150
q->memory = VB2_MEMORY_UNKNOWN;
drivers/media/common/videobuf2/videobuf2-core.c
1151
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1178
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
118
#define call_void_qop(q, op, args...) \
drivers/media/common/videobuf2/videobuf2-core.c
1196
dprintk(q, 4, "done processing on buffer %d, state: %s\n",
drivers/media/common/videobuf2/videobuf2-core.c
120
log_qop(q, op); \
drivers/media/common/videobuf2/videobuf2-core.c
1202
spin_lock_irqsave(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
1207
list_add_tail(&vb->done_entry, &q->done_list);
drivers/media/common/videobuf2/videobuf2-core.c
121
if ((q)->ops->op) \
drivers/media/common/videobuf2/videobuf2-core.c
1210
atomic_dec(&q->owned_by_drv_count);
drivers/media/common/videobuf2/videobuf2-core.c
1217
spin_unlock_irqrestore(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
1219
trace_vb2_buf_done(q, vb);
drivers/media/common/videobuf2/videobuf2-core.c
122
(q)->ops->op(args); \
drivers/media/common/videobuf2/videobuf2-core.c
1226
wake_up(&q->done_wq);
drivers/media/common/videobuf2/videobuf2-core.c
123
(q)->cnt_ ## op++; \
drivers/media/common/videobuf2/videobuf2-core.c
1232
void vb2_discard_done(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
1237
spin_lock_irqsave(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
1238
list_for_each_entry(vb, &q->done_list, done_entry)
drivers/media/common/videobuf2/videobuf2-core.c
1240
spin_unlock_irqrestore(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
1262
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
1282
dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
drivers/media/common/videobuf2/videobuf2-core.c
1287
dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
drivers/media/common/videobuf2/videobuf2-core.c
1314
q->alloc_devs[plane] ? : q->dev,
drivers/media/common/videobuf2/videobuf2-core.c
1318
dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
drivers/media/common/videobuf2/videobuf2-core.c
1345
dprintk(q, 1, "buffer initialization failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
1352
dprintk(q, 1, "buffer preparation failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
1378
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
1397
dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
drivers/media/common/videobuf2/videobuf2-core.c
1408
dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
drivers/media/common/videobuf2/videobuf2-core.c
1420
dprintk(q, 3, "buffer for plane %d changed\n", plane);
drivers/media/common/videobuf2/videobuf2-core.c
1440
q->alloc_devs[plane] == q->alloc_devs[i]) {
drivers/media/common/videobuf2/videobuf2-core.c
1454
q->alloc_devs[plane] ? : q->dev,
drivers/media/common/videobuf2/videobuf2-core.c
1458
dprintk(q, 1, "failed to attach dmabuf\n");
drivers/media/common/videobuf2/videobuf2-core.c
1473
dprintk(q, 1, "failed to map dmabuf for plane %d\n",
drivers/media/common/videobuf2/videobuf2-core.c
1502
dprintk(q, 1, "buffer initialization failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
1509
dprintk(q, 1, "buffer preparation failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
1533
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
1536
atomic_inc(&q->owned_by_drv_count);
drivers/media/common/videobuf2/videobuf2-core.c
1538
trace_vb2_buf_queue(q, vb);
drivers/media/common/videobuf2/videobuf2-core.c
1545
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
1549
if (q->error) {
drivers/media/common/videobuf2/videobuf2-core.c
1550
dprintk(q, 1, "fatal error occurred on queue\n");
drivers/media/common/videobuf2/videobuf2-core.c
1558
if (q->is_output) {
drivers/media/common/videobuf2/videobuf2-core.c
1561
dprintk(q, 1, "buffer validation failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
1568
switch (q->memory) {
drivers/media/common/videobuf2/videobuf2-core.c
1585
dprintk(q, 1, "buffer preparation failed: %d\n", ret);
drivers/media/common/videobuf2/videobuf2-core.c
167
#define call_qop(q, op, args...) \
drivers/media/common/videobuf2/videobuf2-core.c
168
((q)->ops->op ? (q)->ops->op(args) : 0)
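Both flavours of call_qop()/call_void_qop() in this file (the debug-counting variants near the top and these plain ones) implement the same contract: a driver callback left NULL is treated as a successful no-op. The pattern, reduced to plain C with hypothetical names:

struct ops { int (*start)(void *priv); };

static int call_start(const struct ops *ops, void *priv)
{
	/* Missing callback == success, exactly like call_qop(). */
	return ops->start ? ops->start(priv) : 0;
}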
drivers/media/common/videobuf2/videobuf2-core.c
1693
int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
drivers/media/common/videobuf2/videobuf2-core.c
1698
dprintk(q, 1, "invalid buffer state %s\n",
drivers/media/common/videobuf2/videobuf2-core.c
170
#define call_void_qop(q, op, args...) \
drivers/media/common/videobuf2/videobuf2-core.c
1703
dprintk(q, 1, "buffer already prepared\n");
drivers/media/common/videobuf2/videobuf2-core.c
1712
call_void_bufop(q, fill_user_buffer, vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
1714
dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);
drivers/media/common/videobuf2/videobuf2-core.c
172
if ((q)->ops->op) \
drivers/media/common/videobuf2/videobuf2-core.c
1720
int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count)
drivers/media/common/videobuf2/videobuf2-core.c
1723
unsigned int q_num_bufs = vb2_get_num_buffers(q);
drivers/media/common/videobuf2/videobuf2-core.c
173
(q)->ops->op(args); \
drivers/media/common/videobuf2/videobuf2-core.c
1731
if (start > q->max_num_buffers - count)
drivers/media/common/videobuf2/videobuf2-core.c
1734
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1738
struct vb2_buffer *vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
1749
__vb2_queue_free(q, start, count);
drivers/media/common/videobuf2/videobuf2-core.c
1750
dprintk(q, 2, "%u buffers removed\n", count);
drivers/media/common/videobuf2/videobuf2-core.c
1753
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
1769
static int vb2_start_streaming(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
1778
list_for_each_entry(vb, &q->queued_list, queued_entry)
drivers/media/common/videobuf2/videobuf2-core.c
1782
q->start_streaming_called = 1;
drivers/media/common/videobuf2/videobuf2-core.c
1783
ret = call_qop(q, start_streaming, q,
drivers/media/common/videobuf2/videobuf2-core.c
1784
atomic_read(&q->owned_by_drv_count));
drivers/media/common/videobuf2/videobuf2-core.c
1788
q->start_streaming_called = 0;
drivers/media/common/videobuf2/videobuf2-core.c
1790
dprintk(q, 1, "driver refused to start streaming\n");
drivers/media/common/videobuf2/videobuf2-core.c
1797
if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
drivers/media/common/videobuf2/videobuf2-core.c
1804
for (i = 0; i < q->max_num_buffers; ++i) {
drivers/media/common/videobuf2/videobuf2-core.c
1805
vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
1814
WARN_ON(atomic_read(&q->owned_by_drv_count));
drivers/media/common/videobuf2/videobuf2-core.c
1821
WARN_ON(!list_empty(&q->done_list));
drivers/media/common/videobuf2/videobuf2-core.c
1825
int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
drivers/media/common/videobuf2/videobuf2-core.c
1831
if (q->error) {
drivers/media/common/videobuf2/videobuf2-core.c
1832
dprintk(q, 1, "fatal error occurred on queue\n");
drivers/media/common/videobuf2/videobuf2-core.c
1837
q->requires_requests) {
drivers/media/common/videobuf2/videobuf2-core.c
1838
dprintk(q, 1, "qbuf requires a request\n");
drivers/media/common/videobuf2/videobuf2-core.c
1842
if ((req && q->uses_qbuf) ||
drivers/media/common/videobuf2/videobuf2-core.c
1844
q->uses_requests)) {
drivers/media/common/videobuf2/videobuf2-core.c
1845
dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
drivers/media/common/videobuf2/videobuf2-core.c
1852
q->uses_requests = 1;
drivers/media/common/videobuf2/videobuf2-core.c
1854
dprintk(q, 1, "buffer %d not in dequeued state\n",
drivers/media/common/videobuf2/videobuf2-core.c
1859
if (q->is_output && !vb->prepared) {
drivers/media/common/videobuf2/videobuf2-core.c
1862
dprintk(q, 1, "buffer validation failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
187
#define call_bufop(q, op, args...) \
drivers/media/common/videobuf2/videobuf2-core.c
1874
q, true, &vb->req_obj);
drivers/media/common/videobuf2/videobuf2-core.c
1894
call_void_bufop(q, copy_timestamp, vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
1895
call_void_bufop(q, fill_user_buffer, vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
1898
dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
drivers/media/common/videobuf2/videobuf2-core.c
190
if (q && q->buf_ops && q->buf_ops->op) \
drivers/media/common/videobuf2/videobuf2-core.c
1903
q->uses_qbuf = 1;
drivers/media/common/videobuf2/videobuf2-core.c
191
ret = q->buf_ops->op(args); \
drivers/media/common/videobuf2/videobuf2-core.c
1915
dprintk(q, 1, "buffer still being prepared\n");
drivers/media/common/videobuf2/videobuf2-core.c
1918
dprintk(q, 1, "invalid buffer state %s\n",
drivers/media/common/videobuf2/videobuf2-core.c
1928
list_add_tail(&vb->queued_entry, &q->queued_list);
drivers/media/common/videobuf2/videobuf2-core.c
1929
q->queued_count++;
drivers/media/common/videobuf2/videobuf2-core.c
1930
q->waiting_for_buffers = false;
drivers/media/common/videobuf2/videobuf2-core.c
1934
call_void_bufop(q, copy_timestamp, vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
1936
trace_vb2_qbuf(q, vb);
drivers/media/common/videobuf2/videobuf2-core.c
1942
if (q->start_streaming_called)
drivers/media/common/videobuf2/videobuf2-core.c
1947
call_void_bufop(q, fill_user_buffer, vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
195
#define call_void_bufop(q, op, args...) \
drivers/media/common/videobuf2/videobuf2-core.c
1955
if (q->streaming && !q->start_streaming_called &&
drivers/media/common/videobuf2/videobuf2-core.c
1956
q->queued_count >= q->min_queued_buffers) {
drivers/media/common/videobuf2/videobuf2-core.c
1957
ret = vb2_start_streaming(q);
drivers/media/common/videobuf2/videobuf2-core.c
1965
q->queued_count--;
drivers/media/common/videobuf2/videobuf2-core.c
197
if (q && q->buf_ops && q->buf_ops->op) \
drivers/media/common/videobuf2/videobuf2-core.c
1971
dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
drivers/media/common/videobuf2/videobuf2-core.c
198
q->buf_ops->op(args); \
drivers/media/common/videobuf2/videobuf2-core.c
1982
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
drivers/media/common/videobuf2/videobuf2-core.c
1996
if (q->waiting_in_dqbuf) {
drivers/media/common/videobuf2/videobuf2-core.c
1997
dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
drivers/media/common/videobuf2/videobuf2-core.c
2001
if (!q->streaming) {
drivers/media/common/videobuf2/videobuf2-core.c
2002
dprintk(q, 1, "streaming off, will not wait for buffers\n");
drivers/media/common/videobuf2/videobuf2-core.c
2006
if (q->error) {
drivers/media/common/videobuf2/videobuf2-core.c
2007
dprintk(q, 1, "Queue in error state, will not wait for buffers\n");
drivers/media/common/videobuf2/videobuf2-core.c
201
static void __vb2_queue_cancel(struct vb2_queue *q);
drivers/media/common/videobuf2/videobuf2-core.c
2011
if (q->last_buffer_dequeued) {
drivers/media/common/videobuf2/videobuf2-core.c
2012
dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
drivers/media/common/videobuf2/videobuf2-core.c
2016
if (!list_empty(&q->done_list)) {
drivers/media/common/videobuf2/videobuf2-core.c
2024
dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
drivers/media/common/videobuf2/videobuf2-core.c
2028
q->waiting_in_dqbuf = 1;
drivers/media/common/videobuf2/videobuf2-core.c
2034
mutex_unlock(q->lock);
drivers/media/common/videobuf2/videobuf2-core.c
2039
dprintk(q, 3, "will sleep waiting for buffers\n");
drivers/media/common/videobuf2/videobuf2-core.c
2040
ret = wait_event_interruptible(q->done_wq,
drivers/media/common/videobuf2/videobuf2-core.c
2041
!list_empty(&q->done_list) || !q->streaming ||
drivers/media/common/videobuf2/videobuf2-core.c
2042
q->error);
drivers/media/common/videobuf2/videobuf2-core.c
2044
mutex_lock(q->lock);
drivers/media/common/videobuf2/videobuf2-core.c
2046
q->waiting_in_dqbuf = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2052
dprintk(q, 1, "sleep was interrupted\n");
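The fragments above show the classic blocking-dequeue shape: __vb2_wait_for_done_vb() sets waiting_in_dqbuf, drops q->lock so other threads can queue buffers or stop streaming, sleeps in wait_event_interruptible() until done_list is non-empty (or streaming stops, or an error is raised), then retakes the lock. A hedged pthread analogue of the drop-lock-and-wait step (a condition variable stands in for wait_event plus the explicit unlock/lock):

#include <pthread.h>

struct dq {
	pthread_mutex_t lock;		/* plays the role of q->lock */
	pthread_cond_t done_wq;
	int done_count, streaming, error;
};

/* Called with q->lock held; returns with it held again. */
static int wait_for_done(struct dq *q)
{
	while (q->streaming && !q->error && q->done_count == 0)
		pthread_cond_wait(&q->done_wq, &q->lock); /* drops + retakes */

	return (q->error || q->done_count == 0) ? -1 : 0;
}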
drivers/media/common/videobuf2/videobuf2-core.c
2064
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
drivers/media/common/videobuf2/videobuf2-core.c
2073
ret = __vb2_wait_for_done_vb(q, nonblocking);
drivers/media/common/videobuf2/videobuf2-core.c
2081
spin_lock_irqsave(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
2082
*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
drivers/media/common/videobuf2/videobuf2-core.c
2089
ret = call_bufop(q, verify_planes_array, *vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
2092
spin_unlock_irqrestore(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
2097
int vb2_wait_for_all_buffers(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
2099
if (!q->streaming) {
drivers/media/common/videobuf2/videobuf2-core.c
2100
dprintk(q, 1, "streaming off, will not wait for buffers\n");
drivers/media/common/videobuf2/videobuf2-core.c
2104
if (q->start_streaming_called)
drivers/media/common/videobuf2/videobuf2-core.c
2105
wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
drivers/media/common/videobuf2/videobuf2-core.c
2115
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
2123
call_void_bufop(q, init_buffer, vb);
drivers/media/common/videobuf2/videobuf2-core.c
2126
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
drivers/media/common/videobuf2/videobuf2-core.c
2132
ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
drivers/media/common/videobuf2/videobuf2-core.c
2138
dprintk(q, 3, "returning done buffer\n");
drivers/media/common/videobuf2/videobuf2-core.c
2141
dprintk(q, 3, "returning done buffer with errors\n");
drivers/media/common/videobuf2/videobuf2-core.c
2144
dprintk(q, 1, "invalid buffer state %s\n",
drivers/media/common/videobuf2/videobuf2-core.c
2157
call_void_bufop(q, fill_user_buffer, vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
2161
q->queued_count--;
drivers/media/common/videobuf2/videobuf2-core.c
2163
trace_vb2_dqbuf(q, vb);
drivers/media/common/videobuf2/videobuf2-core.c
2176
dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
drivers/media/common/videobuf2/videobuf2-core.c
2190
static void __vb2_queue_cancel(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
2198
if (q->start_streaming_called)
drivers/media/common/videobuf2/videobuf2-core.c
2199
call_void_qop(q, stop_streaming, q);
drivers/media/common/videobuf2/videobuf2-core.c
2201
if (q->streaming)
drivers/media/common/videobuf2/videobuf2-core.c
2202
call_void_qop(q, unprepare_streaming, q);
drivers/media/common/videobuf2/videobuf2-core.c
2210
if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
drivers/media/common/videobuf2/videobuf2-core.c
2211
for (i = 0; i < q->max_num_buffers; i++) {
drivers/media/common/videobuf2/videobuf2-core.c
2212
struct vb2_buffer *vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
2224
WARN_ON(atomic_read(&q->owned_by_drv_count));
drivers/media/common/videobuf2/videobuf2-core.c
2227
q->streaming = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2228
q->start_streaming_called = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2229
q->queued_count = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2230
q->error = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2231
q->uses_requests = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2232
q->uses_qbuf = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2237
INIT_LIST_HEAD(&q->queued_list);
drivers/media/common/videobuf2/videobuf2-core.c
2242
INIT_LIST_HEAD(&q->done_list);
drivers/media/common/videobuf2/videobuf2-core.c
2243
atomic_set(&q->owned_by_drv_count, 0);
drivers/media/common/videobuf2/videobuf2-core.c
2244
wake_up_all(&q->done_wq);
drivers/media/common/videobuf2/videobuf2-core.c
225
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
2255
for (i = 0; i < q->max_num_buffers; i++) {
drivers/media/common/videobuf2/videobuf2-core.c
2259
vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
2301
int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
drivers/media/common/videobuf2/videobuf2-core.c
2303
unsigned int q_num_bufs = vb2_get_num_buffers(q);
drivers/media/common/videobuf2/videobuf2-core.c
2306
if (type != q->type) {
drivers/media/common/videobuf2/videobuf2-core.c
2307
dprintk(q, 1, "invalid stream type\n");
drivers/media/common/videobuf2/videobuf2-core.c
2311
if (q->streaming) {
drivers/media/common/videobuf2/videobuf2-core.c
2312
dprintk(q, 3, "already streaming\n");
drivers/media/common/videobuf2/videobuf2-core.c
2317
dprintk(q, 1, "no buffers have been allocated\n");
drivers/media/common/videobuf2/videobuf2-core.c
2321
if (q_num_bufs < q->min_queued_buffers) {
drivers/media/common/videobuf2/videobuf2-core.c
2322
dprintk(q, 1, "need at least %u allocated buffers\n",
drivers/media/common/videobuf2/videobuf2-core.c
2323
q->min_queued_buffers);
drivers/media/common/videobuf2/videobuf2-core.c
2327
ret = call_qop(q, prepare_streaming, q);
drivers/media/common/videobuf2/videobuf2-core.c
2335
if (q->queued_count >= q->min_queued_buffers) {
drivers/media/common/videobuf2/videobuf2-core.c
2336
ret = vb2_start_streaming(q);
drivers/media/common/videobuf2/videobuf2-core.c
2341
q->streaming = 1;
drivers/media/common/videobuf2/videobuf2-core.c
2343
dprintk(q, 3, "successful\n");
drivers/media/common/videobuf2/videobuf2-core.c
2347
call_void_qop(q, unprepare_streaming, q);
drivers/media/common/videobuf2/videobuf2-core.c
2352
void vb2_queue_error(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
2354
q->error = 1;
drivers/media/common/videobuf2/videobuf2-core.c
2356
wake_up_all(&q->done_wq);
drivers/media/common/videobuf2/videobuf2-core.c
2360
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
drivers/media/common/videobuf2/videobuf2-core.c
2362
if (type != q->type) {
drivers/media/common/videobuf2/videobuf2-core.c
2363
dprintk(q, 1, "invalid stream type\n");
drivers/media/common/videobuf2/videobuf2-core.c
2376
__vb2_queue_cancel(q);
drivers/media/common/videobuf2/videobuf2-core.c
2377
q->waiting_for_buffers = !q->is_output;
drivers/media/common/videobuf2/videobuf2-core.c
2378
q->last_buffer_dequeued = false;
drivers/media/common/videobuf2/videobuf2-core.c
2380
dprintk(q, 3, "successful\n");
drivers/media/common/videobuf2/videobuf2-core.c
2388
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long offset,
drivers/media/common/videobuf2/videobuf2-core.c
2397
lockdep_assert_held(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2399
if (q->memory != VB2_MEMORY_MMAP) {
drivers/media/common/videobuf2/videobuf2-core.c
2400
dprintk(q, 1, "queue is not currently set up for mmap\n");
drivers/media/common/videobuf2/videobuf2-core.c
2404
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
2405
dprintk(q, 1, "file io in progress\n");
drivers/media/common/videobuf2/videobuf2-core.c
2413
*vb = vb2_get_buffer(q, buffer);
drivers/media/common/videobuf2/videobuf2-core.c
2422
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
drivers/media/common/videobuf2/videobuf2-core.c
2429
if (q->memory != VB2_MEMORY_MMAP) {
drivers/media/common/videobuf2/videobuf2-core.c
2430
dprintk(q, 1, "queue is not currently set up for mmap\n");
drivers/media/common/videobuf2/videobuf2-core.c
2434
if (!q->mem_ops->get_dmabuf) {
drivers/media/common/videobuf2/videobuf2-core.c
2435
dprintk(q, 1, "queue does not support DMA buffer exporting\n");
drivers/media/common/videobuf2/videobuf2-core.c
244
q->alloc_devs[plane] ? : q->dev,
drivers/media/common/videobuf2/videobuf2-core.c
2440
dprintk(q, 1, "queue does support only O_CLOEXEC and access mode flags\n");
drivers/media/common/videobuf2/videobuf2-core.c
2444
if (type != q->type) {
drivers/media/common/videobuf2/videobuf2-core.c
2445
dprintk(q, 1, "invalid buffer type\n");
drivers/media/common/videobuf2/videobuf2-core.c
2450
dprintk(q, 1, "buffer plane out of range\n");
drivers/media/common/videobuf2/videobuf2-core.c
2454
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
2455
dprintk(q, 1, "expbuf: file io in progress\n");
drivers/media/common/videobuf2/videobuf2-core.c
2466
dprintk(q, 1, "failed to export buffer %d, plane %d\n",
drivers/media/common/videobuf2/videobuf2-core.c
2473
dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
drivers/media/common/videobuf2/videobuf2-core.c
2479
dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n",
drivers/media/common/videobuf2/videobuf2-core.c
2487
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
drivers/media/common/videobuf2/videobuf2-core.c
2499
dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
drivers/media/common/videobuf2/videobuf2-core.c
2502
if (q->is_output) {
drivers/media/common/videobuf2/videobuf2-core.c
2504
dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
drivers/media/common/videobuf2/videobuf2-core.c
2509
dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
drivers/media/common/videobuf2/videobuf2-core.c
2514
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2520
ret = __find_plane_by_offset(q, offset, &vb, &plane);
drivers/media/common/videobuf2/videobuf2-core.c
2531
dprintk(q, 1,
drivers/media/common/videobuf2/videobuf2-core.c
2547
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2551
dprintk(q, 3, "buffer %u, plane %d successfully mapped\n", vb->index, plane);
drivers/media/common/videobuf2/videobuf2-core.c
2557
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
drivers/media/common/videobuf2/videobuf2-core.c
2569
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2575
ret = __find_plane_by_offset(q, offset, &vb, &plane);
drivers/media/common/videobuf2/videobuf2-core.c
2580
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2584
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2590
int vb2_core_queue_init(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
2599
if (!q->max_num_buffers)
drivers/media/common/videobuf2/videobuf2-core.c
2600
q->max_num_buffers = VB2_MAX_FRAME;
drivers/media/common/videobuf2/videobuf2-core.c
2603
q->max_num_buffers = min_t(unsigned int, q->max_num_buffers, MAX_BUFFER_INDEX);
drivers/media/common/videobuf2/videobuf2-core.c
2605
if (WARN_ON(!q) ||
drivers/media/common/videobuf2/videobuf2-core.c
2606
WARN_ON(!q->ops) ||
drivers/media/common/videobuf2/videobuf2-core.c
2607
WARN_ON(!q->mem_ops) ||
drivers/media/common/videobuf2/videobuf2-core.c
2608
WARN_ON(!q->type) ||
drivers/media/common/videobuf2/videobuf2-core.c
2609
WARN_ON(!q->io_modes) ||
drivers/media/common/videobuf2/videobuf2-core.c
2610
WARN_ON(!q->ops->queue_setup) ||
drivers/media/common/videobuf2/videobuf2-core.c
2611
WARN_ON(!q->ops->buf_queue))
drivers/media/common/videobuf2/videobuf2-core.c
2614
if (WARN_ON(q->max_num_buffers < VB2_MAX_FRAME) ||
drivers/media/common/videobuf2/videobuf2-core.c
2615
WARN_ON(q->min_queued_buffers > q->max_num_buffers))
drivers/media/common/videobuf2/videobuf2-core.c
2618
if (WARN_ON(q->requires_requests && !q->supports_requests))
drivers/media/common/videobuf2/videobuf2-core.c
2629
if (WARN_ON(q->supports_requests && q->min_queued_buffers))
drivers/media/common/videobuf2/videobuf2-core.c
2638
if (q->min_reqbufs_allocation < q->min_queued_buffers + 1)
drivers/media/common/videobuf2/videobuf2-core.c
2639
q->min_reqbufs_allocation = q->min_queued_buffers + 1;
drivers/media/common/videobuf2/videobuf2-core.c
2641
if (WARN_ON(q->min_reqbufs_allocation > q->max_num_buffers))
drivers/media/common/videobuf2/videobuf2-core.c
2645
if (WARN_ON(!q->lock))
drivers/media/common/videobuf2/videobuf2-core.c
2648
INIT_LIST_HEAD(&q->queued_list);
drivers/media/common/videobuf2/videobuf2-core.c
2649
INIT_LIST_HEAD(&q->done_list);
drivers/media/common/videobuf2/videobuf2-core.c
2650
spin_lock_init(&q->done_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2651
mutex_init(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2652
init_waitqueue_head(&q->done_wq);
drivers/media/common/videobuf2/videobuf2-core.c
2654
q->memory = VB2_MEMORY_UNKNOWN;
drivers/media/common/videobuf2/videobuf2-core.c
2656
if (q->buf_struct_size == 0)
drivers/media/common/videobuf2/videobuf2-core.c
2657
q->buf_struct_size = sizeof(struct vb2_buffer);
drivers/media/common/videobuf2/videobuf2-core.c
2659
if (q->bidirectional)
drivers/media/common/videobuf2/videobuf2-core.c
2660
q->dma_dir = DMA_BIDIRECTIONAL;
drivers/media/common/videobuf2/videobuf2-core.c
2662
q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
drivers/media/common/videobuf2/videobuf2-core.c
2664
if (q->name[0] == '\0')
drivers/media/common/videobuf2/videobuf2-core.c
2665
snprintf(q->name, sizeof(q->name), "%s-%p",
drivers/media/common/videobuf2/videobuf2-core.c
2666
q->is_output ? "out" : "cap", q);
drivers/media/common/videobuf2/videobuf2-core.c
2672
static int __vb2_init_fileio(struct vb2_queue *q, int read);
drivers/media/common/videobuf2/videobuf2-core.c
2673
static int __vb2_cleanup_fileio(struct vb2_queue *q);
drivers/media/common/videobuf2/videobuf2-core.c
2674
void vb2_core_queue_release(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
2676
__vb2_cleanup_fileio(q);
drivers/media/common/videobuf2/videobuf2-core.c
2677
__vb2_queue_cancel(q);
drivers/media/common/videobuf2/videobuf2-core.c
2678
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2679
__vb2_queue_free(q, 0, q->max_num_buffers);
drivers/media/common/videobuf2/videobuf2-core.c
2680
vb2_core_free_buffers_storage(q);
drivers/media/common/videobuf2/videobuf2-core.c
2681
q->is_busy = 0;
drivers/media/common/videobuf2/videobuf2-core.c
2682
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
2686
__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
drivers/media/common/videobuf2/videobuf2-core.c
2700
poll_wait(file, &q->done_wq, wait);
drivers/media/common/videobuf2/videobuf2-core.c
2702
if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
drivers/media/common/videobuf2/videobuf2-core.c
2704
if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
drivers/media/common/videobuf2/videobuf2-core.c
2710
if (vb2_get_num_buffers(q) == 0 && !vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
2711
if (!q->is_output && (q->io_modes & VB2_READ) &&
drivers/media/common/videobuf2/videobuf2-core.c
2713
if (__vb2_init_fileio(q, 1))
drivers/media/common/videobuf2/videobuf2-core.c
2716
if (q->is_output && (q->io_modes & VB2_WRITE) &&
drivers/media/common/videobuf2/videobuf2-core.c
2718
if (__vb2_init_fileio(q, 0))
drivers/media/common/videobuf2/videobuf2-core.c
2731
if (!vb2_is_streaming(q) || q->error)
drivers/media/common/videobuf2/videobuf2-core.c
2740
if (q->quirk_poll_must_check_waiting_for_buffers &&
drivers/media/common/videobuf2/videobuf2-core.c
2741
q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
drivers/media/common/videobuf2/videobuf2-core.c
2748
if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q))
drivers/media/common/videobuf2/videobuf2-core.c
2751
if (list_empty(&q->done_list)) {
drivers/media/common/videobuf2/videobuf2-core.c
2756
if (q->last_buffer_dequeued)
drivers/media/common/videobuf2/videobuf2-core.c
2763
spin_lock_irqsave(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
2764
if (!list_empty(&q->done_list))
drivers/media/common/videobuf2/videobuf2-core.c
2765
vb = list_first_entry(&q->done_list, struct vb2_buffer,
drivers/media/common/videobuf2/videobuf2-core.c
2767
spin_unlock_irqrestore(&q->done_lock, flags);
drivers/media/common/videobuf2/videobuf2-core.c
2771
return (q->is_output) ?
drivers/media/common/videobuf2/videobuf2-core.c
2835
static int __vb2_init_fileio(struct vb2_queue *q, int read)
drivers/media/common/videobuf2/videobuf2-core.c
2844
if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
drivers/media/common/videobuf2/videobuf2-core.c
2845
(!read && !(q->io_modes & VB2_WRITE))))
drivers/media/common/videobuf2/videobuf2-core.c
2851
if (!q->mem_ops->vaddr)
drivers/media/common/videobuf2/videobuf2-core.c
2857
if (q->streaming || vb2_get_num_buffers(q) > 0)
drivers/media/common/videobuf2/videobuf2-core.c
2860
dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
drivers/media/common/videobuf2/videobuf2-core.c
2861
(read) ? "read" : "write", q->min_reqbufs_allocation, q->fileio_read_once,
drivers/media/common/videobuf2/videobuf2-core.c
2862
q->fileio_write_immediately);
drivers/media/common/videobuf2/videobuf2-core.c
2868
fileio->read_once = q->fileio_read_once;
drivers/media/common/videobuf2/videobuf2-core.c
2869
fileio->write_immediately = q->fileio_write_immediately;
drivers/media/common/videobuf2/videobuf2-core.c
2875
fileio->count = q->min_reqbufs_allocation;
drivers/media/common/videobuf2/videobuf2-core.c
2877
fileio->type = q->type;
drivers/media/common/videobuf2/videobuf2-core.c
2878
q->fileio = fileio;
drivers/media/common/videobuf2/videobuf2-core.c
2879
ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
drivers/media/common/videobuf2/videobuf2-core.c
2884
dprintk(q, 1, "fileio: more than VB2_MAX_FRAME buffers requested\n");
drivers/media/common/videobuf2/videobuf2-core.c
2894
vb = vb2_get_buffer(q, 0);
drivers/media/common/videobuf2/videobuf2-core.c
2908
for (i = 0; i < vb2_get_num_buffers(q); i++) {
drivers/media/common/videobuf2/videobuf2-core.c
2910
vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
2927
for (i = 0; i < vb2_get_num_buffers(q); i++) {
drivers/media/common/videobuf2/videobuf2-core.c
2928
struct vb2_buffer *vb2 = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
2933
ret = vb2_core_qbuf(q, vb2, NULL, NULL);
drivers/media/common/videobuf2/videobuf2-core.c
2942
fileio->initial_index = vb2_get_num_buffers(q);
drivers/media/common/videobuf2/videobuf2-core.c
2949
ret = vb2_core_streamon(q, q->type);
drivers/media/common/videobuf2/videobuf2-core.c
2957
vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
drivers/media/common/videobuf2/videobuf2-core.c
2960
q->fileio = NULL;
drivers/media/common/videobuf2/videobuf2-core.c
2969
static int __vb2_cleanup_fileio(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
2971
struct vb2_fileio_data *fileio = q->fileio;
drivers/media/common/videobuf2/videobuf2-core.c
2974
vb2_core_streamoff(q, q->type);
drivers/media/common/videobuf2/videobuf2-core.c
2975
q->fileio = NULL;
drivers/media/common/videobuf2/videobuf2-core.c
2977
vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
drivers/media/common/videobuf2/videobuf2-core.c
2979
dprintk(q, 3, "file io emulator closed\n");
drivers/media/common/videobuf2/videobuf2-core.c
2993
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
drivers/media/common/videobuf2/videobuf2-core.c
2998
bool is_multiplanar = q->is_multiplanar;
drivers/media/common/videobuf2/videobuf2-core.c
3004
bool copy_timestamp = !read && q->copy_timestamp;
drivers/media/common/videobuf2/videobuf2-core.c
3008
dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
drivers/media/common/videobuf2/videobuf2-core.c
3015
if (q->waiting_in_dqbuf) {
drivers/media/common/videobuf2/videobuf2-core.c
3016
dprintk(q, 3, "another dup()ped fd is %s\n",
drivers/media/common/videobuf2/videobuf2-core.c
3024
if (!vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
3025
ret = __vb2_init_fileio(q, read);
drivers/media/common/videobuf2/videobuf2-core.c
3026
dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
drivers/media/common/videobuf2/videobuf2-core.c
3030
fileio = q->fileio;
drivers/media/common/videobuf2/videobuf2-core.c
3036
if (index >= vb2_get_num_buffers(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
3042
ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
drivers/media/common/videobuf2/videobuf2-core.c
3043
dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
drivers/media/common/videobuf2/videobuf2-core.c
3052
b = vb2_get_buffer(q, index);
drivers/media/common/videobuf2/videobuf2-core.c
3076
dprintk(q, 5, "reducing read count: %zd\n", count);
drivers/media/common/videobuf2/videobuf2-core.c
3082
dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
drivers/media/common/videobuf2/videobuf2-core.c
3089
dprintk(q, 3, "error copying data\n");
drivers/media/common/videobuf2/videobuf2-core.c
3104
struct vb2_buffer *b = vb2_get_buffer(q, index);
drivers/media/common/videobuf2/videobuf2-core.c
3110
dprintk(q, 3, "read limit reached\n");
drivers/media/common/videobuf2/videobuf2-core.c
3111
return __vb2_cleanup_fileio(q);
drivers/media/common/videobuf2/videobuf2-core.c
3121
ret = vb2_core_qbuf(q, b, NULL, NULL);
drivers/media/common/videobuf2/videobuf2-core.c
3122
dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
drivers/media/common/videobuf2/videobuf2-core.c
3137
if (fileio->initial_index < vb2_get_num_buffers(q))
drivers/media/common/videobuf2/videobuf2-core.c
3157
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
drivers/media/common/videobuf2/videobuf2-core.c
3160
return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
drivers/media/common/videobuf2/videobuf2-core.c
3164
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
drivers/media/common/videobuf2/videobuf2-core.c
3167
return __vb2_perform_fileio(q, (char __user *) data, count,
drivers/media/common/videobuf2/videobuf2-core.c
3181
struct vb2_queue *q = data;
drivers/media/common/videobuf2/videobuf2-core.c
3182
struct vb2_threadio_data *threadio = q->threadio;
drivers/media/common/videobuf2/videobuf2-core.c
3188
if (q->is_output) {
drivers/media/common/videobuf2/videobuf2-core.c
3189
prequeue = vb2_get_num_buffers(q);
drivers/media/common/videobuf2/videobuf2-core.c
3190
copy_timestamp = q->copy_timestamp;
drivers/media/common/videobuf2/videobuf2-core.c
3202
vb = vb2_get_buffer(q, index++);
drivers/media/common/videobuf2/videobuf2-core.c
3207
mutex_lock(q->lock);
drivers/media/common/videobuf2/videobuf2-core.c
3209
ret = vb2_core_dqbuf(q, &index, NULL, 0);
drivers/media/common/videobuf2/videobuf2-core.c
3210
mutex_unlock(q->lock);
drivers/media/common/videobuf2/videobuf2-core.c
3211
dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
drivers/media/common/videobuf2/videobuf2-core.c
3213
vb = vb2_get_buffer(q, index);
drivers/media/common/videobuf2/videobuf2-core.c
3225
mutex_lock(q->lock);
drivers/media/common/videobuf2/videobuf2-core.c
3226
ret = vb2_core_qbuf(q, vb, NULL, NULL);
drivers/media/common/videobuf2/videobuf2-core.c
3227
mutex_unlock(q->lock);
drivers/media/common/videobuf2/videobuf2-core.c
3246
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
drivers/media/common/videobuf2/videobuf2-core.c
3252
if (q->threadio)
drivers/media/common/videobuf2/videobuf2-core.c
3254
if (vb2_is_busy(q))
drivers/media/common/videobuf2/videobuf2-core.c
3256
if (WARN_ON(q->fileio))
drivers/media/common/videobuf2/videobuf2-core.c
3265
ret = __vb2_init_fileio(q, !q->is_output);
drivers/media/common/videobuf2/videobuf2-core.c
3266
dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
drivers/media/common/videobuf2/videobuf2-core.c
3269
q->threadio = threadio;
drivers/media/common/videobuf2/videobuf2-core.c
3270
threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
drivers/media/common/videobuf2/videobuf2-core.c
3279
__vb2_cleanup_fileio(q);
drivers/media/common/videobuf2/videobuf2-core.c
3286
int vb2_thread_stop(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
3288
struct vb2_threadio_data *threadio = q->threadio;
drivers/media/common/videobuf2/videobuf2-core.c
3295
vb2_queue_error(q);
drivers/media/common/videobuf2/videobuf2-core.c
3297
__vb2_cleanup_fileio(q);
drivers/media/common/videobuf2/videobuf2-core.c
3300
q->threadio = NULL;
drivers/media/common/videobuf2/videobuf2-core.c
380
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-core.c
403
dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
drivers/media/common/videobuf2/videobuf2-core.c
408
static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
drivers/media/common/videobuf2/videobuf2-core.c
415
if (q->memory == VB2_MEMORY_DMABUF) {
drivers/media/common/videobuf2/videobuf2-core.c
425
if (q->dma_dir == DMA_TO_DEVICE)
drivers/media/common/videobuf2/videobuf2-core.c
435
static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index)
drivers/media/common/videobuf2/videobuf2-core.c
437
WARN_ON(index >= q->max_num_buffers || test_bit(index, q->bufs_bitmap) || vb->vb2_queue);
drivers/media/common/videobuf2/videobuf2-core.c
439
q->bufs[index] = vb;
drivers/media/common/videobuf2/videobuf2-core.c
441
vb->vb2_queue = q;
drivers/media/common/videobuf2/videobuf2-core.c
442
set_bit(index, q->bufs_bitmap);
drivers/media/common/videobuf2/videobuf2-core.c
465
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
drivers/media/common/videobuf2/videobuf2-core.c
47
#define dprintk(q, level, fmt, arg...) \
drivers/media/common/videobuf2/videobuf2-core.c
472
unsigned long index = q->max_num_buffers;
drivers/media/common/videobuf2/videobuf2-core.c
480
q->max_num_buffers - vb2_get_num_buffers(q));
drivers/media/common/videobuf2/videobuf2-core.c
483
index = bitmap_find_next_zero_area(q->bufs_bitmap, q->max_num_buffers,
drivers/media/common/videobuf2/videobuf2-core.c
486
if (index < q->max_num_buffers)
drivers/media/common/videobuf2/videobuf2-core.c
50
pr_info("[%s] %s: " fmt, (q)->name, __func__, \
drivers/media/common/videobuf2/videobuf2-core.c
502
vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
drivers/media/common/videobuf2/videobuf2-core.c
504
dprintk(q, 1, "memory alloc for buffer struct failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
510
vb->type = q->type;
drivers/media/common/videobuf2/videobuf2-core.c
512
init_buffer_cache_hints(q, vb);
drivers/media/common/videobuf2/videobuf2-core.c
518
vb2_queue_add_buffer(q, vb, index++);
drivers/media/common/videobuf2/videobuf2-core.c
519
call_void_bufop(q, init_buffer, vb);
drivers/media/common/videobuf2/videobuf2-core.c
525
dprintk(q, 1, "failed allocating memory for buffer %d\n",
drivers/media/common/videobuf2/videobuf2-core.c
539
dprintk(q, 1, "buffer %d %p initialization failed\n",
drivers/media/common/videobuf2/videobuf2-core.c
549
dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
drivers/media/common/videobuf2/videobuf2-core.c
559
static void __vb2_free_mem(struct vb2_queue *q, unsigned int start, unsigned int count)
drivers/media/common/videobuf2/videobuf2-core.c
565
vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
570
if (q->memory == VB2_MEMORY_MMAP)
drivers/media/common/videobuf2/videobuf2-core.c
572
else if (q->memory == VB2_MEMORY_DMABUF)
drivers/media/common/videobuf2/videobuf2-core.c
584
static void __vb2_queue_free(struct vb2_queue *q, unsigned int start, unsigned int count)
drivers/media/common/videobuf2/videobuf2-core.c
588
lockdep_assert_held(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
592
struct vb2_buffer *vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
599
__vb2_free_mem(q, start, count);
drivers/media/common/videobuf2/videobuf2-core.c
606
if (vb2_get_num_buffers(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
607
bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
drivers/media/common/videobuf2/videobuf2-core.c
608
q->cnt_prepare_streaming != q->cnt_unprepare_streaming;
drivers/media/common/videobuf2/videobuf2-core.c
611
pr_info("unbalanced counters for queue %p:\n", q);
drivers/media/common/videobuf2/videobuf2-core.c
612
if (q->cnt_start_streaming != q->cnt_stop_streaming)
drivers/media/common/videobuf2/videobuf2-core.c
614
q->cnt_queue_setup, q->cnt_start_streaming,
drivers/media/common/videobuf2/videobuf2-core.c
615
q->cnt_stop_streaming);
drivers/media/common/videobuf2/videobuf2-core.c
616
if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming)
drivers/media/common/videobuf2/videobuf2-core.c
618
q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
drivers/media/common/videobuf2/videobuf2-core.c
620
q->cnt_queue_setup = 0;
drivers/media/common/videobuf2/videobuf2-core.c
621
q->cnt_prepare_streaming = 0;
drivers/media/common/videobuf2/videobuf2-core.c
622
q->cnt_start_streaming = 0;
drivers/media/common/videobuf2/videobuf2-core.c
623
q->cnt_stop_streaming = 0;
drivers/media/common/videobuf2/videobuf2-core.c
624
q->cnt_unprepare_streaming = 0;
drivers/media/common/videobuf2/videobuf2-core.c
627
struct vb2_buffer *vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
644
q, i);
drivers/media/common/videobuf2/videobuf2-core.c
679
struct vb2_buffer *vb = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-core.c
688
if (!vb2_get_num_buffers(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
689
q->memory = VB2_MEMORY_UNKNOWN;
drivers/media/common/videobuf2/videobuf2-core.c
690
INIT_LIST_HEAD(&q->queued_list);
drivers/media/common/videobuf2/videobuf2-core.c
694
bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
drivers/media/common/videobuf2/videobuf2-core.c
716
static bool __buffers_in_use(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
719
for (buffer = 0; buffer < q->max_num_buffers; ++buffer) {
drivers/media/common/videobuf2/videobuf2-core.c
720
struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
drivers/media/common/videobuf2/videobuf2-core.c
725
if (vb2_buffer_in_use(q, vb))
drivers/media/common/videobuf2/videobuf2-core.c
731
void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
drivers/media/common/videobuf2/videobuf2-core.c
733
call_void_bufop(q, fill_user_buffer, vb, pb);
drivers/media/common/videobuf2/videobuf2-core.c
741
static int __verify_userptr_ops(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
743
if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
drivers/media/common/videobuf2/videobuf2-core.c
744
!q->mem_ops->put_userptr)
drivers/media/common/videobuf2/videobuf2-core.c
754
static int __verify_mmap_ops(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
756
if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
drivers/media/common/videobuf2/videobuf2-core.c
757
!q->mem_ops->put || !q->mem_ops->mmap)
drivers/media/common/videobuf2/videobuf2-core.c
767
static int __verify_dmabuf_ops(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
769
if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
drivers/media/common/videobuf2/videobuf2-core.c
770
!q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
drivers/media/common/videobuf2/videobuf2-core.c
771
!q->mem_ops->unmap_dmabuf)
drivers/media/common/videobuf2/videobuf2-core.c
777
int vb2_verify_memory_type(struct vb2_queue *q,
drivers/media/common/videobuf2/videobuf2-core.c
782
dprintk(q, 1, "unsupported memory type\n");
drivers/media/common/videobuf2/videobuf2-core.c
786
if (type != q->type) {
drivers/media/common/videobuf2/videobuf2-core.c
787
dprintk(q, 1, "requested type is incorrect\n");
drivers/media/common/videobuf2/videobuf2-core.c
795
if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
796
dprintk(q, 1, "MMAP for current setup unsupported\n");
drivers/media/common/videobuf2/videobuf2-core.c
800
if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
801
dprintk(q, 1, "USERPTR for current setup unsupported\n");
drivers/media/common/videobuf2/videobuf2-core.c
805
if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
806
dprintk(q, 1, "DMABUF for current setup unsupported\n");
drivers/media/common/videobuf2/videobuf2-core.c
815
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-core.c
816
dprintk(q, 1, "file io in progress\n");
drivers/media/common/videobuf2/videobuf2-core.c
823
static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
drivers/media/common/videobuf2/videobuf2-core.c
825
q->non_coherent_mem = 0;
drivers/media/common/videobuf2/videobuf2-core.c
827
if (!vb2_queue_allows_cache_hints(q))
drivers/media/common/videobuf2/videobuf2-core.c
829
q->non_coherent_mem = non_coherent_mem;
drivers/media/common/videobuf2/videobuf2-core.c
832
static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
drivers/media/common/videobuf2/videobuf2-core.c
834
if (non_coherent_mem != q->non_coherent_mem) {
drivers/media/common/videobuf2/videobuf2-core.c
835
dprintk(q, 1, "memory coherency model mismatch\n");
drivers/media/common/videobuf2/videobuf2-core.c
841
static int vb2_core_allocated_buffers_storage(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
843
if (!q->bufs)
drivers/media/common/videobuf2/videobuf2-core.c
844
q->bufs = kzalloc_objs(*q->bufs, q->max_num_buffers);
drivers/media/common/videobuf2/videobuf2-core.c
845
if (!q->bufs)
drivers/media/common/videobuf2/videobuf2-core.c
848
if (!q->bufs_bitmap)
drivers/media/common/videobuf2/videobuf2-core.c
849
q->bufs_bitmap = bitmap_zalloc(q->max_num_buffers, GFP_KERNEL);
drivers/media/common/videobuf2/videobuf2-core.c
850
if (!q->bufs_bitmap) {
drivers/media/common/videobuf2/videobuf2-core.c
851
kfree(q->bufs);
drivers/media/common/videobuf2/videobuf2-core.c
852
q->bufs = NULL;
drivers/media/common/videobuf2/videobuf2-core.c
859
static void vb2_core_free_buffers_storage(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-core.c
861
kfree(q->bufs);
drivers/media/common/videobuf2/videobuf2-core.c
862
q->bufs = NULL;
drivers/media/common/videobuf2/videobuf2-core.c
863
bitmap_free(q->bufs_bitmap);
drivers/media/common/videobuf2/videobuf2-core.c
864
q->bufs_bitmap = NULL;
drivers/media/common/videobuf2/videobuf2-core.c
867
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
drivers/media/common/videobuf2/videobuf2-core.c
871
unsigned int q_num_bufs = vb2_get_num_buffers(q);
drivers/media/common/videobuf2/videobuf2-core.c
877
if (q->streaming) {
drivers/media/common/videobuf2/videobuf2-core.c
878
dprintk(q, 1, "streaming active\n");
drivers/media/common/videobuf2/videobuf2-core.c
882
if (q->waiting_in_dqbuf && *count) {
drivers/media/common/videobuf2/videobuf2-core.c
883
dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
drivers/media/common/videobuf2/videobuf2-core.c
888
(q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
drivers/media/common/videobuf2/videobuf2-core.c
889
!verify_coherency_flags(q, non_coherent_mem)) {
drivers/media/common/videobuf2/videobuf2-core.c
894
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
895
if (debug && q->memory == VB2_MEMORY_MMAP &&
drivers/media/common/videobuf2/videobuf2-core.c
896
__buffers_in_use(q))
drivers/media/common/videobuf2/videobuf2-core.c
897
dprintk(q, 1, "memory in use, orphaning buffers\n");
drivers/media/common/videobuf2/videobuf2-core.c
904
__vb2_queue_cancel(q);
drivers/media/common/videobuf2/videobuf2-core.c
905
__vb2_queue_free(q, 0, q->max_num_buffers);
drivers/media/common/videobuf2/videobuf2-core.c
906
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
908
q->is_busy = 0;
drivers/media/common/videobuf2/videobuf2-core.c
920
num_buffers = max_t(unsigned int, *count, q->min_reqbufs_allocation);
drivers/media/common/videobuf2/videobuf2-core.c
921
num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers);
drivers/media/common/videobuf2/videobuf2-core.c
922
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
drivers/media/common/videobuf2/videobuf2-core.c
927
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
928
ret = vb2_core_allocated_buffers_storage(q);
drivers/media/common/videobuf2/videobuf2-core.c
929
q->memory = memory;
drivers/media/common/videobuf2/videobuf2-core.c
930
mutex_unlock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-core.c
933
set_queue_coherency(q, non_coherent_mem);
drivers/media/common/videobuf2/videobuf2-core.c
939
ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
drivers/media/common/videobuf2/videobuf2-core.c
940
plane_sizes, q->alloc_devs);
drivers/media/common/videobuf2/videobuf2-core.c
958
__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes, &first_index);
drivers/media/common/videobuf2/videobuf2-core.c
962
dprintk(q, 1, "memory allocation failed\n");
drivers/media/common/videobuf2/videobuf2-core.c
971
if (allocated_buffers < q->min_reqbufs_allocation)
drivers/media/common/videobuf2/videobuf2-core.c
987
ret = call_qop(q, queue_setup, q, &num_buffers,
drivers/media/common/videobuf2/videobuf2-core.c
988
&num_planes, plane_sizes, q->alloc_devs);
drivers/media/common/videobuf2/videobuf2-core.c
999
mutex_lock(&q->mmap_lock);
drivers/media/common/videobuf2/videobuf2-dma-contig.c
193
struct vb2_queue *q = buf->vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
198
GFP_KERNEL | q->gfp_flags,
drivers/media/common/videobuf2/videobuf2-dma-contig.c
203
if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
drivers/media/common/videobuf2/videobuf2-dma-contig.c
212
struct vb2_queue *q = buf->vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-dma-contig.c
217
GFP_KERNEL | q->gfp_flags,
drivers/media/common/videobuf2/videobuf2-dvb.c
201
struct list_head *list, *q;
drivers/media/common/videobuf2/videobuf2-dvb.c
221
list_for_each_safe(list, q, &f->felist) {
drivers/media/common/videobuf2/videobuf2-dvb.c
255
struct list_head *list, *q;
drivers/media/common/videobuf2/videobuf2-dvb.c
260
list_for_each_safe(list, q, &f->felist) {
drivers/media/common/videobuf2/videobuf2-dvb.c
277
struct list_head *list, *q;
drivers/media/common/videobuf2/videobuf2-dvb.c
283
list_for_each_safe(list, q, &f->felist) {
drivers/media/common/videobuf2/videobuf2-dvb.c
318
struct list_head *list, *q;
drivers/media/common/videobuf2/videobuf2-dvb.c
322
list_for_each_safe(list, q, &f->felist) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
1229
struct vb2_queue *q = vdev->queue;
drivers/media/common/videobuf2/videobuf2-v4l2.c
1230
struct mutex *lock = q->lock ? q->lock : vdev->lock;
drivers/media/common/videobuf2/videobuf2-v4l2.c
1243
fileio = q->fileio;
drivers/media/common/videobuf2/videobuf2-v4l2.c
1248
if (!fileio && q->fileio)
drivers/media/common/videobuf2/videobuf2-v4l2.c
1249
q->owner = file->private_data;
drivers/media/common/videobuf2/videobuf2-v4l2.c
145
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-v4l2.c
147
if (q->is_output) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
152
if (q->copy_timestamp)
drivers/media/common/videobuf2/videobuf2-v4l2.c
178
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-v4l2.c
186
dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
drivers/media/common/videobuf2/videobuf2-v4l2.c
189
if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
199
dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
332
if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
drivers/media/common/videobuf2/videobuf2-v4l2.c
344
static void set_buffer_cache_hints(struct vb2_queue *q,
drivers/media/common/videobuf2/videobuf2-v4l2.c
348
if (!vb2_queue_allows_cache_hints(q)) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
366
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
drivers/media/common/videobuf2/videobuf2-v4l2.c
375
if (b->type != q->type) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
376
dprintk(q, 1, "%s: invalid buffer type\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
380
if (b->memory != q->memory) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
381
dprintk(q, 1, "%s: invalid memory type\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
39
#define dprintk(q, level, fmt, arg...) \
drivers/media/common/videobuf2/videobuf2-v4l2.c
392
dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
397
set_buffer_cache_hints(q, vb, b);
drivers/media/common/videobuf2/videobuf2-v4l2.c
410
if (q->requires_requests) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
411
dprintk(q, 1, "%s: queue requires requests\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
414
if (q->uses_requests) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
415
dprintk(q, 1, "%s: queue uses requests\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
419
} else if (!q->supports_requests) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
420
dprintk(q, 1, "%s: queue does not support requests\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
422
} else if (q->uses_qbuf) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
423
dprintk(q, 1, "%s: queue does not use requests\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
43
(q)->name, __func__, ## arg); \
drivers/media/common/videobuf2/videobuf2-v4l2.c
432
if (WARN_ON(!q->lock || !p_req))
drivers/media/common/videobuf2/videobuf2-v4l2.c
440
if (WARN_ON(!q->ops->buf_request_complete))
drivers/media/common/videobuf2/videobuf2-v4l2.c
447
if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
drivers/media/common/videobuf2/videobuf2-v4l2.c
448
q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
drivers/media/common/videobuf2/videobuf2-v4l2.c
449
!q->ops->buf_out_validate))
drivers/media/common/videobuf2/videobuf2-v4l2.c
454
dprintk(q, 1, "%s: invalid request_fd\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
464
dprintk(q, 1, "%s: request is not idle\n", opname);
drivers/media/common/videobuf2/videobuf2-v4l2.c
483
struct vb2_queue *q = vb->vb2_queue;
drivers/media/common/videobuf2/videobuf2-v4l2.c
500
if (q->is_multiplanar) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
512
if (q->memory == VB2_MEMORY_MMAP)
drivers/media/common/videobuf2/videobuf2-v4l2.c
514
else if (q->memory == VB2_MEMORY_USERPTR)
drivers/media/common/videobuf2/videobuf2-v4l2.c
516
else if (q->memory == VB2_MEMORY_DMABUF)
drivers/media/common/videobuf2/videobuf2-v4l2.c
528
if (q->memory == VB2_MEMORY_MMAP)
drivers/media/common/videobuf2/videobuf2-v4l2.c
530
else if (q->memory == VB2_MEMORY_USERPTR)
drivers/media/common/videobuf2/videobuf2-v4l2.c
532
else if (q->memory == VB2_MEMORY_DMABUF)
drivers/media/common/videobuf2/videobuf2-v4l2.c
540
b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
drivers/media/common/videobuf2/videobuf2-v4l2.c
541
if (!q->copy_timestamp) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
547
b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
drivers/media/common/videobuf2/videobuf2-v4l2.c
575
if (vb2_buffer_in_use(q, vb))
drivers/media/common/videobuf2/videobuf2-v4l2.c
615
struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
drivers/media/common/videobuf2/videobuf2-v4l2.c
624
for (i = 0; i < q->max_num_buffers; i++) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
625
vb2 = vb2_get_buffer(q, i);
drivers/media/common/videobuf2/videobuf2-v4l2.c
651
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
drivers/media/common/videobuf2/videobuf2-v4l2.c
656
if (b->type != q->type) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
657
dprintk(q, 1, "wrong buffer type\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
661
vb = vb2_get_buffer(q, b->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
663
dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
669
vb2_core_querybuf(q, vb, b);
drivers/media/common/videobuf2/videobuf2-v4l2.c
674
static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
drivers/media/common/videobuf2/videobuf2-v4l2.c
677
if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
689
if (q->io_modes & VB2_MMAP)
drivers/media/common/videobuf2/videobuf2-v4l2.c
691
if (q->io_modes & VB2_USERPTR)
drivers/media/common/videobuf2/videobuf2-v4l2.c
693
if (q->io_modes & VB2_DMABUF)
drivers/media/common/videobuf2/videobuf2-v4l2.c
695
if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
drivers/media/common/videobuf2/videobuf2-v4l2.c
697
if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
drivers/media/common/videobuf2/videobuf2-v4l2.c
699
if (q->supports_requests)
drivers/media/common/videobuf2/videobuf2-v4l2.c
702
*max_num_bufs = q->max_num_buffers;
drivers/media/common/videobuf2/videobuf2-v4l2.c
707
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
drivers/media/common/videobuf2/videobuf2-v4l2.c
709
int ret = vb2_verify_memory_type(q, req->memory, req->type);
drivers/media/common/videobuf2/videobuf2-v4l2.c
712
vb2_set_flags_and_caps(q, req->memory, &flags,
drivers/media/common/videobuf2/videobuf2-v4l2.c
715
return ret ? ret : vb2_core_reqbufs(q, req->memory,
drivers/media/common/videobuf2/videobuf2-v4l2.c
720
int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
drivers/media/common/videobuf2/videobuf2-v4l2.c
726
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
727
dprintk(q, 1, "file io in progress\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
734
vb = vb2_get_buffer(q, b->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
736
dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
740
ret = vb2_queue_or_prepare_buf(q, mdev, vb, b, true, NULL);
drivers/media/common/videobuf2/videobuf2-v4l2.c
742
return ret ? ret : vb2_core_prepare_buf(q, vb, b);
drivers/media/common/videobuf2/videobuf2-v4l2.c
746
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
drivers/media/common/videobuf2/videobuf2-v4l2.c
751
int ret = vb2_verify_memory_type(q, create->memory, f->type);
drivers/media/common/videobuf2/videobuf2-v4l2.c
754
create->index = vb2_get_num_buffers(q);
drivers/media/common/videobuf2/videobuf2-v4l2.c
755
vb2_set_flags_and_caps(q, create->memory, &create->flags,
drivers/media/common/videobuf2/videobuf2-v4l2.c
801
return vb2_core_create_bufs(q, create->memory,
drivers/media/common/videobuf2/videobuf2-v4l2.c
810
int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
drivers/media/common/videobuf2/videobuf2-v4l2.c
817
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
818
dprintk(q, 1, "file io in progress\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
822
vb = vb2_get_buffer(q, b->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
824
dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
828
ret = vb2_queue_or_prepare_buf(q, mdev, vb, b, false, &req);
drivers/media/common/videobuf2/videobuf2-v4l2.c
831
ret = vb2_core_qbuf(q, vb, b, req);
drivers/media/common/videobuf2/videobuf2-v4l2.c
838
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
drivers/media/common/videobuf2/videobuf2-v4l2.c
842
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
843
dprintk(q, 1, "file io in progress\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
847
if (b->type != q->type) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
848
dprintk(q, 1, "invalid buffer type\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
852
ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
drivers/media/common/videobuf2/videobuf2-v4l2.c
854
if (!q->is_output &&
drivers/media/common/videobuf2/videobuf2-v4l2.c
857
q->last_buffer_dequeued = true;
drivers/media/common/videobuf2/videobuf2-v4l2.c
869
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
drivers/media/common/videobuf2/videobuf2-v4l2.c
871
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
872
dprintk(q, 1, "file io in progress\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
875
return vb2_core_streamon(q, type);
drivers/media/common/videobuf2/videobuf2-v4l2.c
879
int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
drivers/media/common/videobuf2/videobuf2-v4l2.c
881
if (vb2_fileio_is_active(q)) {
drivers/media/common/videobuf2/videobuf2-v4l2.c
882
dprintk(q, 1, "file io in progress\n");
drivers/media/common/videobuf2/videobuf2-v4l2.c
885
return vb2_core_streamoff(q, type);
drivers/media/common/videobuf2/videobuf2-v4l2.c
889
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
drivers/media/common/videobuf2/videobuf2-v4l2.c
893
vb = vb2_get_buffer(q, eb->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
895
dprintk(q, 1, "can't find the requested buffer %u\n", eb->index);
drivers/media/common/videobuf2/videobuf2-v4l2.c
899
return vb2_core_expbuf(q, &eb->fd, eb->type, vb,
drivers/media/common/videobuf2/videobuf2-v4l2.c
904
int vb2_queue_init_name(struct vb2_queue *q, const char *name)
drivers/media/common/videobuf2/videobuf2-v4l2.c
914
if (WARN_ON(!q) ||
drivers/media/common/videobuf2/videobuf2-v4l2.c
915
WARN_ON(q->timestamp_flags &
drivers/media/common/videobuf2/videobuf2-v4l2.c
921
WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
drivers/media/common/videobuf2/videobuf2-v4l2.c
924
if (q->buf_struct_size == 0)
drivers/media/common/videobuf2/videobuf2-v4l2.c
925
q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
drivers/media/common/videobuf2/videobuf2-v4l2.c
927
q->buf_ops = &v4l2_buf_ops;
drivers/media/common/videobuf2/videobuf2-v4l2.c
928
q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
drivers/media/common/videobuf2/videobuf2-v4l2.c
929
q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
drivers/media/common/videobuf2/videobuf2-v4l2.c
930
q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
drivers/media/common/videobuf2/videobuf2-v4l2.c
937
q->quirk_poll_must_check_waiting_for_buffers = true;
drivers/media/common/videobuf2/videobuf2-v4l2.c
940
strscpy(q->name, name, sizeof(q->name));
drivers/media/common/videobuf2/videobuf2-v4l2.c
942
q->name[0] = '\0';
drivers/media/common/videobuf2/videobuf2-v4l2.c
944
return vb2_core_queue_init(q);
drivers/media/common/videobuf2/videobuf2-v4l2.c
948
int vb2_queue_init(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-v4l2.c
950
return vb2_queue_init_name(q, NULL);
drivers/media/common/videobuf2/videobuf2-v4l2.c
954
void vb2_queue_release(struct vb2_queue *q)
drivers/media/common/videobuf2/videobuf2-v4l2.c
956
vb2_core_queue_release(q);
drivers/media/common/videobuf2/videobuf2-v4l2.c
960
int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
drivers/media/common/videobuf2/videobuf2-v4l2.c
962
if (type == q->type)
drivers/media/common/videobuf2/videobuf2-v4l2.c
965
if (vb2_is_busy(q))
drivers/media/common/videobuf2/videobuf2-v4l2.c
968
q->type = type;
drivers/media/common/videobuf2/videobuf2-v4l2.c
974
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
drivers/media/common/videobuf2/videobuf2-v4l2.c
979
res = vb2_core_poll(q, file, wait);
drivers/media/dvb-core/dvb_demux.c
541
const u8 *q;
drivers/media/dvb-core/dvb_demux.c
568
q = &buf[p];
drivers/media/dvb-core/dvb_demux.c
570
if (pktsize == 204 && (*q == 0xB8)) {
drivers/media/dvb-core/dvb_demux.c
571
memcpy(demux->tsbuf, q, 188);
drivers/media/dvb-core/dvb_demux.c
573
q = demux->tsbuf;
drivers/media/dvb-core/dvb_demux.c
575
dvb_dmx_swfilter_packet(demux, q);
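The dvb_demux.c hits above show 204-byte transport packets (188 TS bytes plus 16 Reed-Solomon parity bytes) being normalized before filtering. A sketch of the recoverable logic — reading 0xB8 as a remapped sync byte that must be restored to 0x47 is an interpretation, not confirmed by the index:

    q = &buf[p];
    if (pktsize == 204 && (*q == 0xB8)) {
        memcpy(demux->tsbuf, q, 188);  /* keep only the 188-byte TS packet */
        demux->tsbuf[0] = 0x47;        /* restore normal sync byte (assumed) */
        q = demux->tsbuf;
    }
    dvb_dmx_swfilter_packet(demux, q);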
drivers/media/dvb-core/dvb_vb2.c
147
struct vb2_queue *q = &ctx->vb_q;
drivers/media/dvb-core/dvb_vb2.c
151
q->type = DVB_BUF_TYPE_CAPTURE;
drivers/media/dvb-core/dvb_vb2.c
153
q->io_modes = VB2_MMAP;
drivers/media/dvb-core/dvb_vb2.c
154
q->drv_priv = ctx;
drivers/media/dvb-core/dvb_vb2.c
155
q->buf_struct_size = sizeof(struct dvb_buffer);
drivers/media/dvb-core/dvb_vb2.c
156
q->min_queued_buffers = 1;
drivers/media/dvb-core/dvb_vb2.c
157
q->ops = &dvb_vb2_qops;
drivers/media/dvb-core/dvb_vb2.c
158
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/dvb-core/dvb_vb2.c
159
q->buf_ops = &dvb_vb2_buf_ops;
drivers/media/dvb-core/dvb_vb2.c
160
q->lock = mutex;
drivers/media/dvb-core/dvb_vb2.c
168
ret = vb2_core_queue_init(q);
drivers/media/dvb-core/dvb_vb2.c
182
struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
drivers/media/dvb-core/dvb_vb2.c
185
vb2_core_queue_release(q);
drivers/media/dvb-core/dvb_vb2.c
195
struct vb2_queue *q = &ctx->vb_q;
drivers/media/dvb-core/dvb_vb2.c
198
ret = vb2_core_streamon(q, q->type);
drivers/media/dvb-core/dvb_vb2.c
212
struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
drivers/media/dvb-core/dvb_vb2.c
216
ret = vb2_core_streamoff(q, q->type);
drivers/media/dvb-core/dvb_vb2.c
337
struct vb2_queue *q = &ctx->vb_q;
drivers/media/dvb-core/dvb_vb2.c
338
struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);
drivers/media/dvb-core/dvb_vb2.c
351
struct vb2_queue *q = &ctx->vb_q;
drivers/media/dvb-core/dvb_vb2.c
352
struct vb2_buffer *vb2 = vb2_get_buffer(q, exp->index);
drivers/media/dvb-core/dvb_vb2.c
360
ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, vb2,
drivers/media/dvb-core/dvb_vb2.c
374
struct vb2_queue *q = &ctx->vb_q;
drivers/media/dvb-core/dvb_vb2.c
375
struct vb2_buffer *vb2 = vb2_get_buffer(q, b->index);
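The dvb_vb2.c init hits (dvb_vb2.c:147-168) reassemble into a compact example of the minimum a vb2_core user fills in before calling vb2_core_queue_init(); only the error check at the end is assumed:

    struct vb2_queue *q = &ctx->vb_q;

    q->type = DVB_BUF_TYPE_CAPTURE;
    q->io_modes = VB2_MMAP;
    q->drv_priv = ctx;
    q->buf_struct_size = sizeof(struct dvb_buffer);
    q->min_queued_buffers = 1;
    q->ops = &dvb_vb2_qops;
    q->mem_ops = &vb2_vmalloc_memops;
    q->buf_ops = &dvb_vb2_buf_ops;
    q->lock = mutex;
    ret = vb2_core_queue_init(q);
    if (ret)               /* assumed error propagation */
        return ret;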
drivers/media/dvb-frontends/rtl2832_sdr.c
1143
struct vb2_queue *q = &dev->vb_queue;
drivers/media/dvb-frontends/rtl2832_sdr.c
1149
if (vb2_is_busy(q))
drivers/media/dvb-frontends/sp887x.c
287
unsigned int q, r;
drivers/media/dvb-frontends/sp887x.c
290
q = (r / d);
drivers/media/dvb-frontends/sp887x.c
293
*quotient_i = q;
drivers/media/dvb-frontends/sp887x.c
297
q = (q << 8) | (r / d);
drivers/media/dvb-frontends/sp887x.c
299
*quotient_f = (q << 8) | (r / d);
drivers/media/i2c/adv7511-v4l2.c
1293
u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
drivers/media/i2c/adv7511-v4l2.c
1396
q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
drivers/media/i2c/adv7511-v4l2.c
1398
yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
drivers/media/i2c/adv7511-v4l2.c
1401
q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
drivers/media/i2c/adv7511-v4l2.c
1403
yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
drivers/media/i2c/adv7511-v4l2.c
1410
adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
drivers/media/i2c/cx25840/cx25840-core.c
1034
struct workqueue_struct *q;
drivers/media/i2c/cx25840/cx25840-core.c
1085
q = create_singlethread_workqueue("cx25840_fw");
drivers/media/i2c/cx25840/cx25840-core.c
1086
if (q) {
drivers/media/i2c/cx25840/cx25840-core.c
1088
queue_work(q, &state->fw_work);
drivers/media/i2c/cx25840/cx25840-core.c
1091
destroy_workqueue(q);
drivers/media/i2c/cx25840/cx25840-core.c
697
struct workqueue_struct *q;
drivers/media/i2c/cx25840/cx25840-core.c
723
q = create_singlethread_workqueue("cx25840_fw");
drivers/media/i2c/cx25840/cx25840-core.c
724
if (q) {
drivers/media/i2c/cx25840/cx25840-core.c
726
queue_work(q, &state->fw_work);
drivers/media/i2c/cx25840/cx25840-core.c
729
destroy_workqueue(q);
drivers/media/i2c/cx25840/cx25840-core.c
775
struct workqueue_struct *q;
drivers/media/i2c/cx25840/cx25840-core.c
960
q = create_singlethread_workqueue("cx25840_fw");
drivers/media/i2c/cx25840/cx25840-core.c
961
if (q) {
drivers/media/i2c/cx25840/cx25840-core.c
963
queue_work(q, &state->fw_work);
drivers/media/i2c/cx25840/cx25840-core.c
966
destroy_workqueue(q);
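cx25840-core.c repeats one idiom at three call sites: a single-threaded workqueue created solely to run a firmware-load work item, then destroyed. A sketch of the shape — the driver's own wait for completion between queueing and teardown is elided in the index and omitted here:

    struct workqueue_struct *q;

    q = create_singlethread_workqueue("cx25840_fw");
    if (q) {
        queue_work(q, &state->fw_work);
        /* the driver waits for fw_work to finish here (elided);
         * destroy_workqueue() drains remaining work before freeing */
        destroy_workqueue(q);
    }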
drivers/media/pci/bt8xx/bttv-driver.c
1465
static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/pci/bt8xx/bttv-driver.c
1469
struct bttv *btv = vb2_get_drv_priv(q);
drivers/media/pci/bt8xx/bttv-driver.c
1537
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/bt8xx/bttv-driver.c
1541
struct bttv *btv = vb2_get_drv_priv(q);
drivers/media/pci/bt8xx/bttv-driver.c
1565
static void stop_streaming(struct vb2_queue *q)
drivers/media/pci/bt8xx/bttv-driver.c
1568
struct bttv *btv = vb2_get_drv_priv(q);
drivers/media/pci/bt8xx/bttv-driver.c
1570
vb2_wait_for_all_buffers(q);
drivers/media/pci/bt8xx/bttv-driver.c
1941
struct vb2_queue *q;
drivers/media/pci/bt8xx/bttv-driver.c
1945
q = &btv->capq;
drivers/media/pci/bt8xx/bttv-driver.c
1949
q = &btv->vbiq;
drivers/media/pci/bt8xx/bttv-driver.c
1959
if (vb2_is_busy(q))
drivers/media/pci/bt8xx/bttv-driver.c
3078
struct vb2_queue *q;
drivers/media/pci/bt8xx/bttv-driver.c
3097
q = &btv->capq;
drivers/media/pci/bt8xx/bttv-driver.c
3098
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/bt8xx/bttv-driver.c
3099
q->ops = &bttv_video_qops;
drivers/media/pci/bt8xx/bttv-driver.c
3101
q = &btv->vbiq;
drivers/media/pci/bt8xx/bttv-driver.c
3102
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/pci/bt8xx/bttv-driver.c
3103
q->ops = &bttv_vbi_qops;
drivers/media/pci/bt8xx/bttv-driver.c
3107
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/bt8xx/bttv-driver.c
3108
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ | VB2_DMABUF;
drivers/media/pci/bt8xx/bttv-driver.c
3109
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/bt8xx/bttv-driver.c
3110
q->drv_priv = btv;
drivers/media/pci/bt8xx/bttv-driver.c
3111
q->gfp_flags = __GFP_DMA32;
drivers/media/pci/bt8xx/bttv-driver.c
3112
q->buf_struct_size = sizeof(struct bttv_buffer);
drivers/media/pci/bt8xx/bttv-driver.c
3113
q->lock = &btv->lock;
drivers/media/pci/bt8xx/bttv-driver.c
3114
q->min_queued_buffers = 2;
drivers/media/pci/bt8xx/bttv-driver.c
3115
q->dev = &btv->c.pci->dev;
drivers/media/pci/bt8xx/bttv-driver.c
3116
err = vb2_queue_init(q);
drivers/media/pci/bt8xx/bttv-driver.c
3119
vfd->queue = q;
drivers/media/pci/bt8xx/bttv-vbi.c
124
static int start_streaming_vbi(struct vb2_queue *q, unsigned int count)
drivers/media/pci/bt8xx/bttv-vbi.c
128
struct bttv *btv = vb2_get_drv_priv(q);
drivers/media/pci/bt8xx/bttv-vbi.c
151
static void stop_streaming_vbi(struct vb2_queue *q)
drivers/media/pci/bt8xx/bttv-vbi.c
153
struct bttv *btv = vb2_get_drv_priv(q);
drivers/media/pci/bt8xx/bttv-vbi.c
156
vb2_wait_for_all_buffers(q);
drivers/media/pci/bt8xx/bttv-vbi.c
60
static int queue_setup_vbi(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/pci/bt8xx/bttv-vbi.c
64
struct bttv *btv = vb2_get_drv_priv(q);
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
220
rc = vb2_thread_start(&s->q, alsa_fnc, s, s->vdev.name);
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
236
vb2_thread_stop(&s->q);
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
381
rc = vb2_thread_start(&s->q, alsa_pb_fnc, s, s->vdev.name);
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
398
vb2_thread_stop(&s->q);
drivers/media/pci/cobalt/cobalt-alsa-pcm.c
468
s->q.gfp_flags |= __GFP_ZERO;
drivers/media/pci/cobalt/cobalt-driver.h
211
struct vb2_queue q;
drivers/media/pci/cobalt/cobalt-irq.c
167
if ((edge & mask & dma_fifo_mask) && vb2_is_streaming(&s->q)) {
drivers/media/pci/cobalt/cobalt-v4l2.c
1204
struct vb2_queue *q = &s->q;
drivers/media/pci/cobalt/cobalt-v4l2.c
123
struct vb2_queue *q = vb->vb2_queue;
drivers/media/pci/cobalt/cobalt-v4l2.c
124
struct cobalt_stream *s = q->drv_priv;
drivers/media/pci/cobalt/cobalt-v4l2.c
1252
q->type = s->is_output ? V4L2_BUF_TYPE_VIDEO_OUTPUT :
drivers/media/pci/cobalt/cobalt-v4l2.c
1254
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/pci/cobalt/cobalt-v4l2.c
1255
q->io_modes |= s->is_output ? VB2_WRITE : VB2_READ;
drivers/media/pci/cobalt/cobalt-v4l2.c
1256
q->drv_priv = s;
drivers/media/pci/cobalt/cobalt-v4l2.c
1257
q->buf_struct_size = sizeof(struct cobalt_buffer);
drivers/media/pci/cobalt/cobalt-v4l2.c
1258
q->ops = &cobalt_qops;
drivers/media/pci/cobalt/cobalt-v4l2.c
1259
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cobalt/cobalt-v4l2.c
1260
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cobalt/cobalt-v4l2.c
1261
q->min_queued_buffers = 2;
drivers/media/pci/cobalt/cobalt-v4l2.c
1262
q->lock = &s->lock;
drivers/media/pci/cobalt/cobalt-v4l2.c
1263
q->dev = &cobalt->pci_dev->dev;
drivers/media/pci/cobalt/cobalt-v4l2.c
1264
vdev->queue = q;
drivers/media/pci/cobalt/cobalt-v4l2.c
1272
ret = vb2_queue_init(q);
drivers/media/pci/cobalt/cobalt-v4l2.c
277
static int cobalt_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cobalt/cobalt-v4l2.c
279
struct cobalt_stream *s = q->drv_priv;
drivers/media/pci/cobalt/cobalt-v4l2.c
34
static int cobalt_queue_setup(struct vb2_queue *q,
drivers/media/pci/cobalt/cobalt-v4l2.c
375
if (!wait_event_timeout(s->q.done_wq, is_dma_done(s),
drivers/media/pci/cobalt/cobalt-v4l2.c
38
struct cobalt_stream *s = q->drv_priv;
drivers/media/pci/cobalt/cobalt-v4l2.c
384
static void cobalt_stop_streaming(struct vb2_queue *q)
drivers/media/pci/cobalt/cobalt-v4l2.c
386
struct cobalt_stream *s = q->drv_priv;
drivers/media/pci/cobalt/cobalt-v4l2.c
630
if (vb2_is_busy(&s->q))
drivers/media/pci/cobalt/cobalt-v4l2.c
796
if (vb2_is_busy(&s->q))
drivers/media/pci/cobalt/cobalt-v4l2.c
915
if (vb2_is_busy(&s->q) && (pix->pixelformat != s->pixfmt ||
drivers/media/pci/cobalt/cobalt-v4l2.c
980
if (vb2_is_busy(&s->q))
drivers/media/pci/cx18/cx18-fileops.c
291
const u8 *q;
drivers/media/pci/cx18/cx18-fileops.c
297
q = memchr(p, 0, start + len - p);
drivers/media/pci/cx18/cx18-fileops.c
298
if (q == NULL)
drivers/media/pci/cx18/cx18-fileops.c
300
p = q + 1;
drivers/media/pci/cx18/cx18-fileops.c
306
if ((char *)q + 15 >= buf->buf + buf->bytesused ||
drivers/media/pci/cx18/cx18-fileops.c
307
q[1] != 0 || q[2] != 1 || q[3] != ch)
drivers/media/pci/cx18/cx18-fileops.c
313
if ((q[6] & 0xc0) != 0x80)
drivers/media/pci/cx18/cx18-fileops.c
316
if (((q[7] & 0xc0) == 0x80 && /* PTS only */
drivers/media/pci/cx18/cx18-fileops.c
317
(q[9] & 0xf0) == 0x20) || /* PTS only */
drivers/media/pci/cx18/cx18-fileops.c
318
((q[7] & 0xc0) == 0xc0 && /* PTS & DTS */
drivers/media/pci/cx18/cx18-fileops.c
319
(q[9] & 0xf0) == 0x30)) { /* DTS follows */
drivers/media/pci/cx18/cx18-fileops.c
323
p = q + 9; /* Skip this video PES hdr */
drivers/media/pci/cx18/cx18-fileops.c
331
stuffing = q[13] & 7;
drivers/media/pci/cx18/cx18-fileops.c
334
if (q[14 + i] != 0xff)
drivers/media/pci/cx18/cx18-fileops.c
337
(q[4] & 0xc4) == 0x44 && /* marker check */
drivers/media/pci/cx18/cx18-fileops.c
338
(q[12] & 3) == 3 && /* marker check */
drivers/media/pci/cx18/cx18-fileops.c
339
q[14 + stuffing] == 0 && /* PES Pack or Sys Hdr */
drivers/media/pci/cx18/cx18-fileops.c
340
q[15 + stuffing] == 0 &&
drivers/media/pci/cx18/cx18-fileops.c
341
q[16 + stuffing] == 1) {
drivers/media/pci/cx18/cx18-fileops.c
344
len = (char *)q - start;
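
The cx18-fileops lines above implement a PES start-code scan: memchr() skips to each zero byte, and the 00 00 01 <stream id> prefix is then verified in place before the PTS/DTS flag checks. A stripped-down sketch of just the scan loop (find_pes_start is an illustrative name; the driver performs additional header and stuffing-byte validation):

#include <linux/string.h>
#include <linux/types.h>

static const u8 *find_pes_start(const u8 *buf, size_t len, u8 ch)
{
    const u8 *p = buf, *q;
    const u8 *end = buf + len;

    while (p < end && (q = memchr(p, 0, end - p))) {
        if (end - q >= 4 && q[1] == 0 && q[2] == 1 && q[3] == ch)
            return q;               /* 00 00 01 <stream id> found */
        p = q + 1;                  /* resume after this zero byte */
    }
    return NULL;
}
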
drivers/media/pci/cx18/cx18-queue.c
36
void cx18_queue_init(struct cx18_queue *q)
drivers/media/pci/cx18/cx18-queue.c
38
INIT_LIST_HEAD(&q->list);
drivers/media/pci/cx18/cx18-queue.c
39
atomic_set(&q->depth, 0);
drivers/media/pci/cx18/cx18-queue.c
40
q->bytesused = 0;
drivers/media/pci/cx18/cx18-queue.c
44
struct cx18_queue *q, int to_front)
drivers/media/pci/cx18/cx18-queue.c
47
if (q != &s->q_full) {
drivers/media/pci/cx18/cx18-queue.c
56
if (q == &s->q_busy &&
drivers/media/pci/cx18/cx18-queue.c
57
atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
drivers/media/pci/cx18/cx18-queue.c
58
q = &s->q_free;
drivers/media/pci/cx18/cx18-queue.c
60
spin_lock(&q->lock);
drivers/media/pci/cx18/cx18-queue.c
63
list_add(&mdl->list, &q->list); /* LIFO */
drivers/media/pci/cx18/cx18-queue.c
65
list_add_tail(&mdl->list, &q->list); /* FIFO */
drivers/media/pci/cx18/cx18-queue.c
66
q->bytesused += mdl->bytesused - mdl->readpos;
drivers/media/pci/cx18/cx18-queue.c
67
atomic_inc(&q->depth);
drivers/media/pci/cx18/cx18-queue.c
69
spin_unlock(&q->lock);
drivers/media/pci/cx18/cx18-queue.c
70
return q;
drivers/media/pci/cx18/cx18-queue.c
73
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
drivers/media/pci/cx18/cx18-queue.c
77
spin_lock(&q->lock);
drivers/media/pci/cx18/cx18-queue.c
78
if (!list_empty(&q->list)) {
drivers/media/pci/cx18/cx18-queue.c
79
mdl = list_first_entry(&q->list, struct cx18_mdl, list);
drivers/media/pci/cx18/cx18-queue.c
81
q->bytesused -= mdl->bytesused - mdl->readpos;
drivers/media/pci/cx18/cx18-queue.c
83
atomic_dec(&q->depth);
drivers/media/pci/cx18/cx18-queue.c
85
spin_unlock(&q->lock);
drivers/media/pci/cx18/cx18-queue.h
56
struct cx18_queue *q, int to_front);
drivers/media/pci/cx18/cx18-queue.h
60
struct cx18_queue *q)
drivers/media/pci/cx18/cx18-queue.h
62
return _cx18_enqueue(s, mdl, q, 0); /* FIFO */
drivers/media/pci/cx18/cx18-queue.h
67
struct cx18_queue *q)
drivers/media/pci/cx18/cx18-queue.h
69
return _cx18_enqueue(s, mdl, q, 1); /* LIFO */
drivers/media/pci/cx18/cx18-queue.h
72
void cx18_queue_init(struct cx18_queue *q);
drivers/media/pci/cx18/cx18-queue.h
73
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
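
Taken together, the cx18-queue lines above describe a spinlock-protected list with an atomic depth counter, where a to_front flag selects LIFO versus FIFO insertion. A condensed sketch with simplified stand-in types (simple_queue and simple_item are not the driver's types, and the bytesused accounting is omitted):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct simple_queue {
    struct list_head list;
    atomic_t depth;
    spinlock_t lock;
};

struct simple_item {
    struct list_head list;
};

static void simple_enqueue(struct simple_queue *q, struct simple_item *it,
                           int to_front)
{
    spin_lock(&q->lock);
    if (to_front)
        list_add(&it->list, &q->list);          /* LIFO */
    else
        list_add_tail(&it->list, &q->list);     /* FIFO */
    atomic_inc(&q->depth);
    spin_unlock(&q->lock);
}

static struct simple_item *simple_dequeue(struct simple_queue *q)
{
    struct simple_item *it = NULL;

    spin_lock(&q->lock);
    if (!list_empty(&q->list)) {
        it = list_first_entry(&q->list, struct simple_item, list);
        list_del_init(&it->list);
        atomic_dec(&q->depth);
    }
    spin_unlock(&q->lock);
    return it;
}
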
drivers/media/pci/cx18/cx18-streams.c
689
struct cx18_queue *q;
drivers/media/pci/cx18/cx18-streams.c
697
q = cx18_enqueue(s, mdl, &s->q_busy);
drivers/media/pci/cx18/cx18-streams.c
698
if (q != &s->q_busy)
drivers/media/pci/cx18/cx18-streams.c
699
return q; /* The firmware has the max MDLs it can handle */
drivers/media/pci/cx18/cx18-streams.c
705
return q;
drivers/media/pci/cx18/cx18-streams.c
711
struct cx18_queue *q;
drivers/media/pci/cx18/cx18-streams.c
723
q = _cx18_stream_put_mdl_fw(s, mdl);
drivers/media/pci/cx18/cx18-streams.c
725
&& q == &s->q_busy);
drivers/media/pci/cx18/cx18-vbi.c
116
memcpy(q, p + 4, line_size - 4 - hdr_size);
drivers/media/pci/cx18/cx18-vbi.c
117
q += line_size - 4 - hdr_size;
drivers/media/pci/cx18/cx18-vbi.c
119
memset(q, (int) *p, hdr_size);
drivers/media/pci/cx18/cx18-vbi.c
121
memcpy(q, p + 4, line_size - 4);
drivers/media/pci/cx18/cx18-vbi.c
122
q += line_size - 4;
drivers/media/pci/cx18/cx18-vbi.c
99
u8 *q = buf;
drivers/media/pci/cx23885/cx23885-417.c
1123
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx23885/cx23885-417.c
1127
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-417.c
1167
static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx23885/cx23885-417.c
1169
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-417.c
1194
static void cx23885_stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx23885/cx23885-417.c
1196
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-417.c
1494
struct vb2_queue *q;
drivers/media/pci/cx23885/cx23885-417.c
1522
q = &dev->vb2_mpegq;
drivers/media/pci/cx23885/cx23885-417.c
1523
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/cx23885/cx23885-417.c
1524
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx23885/cx23885-417.c
1525
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx23885/cx23885-417.c
1526
q->min_queued_buffers = 2;
drivers/media/pci/cx23885/cx23885-417.c
1527
q->drv_priv = dev;
drivers/media/pci/cx23885/cx23885-417.c
1528
q->buf_struct_size = sizeof(struct cx23885_buffer);
drivers/media/pci/cx23885/cx23885-417.c
1529
q->ops = &cx23885_qops;
drivers/media/pci/cx23885/cx23885-417.c
1530
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx23885/cx23885-417.c
1531
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx23885/cx23885-417.c
1532
q->lock = &dev->lock;
drivers/media/pci/cx23885/cx23885-417.c
1533
q->dev = &dev->pci->dev;
drivers/media/pci/cx23885/cx23885-417.c
1535
err = vb2_queue_init(q);
drivers/media/pci/cx23885/cx23885-417.c
1540
dev->v4l_device->queue = q;
drivers/media/pci/cx23885/cx23885-core.c
1398
struct cx23885_dmaqueue *q,
drivers/media/pci/cx23885/cx23885-core.c
1453
q->count = 0;
drivers/media/pci/cx23885/cx23885-core.c
1649
struct cx23885_dmaqueue *q = &port->mpegq;
drivers/media/pci/cx23885/cx23885-core.c
1654
while (!list_empty(&q->active)) {
drivers/media/pci/cx23885/cx23885-core.c
1655
buf = list_entry(q->active.next, struct cx23885_buffer,
drivers/media/pci/cx23885/cx23885-core.c
425
struct cx23885_dmaqueue *q, u32 count)
drivers/media/pci/cx23885/cx23885-core.c
432
if (list_empty(&q->active))
drivers/media/pci/cx23885/cx23885-core.c
434
buf = list_entry(q->active.next,
drivers/media/pci/cx23885/cx23885-core.c
438
buf->vb.sequence = q->count++;
drivers/media/pci/cx23885/cx23885-core.c
439
if (count != (q->count % 65536)) {
drivers/media/pci/cx23885/cx23885-core.c
441
buf->vb.vb2_buf.index, count, q->count);
drivers/media/pci/cx23885/cx23885-core.c
444
buf->vb.vb2_buf.index, count, q->count);
drivers/media/pci/cx23885/cx23885-core.c
450
count_delta = ((int)count - (int)(q->count % 65536));
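
The cx23885-core lines above reconcile the hardware's 16-bit transfer counter with the driver's running sequence count to detect dropped buffers. A sketch of that comparison (the helper name is illustrative); a positive delta means the hardware ran ahead of the driver:

#include <linux/types.h>

/* >0: hardware count ran ahead (buffers dropped); 0: counters agree */
static int seq_count_delta(u32 hw_count, u32 sw_count)
{
    return (int)hw_count - (int)(sw_count % 65536);
}
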
drivers/media/pci/cx23885/cx23885-dvb.c
150
static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx23885/cx23885-dvb.c
152
struct cx23885_tsport *port = q->drv_priv;
drivers/media/pci/cx23885/cx23885-dvb.c
161
static void cx23885_stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx23885/cx23885-dvb.c
163
struct cx23885_tsport *port = q->drv_priv;
drivers/media/pci/cx23885/cx23885-dvb.c
2642
struct vb2_queue *q;
drivers/media/pci/cx23885/cx23885-dvb.c
2664
q = &fe0->dvb.dvbq;
drivers/media/pci/cx23885/cx23885-dvb.c
2665
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/cx23885/cx23885-dvb.c
2666
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx23885/cx23885-dvb.c
2667
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx23885/cx23885-dvb.c
2668
q->min_queued_buffers = 2;
drivers/media/pci/cx23885/cx23885-dvb.c
2669
q->drv_priv = port;
drivers/media/pci/cx23885/cx23885-dvb.c
2670
q->buf_struct_size = sizeof(struct cx23885_buffer);
drivers/media/pci/cx23885/cx23885-dvb.c
2671
q->ops = &dvb_qops;
drivers/media/pci/cx23885/cx23885-dvb.c
2672
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx23885/cx23885-dvb.c
2673
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx23885/cx23885-dvb.c
2674
q->lock = &dev->lock;
drivers/media/pci/cx23885/cx23885-dvb.c
2675
q->dev = &dev->pci->dev;
drivers/media/pci/cx23885/cx23885-dvb.c
2677
err = vb2_queue_init(q);
drivers/media/pci/cx23885/cx23885-dvb.c
88
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx23885/cx23885-dvb.c
92
struct cx23885_tsport *port = q->drv_priv;
drivers/media/pci/cx23885/cx23885-vbi.c
114
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx23885/cx23885-vbi.c
118
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-vbi.c
189
struct cx23885_dmaqueue *q = &dev->vbiq;
drivers/media/pci/cx23885/cx23885-vbi.c
197
if (list_empty(&q->active)) {
drivers/media/pci/cx23885/cx23885-vbi.c
199
list_add_tail(&buf->queue, &q->active);
drivers/media/pci/cx23885/cx23885-vbi.c
206
prev = list_entry(q->active.prev, struct cx23885_buffer,
drivers/media/pci/cx23885/cx23885-vbi.c
209
list_add_tail(&buf->queue, &q->active);
drivers/media/pci/cx23885/cx23885-vbi.c
217
static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx23885/cx23885-vbi.c
219
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-vbi.c
228
static void cx23885_stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx23885/cx23885-vbi.c
230
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-vbi.c
87
struct cx23885_dmaqueue *q,
drivers/media/pci/cx23885/cx23885-vbi.c
99
q->count = 0;
drivers/media/pci/cx23885/cx23885-video.c
101
buf->vb.vb2_buf.index, count, q->count);
drivers/media/pci/cx23885/cx23885-video.c
1238
struct vb2_queue *q;
drivers/media/pci/cx23885/cx23885-video.c
1319
q = &dev->vb2_vidq;
drivers/media/pci/cx23885/cx23885-video.c
1320
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/cx23885/cx23885-video.c
1321
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx23885/cx23885-video.c
1322
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx23885/cx23885-video.c
1323
q->min_queued_buffers = 2;
drivers/media/pci/cx23885/cx23885-video.c
1324
q->drv_priv = dev;
drivers/media/pci/cx23885/cx23885-video.c
1325
q->buf_struct_size = sizeof(struct cx23885_buffer);
drivers/media/pci/cx23885/cx23885-video.c
1326
q->ops = &cx23885_video_qops;
drivers/media/pci/cx23885/cx23885-video.c
1327
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx23885/cx23885-video.c
1328
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx23885/cx23885-video.c
1329
q->lock = &dev->lock;
drivers/media/pci/cx23885/cx23885-video.c
1330
q->dev = &dev->pci->dev;
drivers/media/pci/cx23885/cx23885-video.c
1332
err = vb2_queue_init(q);
drivers/media/pci/cx23885/cx23885-video.c
1336
q = &dev->vb2_vbiq;
drivers/media/pci/cx23885/cx23885-video.c
1337
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/pci/cx23885/cx23885-video.c
1338
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx23885/cx23885-video.c
1339
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx23885/cx23885-video.c
1340
q->min_queued_buffers = 2;
drivers/media/pci/cx23885/cx23885-video.c
1341
q->drv_priv = dev;
drivers/media/pci/cx23885/cx23885-video.c
1342
q->buf_struct_size = sizeof(struct cx23885_buffer);
drivers/media/pci/cx23885/cx23885-video.c
1343
q->ops = &cx23885_vbi_qops;
drivers/media/pci/cx23885/cx23885-video.c
1344
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx23885/cx23885-video.c
1345
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx23885/cx23885-video.c
1346
q->lock = &dev->lock;
drivers/media/pci/cx23885/cx23885-video.c
1347
q->dev = &dev->pci->dev;
drivers/media/pci/cx23885/cx23885-video.c
1349
err = vb2_queue_init(q);
drivers/media/pci/cx23885/cx23885-video.c
306
struct cx23885_dmaqueue *q,
drivers/media/pci/cx23885/cx23885-video.c
320
q->count = 0;
drivers/media/pci/cx23885/cx23885-video.c
333
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx23885/cx23885-video.c
337
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-video.c
463
struct cx23885_dmaqueue *q = &dev->vidq;
drivers/media/pci/cx23885/cx23885-video.c
473
if (list_empty(&q->active)) {
drivers/media/pci/cx23885/cx23885-video.c
474
list_add_tail(&buf->queue, &q->active);
drivers/media/pci/cx23885/cx23885-video.c
479
prev = list_entry(q->active.prev, struct cx23885_buffer,
drivers/media/pci/cx23885/cx23885-video.c
481
list_add_tail(&buf->queue, &q->active);
drivers/media/pci/cx23885/cx23885-video.c
489
static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx23885/cx23885-video.c
491
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-video.c
500
static void cx23885_stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx23885/cx23885-video.c
502
struct cx23885_dev *dev = q->drv_priv;
drivers/media/pci/cx23885/cx23885-video.c
89
struct cx23885_dmaqueue *q, u32 count)
drivers/media/pci/cx23885/cx23885-video.c
93
if (list_empty(&q->active))
drivers/media/pci/cx23885/cx23885-video.c
95
buf = list_entry(q->active.next,
drivers/media/pci/cx23885/cx23885-video.c
98
buf->vb.sequence = q->count++;
drivers/media/pci/cx23885/cx23885.h
528
struct cx23885_dmaqueue *q,
drivers/media/pci/cx23885/cx23885.h
579
struct cx23885_dmaqueue *q, u32 count);
drivers/media/pci/cx25821/cx25821-alsa.c
62
struct cx25821_dmaqueue q;
drivers/media/pci/cx25821/cx25821-video.c
127
static int cx25821_queue_setup(struct vb2_queue *q,
drivers/media/pci/cx25821/cx25821-video.c
131
struct cx25821_channel *chan = q->drv_priv;
drivers/media/pci/cx25821/cx25821-video.c
243
struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
drivers/media/pci/cx25821/cx25821-video.c
250
if (list_empty(&q->active)) {
drivers/media/pci/cx25821/cx25821-video.c
251
list_add_tail(&buf->queue, &q->active);
drivers/media/pci/cx25821/cx25821-video.c
254
prev = list_entry(q->active.prev, struct cx25821_buffer,
drivers/media/pci/cx25821/cx25821-video.c
256
list_add_tail(&buf->queue, &q->active);
drivers/media/pci/cx25821/cx25821-video.c
261
static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx25821/cx25821-video.c
263
struct cx25821_channel *chan = q->drv_priv;
drivers/media/pci/cx25821/cx25821-video.c
274
static void cx25821_stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx25821/cx25821-video.c
276
struct cx25821_channel *chan = q->drv_priv;
drivers/media/pci/cx25821/cx25821-video.c
59
struct cx25821_dmaqueue *q,
drivers/media/pci/cx25821/cx25821-video.c
679
struct vb2_queue *q;
drivers/media/pci/cx25821/cx25821-video.c
724
q = &chan->vidq;
drivers/media/pci/cx25821/cx25821-video.c
726
q->type = is_output ? V4L2_BUF_TYPE_VIDEO_OUTPUT :
drivers/media/pci/cx25821/cx25821-video.c
728
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/pci/cx25821/cx25821-video.c
729
q->io_modes |= is_output ? VB2_WRITE : VB2_READ;
drivers/media/pci/cx25821/cx25821-video.c
730
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx25821/cx25821-video.c
731
q->min_queued_buffers = 2;
drivers/media/pci/cx25821/cx25821-video.c
732
q->drv_priv = chan;
drivers/media/pci/cx25821/cx25821-video.c
733
q->buf_struct_size = sizeof(struct cx25821_buffer);
drivers/media/pci/cx25821/cx25821-video.c
734
q->ops = &cx25821_video_qops;
drivers/media/pci/cx25821/cx25821-video.c
735
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx25821/cx25821-video.c
736
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx25821/cx25821-video.c
737
q->lock = &dev->lock;
drivers/media/pci/cx25821/cx25821-video.c
738
q->dev = &dev->pci->dev;
drivers/media/pci/cx25821/cx25821-video.c
741
err = vb2_queue_init(q);
drivers/media/pci/cx25821/cx25821-video.c
754
vdev->queue = q;
drivers/media/pci/cx25821/cx25821-video.h
40
struct cx25821_dmaqueue *q,
drivers/media/pci/cx88/cx88-alsa.c
55
struct cx88_dmaqueue q;
drivers/media/pci/cx88/cx88-blackbird.c
1155
struct vb2_queue *q;
drivers/media/pci/cx88/cx88-blackbird.c
1192
q = &dev->vb2_mpegq;
drivers/media/pci/cx88/cx88-blackbird.c
1193
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/cx88/cx88-blackbird.c
1194
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx88/cx88-blackbird.c
1195
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx88/cx88-blackbird.c
1196
q->min_queued_buffers = 2;
drivers/media/pci/cx88/cx88-blackbird.c
1197
q->drv_priv = dev;
drivers/media/pci/cx88/cx88-blackbird.c
1198
q->buf_struct_size = sizeof(struct cx88_buffer);
drivers/media/pci/cx88/cx88-blackbird.c
1199
q->ops = &blackbird_qops;
drivers/media/pci/cx88/cx88-blackbird.c
1200
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx88/cx88-blackbird.c
1201
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx88/cx88-blackbird.c
1202
q->lock = &core->lock;
drivers/media/pci/cx88/cx88-blackbird.c
1203
q->dev = &dev->pci->dev;
drivers/media/pci/cx88/cx88-blackbird.c
1205
err = vb2_queue_init(q);
drivers/media/pci/cx88/cx88-blackbird.c
658
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx88/cx88-blackbird.c
662
struct cx8802_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-blackbird.c
702
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx88/cx88-blackbird.c
704
struct cx8802_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-blackbird.c
752
static void stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx88/cx88-blackbird.c
754
struct cx8802_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-core.c
521
struct cx88_dmaqueue *q, u32 count)
drivers/media/pci/cx88/cx88-core.c
525
buf = list_entry(q->active.next,
drivers/media/pci/cx88/cx88-core.c
529
buf->vb.sequence = q->count++;
drivers/media/pci/cx88/cx88-dvb.c
120
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx88/cx88-dvb.c
122
struct cx8802_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-dvb.c
131
static void stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx88/cx88-dvb.c
133
struct cx8802_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-dvb.c
1764
struct vb2_queue *q;
drivers/media/pci/cx88/cx88-dvb.c
1773
q = &fe->dvb.dvbq;
drivers/media/pci/cx88/cx88-dvb.c
1774
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/cx88/cx88-dvb.c
1775
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx88/cx88-dvb.c
1776
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx88/cx88-dvb.c
1777
q->min_queued_buffers = 2;
drivers/media/pci/cx88/cx88-dvb.c
1778
q->drv_priv = dev;
drivers/media/pci/cx88/cx88-dvb.c
1779
q->buf_struct_size = sizeof(struct cx88_buffer);
drivers/media/pci/cx88/cx88-dvb.c
1780
q->ops = &dvb_qops;
drivers/media/pci/cx88/cx88-dvb.c
1781
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx88/cx88-dvb.c
1782
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx88/cx88-dvb.c
1783
q->lock = &core->lock;
drivers/media/pci/cx88/cx88-dvb.c
1784
q->dev = &dev->pci->dev;
drivers/media/pci/cx88/cx88-dvb.c
1786
err = vb2_queue_init(q);
drivers/media/pci/cx88/cx88-dvb.c
75
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx88/cx88-dvb.c
79
struct cx8802_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-mpeg.c
163
q->count = 0;
drivers/media/pci/cx88/cx88-mpeg.c
199
struct cx88_dmaqueue *q)
drivers/media/pci/cx88/cx88-mpeg.c
204
if (list_empty(&q->active))
drivers/media/pci/cx88/cx88-mpeg.c
207
buf = list_entry(q->active.next, struct cx88_buffer, list);
drivers/media/pci/cx88/cx88-mpeg.c
210
cx8802_start_dma(dev, q, buf);
drivers/media/pci/cx88/cx88-mpeg.c
216
int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
drivers/media/pci/cx88/cx88-mpeg.c
274
struct cx88_dmaqueue *q = &dev->mpegq;
drivers/media/pci/cx88/cx88-mpeg.c
279
while (!list_empty(&q->active)) {
drivers/media/pci/cx88/cx88-mpeg.c
280
buf = list_entry(q->active.next, struct cx88_buffer, list);
drivers/media/pci/cx88/cx88-mpeg.c
73
struct cx88_dmaqueue *q,
drivers/media/pci/cx88/cx88-vbi.c
103
if (list_empty(&q->active))
drivers/media/pci/cx88/cx88-vbi.c
106
buf = list_entry(q->active.next, struct cx88_buffer, list);
drivers/media/pci/cx88/cx88-vbi.c
109
cx8800_start_vbi_dma(dev, q, buf);
drivers/media/pci/cx88/cx88-vbi.c
115
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx88/cx88-vbi.c
119
struct cx8800_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-vbi.c
172
struct cx88_dmaqueue *q = &dev->vbiq;
drivers/media/pci/cx88/cx88-vbi.c
179
if (list_empty(&q->active)) {
drivers/media/pci/cx88/cx88-vbi.c
180
list_add_tail(&buf->list, &q->active);
drivers/media/pci/cx88/cx88-vbi.c
186
prev = list_entry(q->active.prev, struct cx88_buffer, list);
drivers/media/pci/cx88/cx88-vbi.c
187
list_add_tail(&buf->list, &q->active);
drivers/media/pci/cx88/cx88-vbi.c
194
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx88/cx88-vbi.c
196
struct cx8800_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-vbi.c
205
static void stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx88/cx88-vbi.c
207
struct cx8800_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-vbi.c
52
struct cx88_dmaqueue *q,
drivers/media/pci/cx88/cx88-vbi.c
67
q->count = 0;
drivers/media/pci/cx88/cx88-vbi.c
99
struct cx88_dmaqueue *q)
drivers/media/pci/cx88/cx88-video.c
1260
struct vb2_queue *q;
drivers/media/pci/cx88/cx88-video.c
1408
q = &dev->vb2_vidq;
drivers/media/pci/cx88/cx88-video.c
1409
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/cx88/cx88-video.c
1410
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx88/cx88-video.c
1411
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx88/cx88-video.c
1412
q->min_queued_buffers = 2;
drivers/media/pci/cx88/cx88-video.c
1413
q->drv_priv = dev;
drivers/media/pci/cx88/cx88-video.c
1414
q->buf_struct_size = sizeof(struct cx88_buffer);
drivers/media/pci/cx88/cx88-video.c
1415
q->ops = &cx8800_video_qops;
drivers/media/pci/cx88/cx88-video.c
1416
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx88/cx88-video.c
1417
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx88/cx88-video.c
1418
q->lock = &core->lock;
drivers/media/pci/cx88/cx88-video.c
1419
q->dev = &dev->pci->dev;
drivers/media/pci/cx88/cx88-video.c
1421
err = vb2_queue_init(q);
drivers/media/pci/cx88/cx88-video.c
1425
q = &dev->vb2_vbiq;
drivers/media/pci/cx88/cx88-video.c
1426
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/pci/cx88/cx88-video.c
1427
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/pci/cx88/cx88-video.c
1428
q->gfp_flags = GFP_DMA32;
drivers/media/pci/cx88/cx88-video.c
1429
q->min_queued_buffers = 2;
drivers/media/pci/cx88/cx88-video.c
1430
q->drv_priv = dev;
drivers/media/pci/cx88/cx88-video.c
1431
q->buf_struct_size = sizeof(struct cx88_buffer);
drivers/media/pci/cx88/cx88-video.c
1432
q->ops = &cx8800_vbi_qops;
drivers/media/pci/cx88/cx88-video.c
1433
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/cx88/cx88-video.c
1434
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/cx88/cx88-video.c
1435
q->lock = &core->lock;
drivers/media/pci/cx88/cx88-video.c
1436
q->dev = &dev->pci->dev;
drivers/media/pci/cx88/cx88-video.c
1438
err = vb2_queue_init(q);
drivers/media/pci/cx88/cx88-video.c
350
struct cx88_dmaqueue *q,
drivers/media/pci/cx88/cx88-video.c
363
q->count = 0;
drivers/media/pci/cx88/cx88-video.c
405
struct cx88_dmaqueue *q)
drivers/media/pci/cx88/cx88-video.c
409
if (!list_empty(&q->active)) {
drivers/media/pci/cx88/cx88-video.c
410
buf = list_entry(q->active.next, struct cx88_buffer, list);
drivers/media/pci/cx88/cx88-video.c
413
start_video_dma(dev, q, buf);
drivers/media/pci/cx88/cx88-video.c
420
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/cx88/cx88-video.c
424
struct cx8800_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-video.c
507
struct cx88_dmaqueue *q = &dev->vidq;
drivers/media/pci/cx88/cx88-video.c
514
if (list_empty(&q->active)) {
drivers/media/pci/cx88/cx88-video.c
515
list_add_tail(&buf->list, &q->active);
drivers/media/pci/cx88/cx88-video.c
521
prev = list_entry(q->active.prev, struct cx88_buffer, list);
drivers/media/pci/cx88/cx88-video.c
522
list_add_tail(&buf->list, &q->active);
drivers/media/pci/cx88/cx88-video.c
529
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/cx88/cx88-video.c
531
struct cx8800_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88-video.c
540
static void stop_streaming(struct vb2_queue *q)
drivers/media/pci/cx88/cx88-video.c
542
struct cx8800_dev *dev = q->drv_priv;
drivers/media/pci/cx88/cx88.h
617
struct cx88_dmaqueue *q, u32 count);
drivers/media/pci/cx88/cx88.h
663
int cx8800_restart_vbi_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q);
drivers/media/pci/cx88/cx88.h
715
int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
drivers/media/pci/cx88/cx88.h
720
struct cx88_dmaqueue *q,
drivers/media/pci/ddbridge/ddbridge-mci.h
206
s16 q;
drivers/media/pci/dt3155/dt3155.c
146
static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
drivers/media/pci/dt3155/dt3155.c
148
struct dt3155_priv *pd = vb2_get_drv_priv(q);
drivers/media/pci/dt3155/dt3155.c
174
static void dt3155_stop_streaming(struct vb2_queue *q)
drivers/media/pci/dt3155/dt3155.c
176
struct dt3155_priv *pd = vb2_get_drv_priv(q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1000
atomic_set(&q->frame_sequence, 0);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1008
r = video_device_pipeline_start(&q->vdev, &q->pipe);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1012
r = cio2_hw_init(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1017
r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1026
cio2_hw_exit(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1028
video_device_pipeline_stop(&q->vdev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1031
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1039
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1043
if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1046
cio2_hw_exit(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1048
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1049
video_device_pipeline_stop(&q->vdev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1088
struct cio2_queue *q = file_to_cio2_queue(file);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1090
f->fmt.pix_mp = q->format;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1128
struct cio2_queue *q = file_to_cio2_queue(file);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1131
q->format = f->fmt.pix_mp;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1300
struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1315
if (source_fmt.format.width != q->format.width ||
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1316
source_fmt.format.height != q->format.height) {
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1318
q->format.width, q->format.height,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1323
if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1367
struct cio2_queue *q;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1377
q = &cio2->queue[s_asd->csi2.port];
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1379
q->csi2 = s_asd->csi2;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1380
q->sensor = sd;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1381
q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1403
struct cio2_queue *q;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1408
q = &cio2->queue[s_asd->csi2.port];
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1411
&q->subdev_pads[CIO2_PAD_SINK], 0);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1488
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1493
struct video_device *vdev = &q->vdev;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1494
struct vb2_queue *vbq = &q->vbq;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1495
struct v4l2_subdev *subdev = &q->subdev;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1499
mutex_init(&q->lock);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1501
q->format.width = default_width;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1502
q->format.height = default_height;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1503
q->format.pixelformat = formats[0].fourcc;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1504
q->format.colorspace = V4L2_COLORSPACE_RAW;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1505
q->format.field = V4L2_FIELD_NONE;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1506
q->format.num_planes = 1;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1507
q->format.plane_fmt[0].bytesperline =
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1508
cio2_bytesperline(q->format.width);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1509
q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1510
q->format.height;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1513
r = cio2_fbpt_init(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1518
q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1520
q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1523
r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1529
q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1531
r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1544
CIO2_ENTITY_NAME " %td", q - cio2->queue);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1569
vbq->lock = &q->lock;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1578
"%s %td", CIO2_NAME, q - cio2->queue);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1584
vdev->queue = &q->vbq;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1603
vb2_video_unregister_device(&q->vdev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1611
cio2_fbpt_exit(q, dev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1613
mutex_destroy(&q->lock);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1618
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1620
vb2_video_unregister_device(&q->vdev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1621
media_entity_cleanup(&q->vdev.entity);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1622
v4l2_device_unregister_subdev(&q->subdev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1623
media_entity_cleanup(&q->subdev.entity);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1624
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1625
mutex_destroy(&q->lock);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1863
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1867
for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1869
if (q->bufs[j])
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1876
arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1878
arrange(q->bufs, sizeof(struct cio2_buffer *),
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1890
cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1897
struct cio2_queue *q = cio2->cur_queue;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1905
r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1911
cio2_hw_exit(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1920
cio2_fbpt_rearrange(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1921
q->bufs_first = 0;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1922
q->bufs_next = 0;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1930
struct cio2_queue *q = cio2->cur_queue;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1943
r = cio2_hw_init(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1949
r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
1952
cio2_hw_exit(cio2, q);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
230
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
234
q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
236
if (!q->fbpt)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
242
static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
244
dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
307
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
315
src_pad = media_entity_remote_source_pad_unique(&q->subdev.entity);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
318
q->subdev.name, src_pad);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
353
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
365
u8 lanes, csi2bus = q->csi2.port;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
370
state = v4l2_subdev_lock_and_get_active_state(&q->subdev);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
380
lanes = q->csi2.lanes;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
382
r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
386
writel(timing.clk_termen, q->csi_rx_base +
drivers/media/pci/intel/ipu3/ipu3-cio2.c
388
writel(timing.clk_settle, q->csi_rx_base +
drivers/media/pci/intel/ipu3/ipu3-cio2.c
392
writel(timing.dat_termen, q->csi_rx_base +
drivers/media/pci/intel/ipu3/ipu3-cio2.c
394
writel(timing.dat_settle, q->csi_rx_base +
drivers/media/pci/intel/ipu3/ipu3-cio2.c
416
q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
418
q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
425
writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
drivers/media/pci/intel/ipu3/ipu3-cio2.c
430
q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
drivers/media/pci/intel/ipu3/ipu3-cio2.c
432
q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
435
writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
436
writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
437
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
438
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
451
q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
drivers/media/pci/intel/ipu3/ipu3-cio2.c
452
writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
drivers/media/pci/intel/ipu3/ipu3-cio2.c
453
writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
456
writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
489
writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
drivers/media/pci/intel/ipu3/ipu3-cio2.c
508
base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
drivers/media/pci/intel/ipu3/ipu3-cio2.c
511
writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
517
writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
518
writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
523
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
532
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
533
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
534
writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
535
writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
556
struct cio2_queue *q = cio2->cur_queue;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
565
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
drivers/media/pci/intel/ipu3/ipu3-cio2.c
576
b = q->bufs[q->bufs_first];
drivers/media/pci/intel/ipu3/ipu3-cio2.c
582
q->bufs[q->bufs_first] = NULL;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
583
atomic_dec(&q->bufs_queued);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
588
b->vbb.sequence = atomic_read(&q->frame_sequence);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
595
atomic_inc(&q->frame_sequence);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
597
q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
598
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
drivers/media/pci/intel/ipu3/ipu3-cio2.c
602
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
611
.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
drivers/media/pci/intel/ipu3/ipu3-cio2.c
614
v4l2_event_queue(q->subdev.devnode, &event);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
795
static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
801
if (q->bufs[i]) {
drivers/media/pci/intel/ipu3/ipu3-cio2.c
802
atomic_dec(&q->bufs_queued);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
803
vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
drivers/media/pci/intel/ipu3/ipu3-cio2.c
805
q->bufs[i] = NULL;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
818
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
821
if (*num_planes && *num_planes < q->format.num_planes)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
824
for (i = 0; i < q->format.num_planes; ++i) {
drivers/media/pci/intel/ipu3/ipu3-cio2.c
825
if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
drivers/media/pci/intel/ipu3/ipu3-cio2.c
827
sizes[i] = q->format.plane_fmt[i].sizeimage;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
831
*num_planes = q->format.num_planes;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
836
q->bufs[i] = NULL;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
837
cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
839
atomic_set(&q->bufs_queued, 0);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
840
q->bufs_first = 0;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
841
q->bufs_next = 0;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
906
struct cio2_queue *q =
drivers/media/pci/intel/ipu3/ipu3-cio2.c
911
unsigned int i, j, next = q->bufs_next;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
912
int bufs_queued = atomic_inc_return(&q->bufs_queued);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
954
if (!q->bufs[next]) {
drivers/media/pci/intel/ipu3/ipu3-cio2.c
955
q->bufs[next] = b;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
956
entry = &q->fbpt[next * CIO2_MAX_LOPS];
drivers/media/pci/intel/ipu3/ipu3-cio2.c
959
q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
drivers/media/pci/intel/ipu3/ipu3-cio2.c
962
q->format.plane_fmt[j].sizeimage);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
972
atomic_dec(&q->bufs_queued);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
994
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
drivers/media/pci/intel/ipu3/ipu3-cio2.c
999
cio2->cur_queue = q;
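
The ipu3-cio2 lines above manage buffers as a fixed ring: bufs_next marks where the next queued buffer lands, bufs_first where the hardware completes, both advancing modulo CIO2_MAX_BUFFERS while an atomic counter tracks occupancy. A minimal sketch of that indexing, with MAX_BUFFERS and struct ring as stand-ins and all FBPT/register programming omitted:

#include <linux/atomic.h>
#include <linux/types.h>

#define MAX_BUFFERS 32

struct ring {
    void *bufs[MAX_BUFFERS];
    unsigned int bufs_first;        /* next slot the consumer completes */
    unsigned int bufs_next;         /* next free slot for the producer */
    atomic_t bufs_queued;
};

static bool ring_push(struct ring *r, void *buf)
{
    unsigned int next = r->bufs_next;

    if (r->bufs[next])
        return false;               /* slot still owned by the hardware */
    r->bufs[next] = buf;
    r->bufs_next = (next + 1) % MAX_BUFFERS;
    atomic_inc(&r->bufs_queued);
    return true;
}

static void *ring_pop(struct ring *r)
{
    void *buf = r->bufs[r->bufs_first];

    if (!buf)
        return NULL;                /* dummy slot, nothing completed here */
    r->bufs[r->bufs_first] = NULL;
    r->bufs_first = (r->bufs_first + 1) % MAX_BUFFERS;
    atomic_dec(&r->bufs_queued);
    return buf;
}
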
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
135
static void ipu6_sys_queue_init(struct ipu6_fw_sys_queue *q, unsigned int size,
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
141
q->size = size + 1;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
142
q->token_size = token_size;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
145
q->host_address = res->host_address;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
147
q->vied_address = res->vied_address;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
151
q->wr_reg = res->reg;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
153
q->rd_reg = res->reg;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
339
struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
340
void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
348
if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
354
packets = q->size - (wr - rd + 1);
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
361
return (void *)((uintptr_t)q->host_address + index * q->token_size);
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
367
struct ipu6_fw_sys_queue *q = &ctx->input_queue[q_nbr];
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
368
void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
371
if (wr >= q->size)
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
380
struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
381
void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
388
if (WARN_ON_ONCE(wr >= q->size || rd >= q->size))
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
392
wr += q->size;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
398
return (void *)((uintptr_t)q->host_address + rd * q->token_size);
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
404
struct ipu6_fw_sys_queue *q = &ctx->output_queue[q_nbr];
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
405
void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
drivers/media/pci/intel/ipu6/ipu6-fw-com.c
408
if (rd >= q->size)
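
The ipu6-fw-com lines above size each firmware queue one slot larger than requested (q->size = size + 1) so that equal read/write indices mean empty, and unwrap the write index when it trails the read index. A sketch of just the index arithmetic (register access omitted; the helper names are illustrative):

/* number of tokens the producer may still write (one slot stays unused) */
static unsigned int queue_free_slots(unsigned int size, unsigned int wr,
                                     unsigned int rd)
{
    if (wr < rd)
        wr += size;                 /* unwrap the write index */
    return size - (wr - rd + 1);
}

/* number of tokens the consumer may read */
static unsigned int queue_used_slots(unsigned int size, unsigned int wr,
                                     unsigned int rd)
{
    if (wr < rd)
        wr += size;
    return wr - rd;
}
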
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
532
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
534
struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
56
static int ipu6_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
60
struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
634
static void stop_streaming(struct vb2_queue *q)
drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
636
struct ipu6_isys_queue *aq = vb2_queue_to_isys_queue(q);
drivers/media/pci/ivtv/ivtv-fileops.c
307
const u8 *q;
drivers/media/pci/ivtv/ivtv-fileops.c
311
while (start + len > p && (q = memchr(p, 0, start + len - p))) {
drivers/media/pci/ivtv/ivtv-fileops.c
312
p = q + 1;
drivers/media/pci/ivtv/ivtv-fileops.c
313
if ((char *)q + 15 >= buf->buf + buf->bytesused ||
drivers/media/pci/ivtv/ivtv-fileops.c
314
q[1] != 0 || q[2] != 1 || q[3] != ch) {
drivers/media/pci/ivtv/ivtv-fileops.c
318
if ((q[6] & 0xc0) != 0x80)
drivers/media/pci/ivtv/ivtv-fileops.c
320
if (((q[7] & 0xc0) == 0x80 && (q[9] & 0xf0) == 0x20) ||
drivers/media/pci/ivtv/ivtv-fileops.c
321
((q[7] & 0xc0) == 0xc0 && (q[9] & 0xf0) == 0x30)) {
drivers/media/pci/ivtv/ivtv-fileops.c
324
p = q + 9;
drivers/media/pci/ivtv/ivtv-fileops.c
328
stuffing = q[13] & 7;
drivers/media/pci/ivtv/ivtv-fileops.c
331
if (q[14 + i] != 0xff)
drivers/media/pci/ivtv/ivtv-fileops.c
333
if (i == stuffing && (q[4] & 0xc4) == 0x44 && (q[12] & 3) == 3 &&
drivers/media/pci/ivtv/ivtv-fileops.c
334
q[14 + stuffing] == 0 && q[15 + stuffing] == 0 &&
drivers/media/pci/ivtv/ivtv-fileops.c
335
q[16 + stuffing] == 1) {
drivers/media/pci/ivtv/ivtv-fileops.c
337
len = (char *)q - start;
drivers/media/pci/ivtv/ivtv-fileops.c
572
struct ivtv_queue q;
drivers/media/pci/ivtv/ivtv-fileops.c
604
ivtv_queue_init(&q);
drivers/media/pci/ivtv/ivtv-fileops.c
640
while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io)))
drivers/media/pci/ivtv/ivtv-fileops.c
641
ivtv_enqueue(s, buf, &q);
drivers/media/pci/ivtv/ivtv-fileops.c
642
while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) {
drivers/media/pci/ivtv/ivtv-fileops.c
643
ivtv_enqueue(s, buf, &q);
drivers/media/pci/ivtv/ivtv-fileops.c
645
if (q.buffers)
drivers/media/pci/ivtv/ivtv-fileops.c
657
while ((buf = ivtv_dequeue(s, &q))) {
drivers/media/pci/ivtv/ivtv-fileops.c
669
ivtv_queue_move(s, &q, NULL, &s->q_free, 0);
drivers/media/pci/ivtv/ivtv-queue.c
32
void ivtv_queue_init(struct ivtv_queue *q)
drivers/media/pci/ivtv/ivtv-queue.c
34
INIT_LIST_HEAD(&q->list);
drivers/media/pci/ivtv/ivtv-queue.c
35
q->buffers = 0;
drivers/media/pci/ivtv/ivtv-queue.c
36
q->length = 0;
drivers/media/pci/ivtv/ivtv-queue.c
37
q->bytesused = 0;
drivers/media/pci/ivtv/ivtv-queue.c
40
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
drivers/media/pci/ivtv/ivtv-queue.c
45
if (q == &s->q_free) {
drivers/media/pci/ivtv/ivtv-queue.c
52
list_add_tail(&buf->list, &q->list);
drivers/media/pci/ivtv/ivtv-queue.c
53
q->buffers++;
drivers/media/pci/ivtv/ivtv-queue.c
54
q->length += s->buf_size;
drivers/media/pci/ivtv/ivtv-queue.c
55
q->bytesused += buf->bytesused - buf->readpos;
drivers/media/pci/ivtv/ivtv-queue.c
59
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
drivers/media/pci/ivtv/ivtv-queue.c
65
if (!list_empty(&q->list)) {
drivers/media/pci/ivtv/ivtv-queue.c
66
buf = list_entry(q->list.next, struct ivtv_buffer, list);
drivers/media/pci/ivtv/ivtv-queue.c
67
list_del_init(q->list.next);
drivers/media/pci/ivtv/ivtv-queue.c
68
q->buffers--;
drivers/media/pci/ivtv/ivtv-queue.c
69
q->length -= s->buf_size;
drivers/media/pci/ivtv/ivtv-queue.c
70
q->bytesused -= buf->bytesused - buf->readpos;
drivers/media/pci/ivtv/ivtv-queue.h
60
void ivtv_queue_init(struct ivtv_queue *q);
drivers/media/pci/ivtv/ivtv-queue.h
61
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q);
drivers/media/pci/ivtv/ivtv-queue.h
62
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q);
drivers/media/pci/ivtv/ivtv-vbi.c
305
u8 *q = buf;
drivers/media/pci/ivtv/ivtv-vbi.c
316
memcpy(q, p + 4, line_size - 4);
drivers/media/pci/ivtv/ivtv-vbi.c
317
q += line_size - 4;
drivers/media/pci/mgb4/mgb4_vin.c
209
static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
drivers/media/pci/mgb4/mgb4_vin.c
213
struct mgb4_vin_dev *vindev = vb2_get_drv_priv(q);
drivers/media/pci/mgb4/mgb4_vout.c
110
static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
drivers/media/pci/mgb4/mgb4_vout.c
114
struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(q);
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
336
static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
338
struct netup_dma *dma = vb2_get_drv_priv(q);
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
345
static void netup_unidvb_stop_streaming(struct vb2_queue *q)
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
347
struct netup_dma *dma = vb2_get_drv_priv(q);
drivers/media/pci/saa7134/saa7134-core.c
1348
struct saa7134_dmaqueue *q)
drivers/media/pci/saa7134/saa7134-core.c
1354
buf = q->curr;
drivers/media/pci/saa7134/saa7134-core.c
1363
if (!list_empty(&q->queue))
drivers/media/pci/saa7134/saa7134-core.c
1364
next = list_entry(q->queue.next, struct saa7134_buf,
drivers/media/pci/saa7134/saa7134-core.c
261
struct saa7134_dmaqueue *q,
drivers/media/pci/saa7134/saa7134-core.c
269
if (NULL == q->curr) {
drivers/media/pci/saa7134/saa7134-core.c
270
if (!q->need_two) {
drivers/media/pci/saa7134/saa7134-core.c
271
q->curr = buf;
drivers/media/pci/saa7134/saa7134-core.c
273
} else if (list_empty(&q->queue)) {
drivers/media/pci/saa7134/saa7134-core.c
274
list_add_tail(&buf->entry, &q->queue);
drivers/media/pci/saa7134/saa7134-core.c
276
next = list_entry(q->queue.next, struct saa7134_buf,
drivers/media/pci/saa7134/saa7134-core.c
278
q->curr = buf;
drivers/media/pci/saa7134/saa7134-core.c
282
list_add_tail(&buf->entry, &q->queue);
drivers/media/pci/saa7134/saa7134-core.c
289
struct saa7134_dmaqueue *q,
drivers/media/pci/saa7134/saa7134-core.c
292
core_dbg("buffer_finish %p\n", q->curr);
drivers/media/pci/saa7134/saa7134-core.c
295
q->curr->vb2.vb2_buf.timestamp = ktime_get_ns();
drivers/media/pci/saa7134/saa7134-core.c
296
q->curr->vb2.sequence = q->seq_nr++;
drivers/media/pci/saa7134/saa7134-core.c
297
vb2_buffer_done(&q->curr->vb2.vb2_buf, state);
drivers/media/pci/saa7134/saa7134-core.c
298
q->curr = NULL;
drivers/media/pci/saa7134/saa7134-core.c
302
struct saa7134_dmaqueue *q)
drivers/media/pci/saa7134/saa7134-core.c
307
BUG_ON(NULL != q->curr);
drivers/media/pci/saa7134/saa7134-core.c
309
if (!list_empty(&q->queue)) {
drivers/media/pci/saa7134/saa7134-core.c
311
buf = list_entry(q->queue.next, struct saa7134_buf, entry);
drivers/media/pci/saa7134/saa7134-core.c
313
buf, q->queue.prev, q->queue.next);
drivers/media/pci/saa7134/saa7134-core.c
315
if (!list_empty(&q->queue))
drivers/media/pci/saa7134/saa7134-core.c
316
next = list_entry(q->queue.next, struct saa7134_buf, entry);
drivers/media/pci/saa7134/saa7134-core.c
317
q->curr = buf;
drivers/media/pci/saa7134/saa7134-core.c
320
q->queue.prev, q->queue.next);
drivers/media/pci/saa7134/saa7134-core.c
325
timer_delete(&q->timeout);
drivers/media/pci/saa7134/saa7134-core.c
331
struct saa7134_dmaqueue *q = timer_container_of(q, t, timeout);
drivers/media/pci/saa7134/saa7134-core.c
332
struct saa7134_dev *dev = q->dev;
drivers/media/pci/saa7134/saa7134-core.c
344
if (q->curr) {
drivers/media/pci/saa7134/saa7134-core.c
345
core_dbg("timeout on %p\n", q->curr);
drivers/media/pci/saa7134/saa7134-core.c
346
saa7134_buffer_finish(dev, q, VB2_BUF_STATE_ERROR);
drivers/media/pci/saa7134/saa7134-core.c
348
saa7134_buffer_next(dev, q);
drivers/media/pci/saa7134/saa7134-core.c
352
void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
drivers/media/pci/saa7134/saa7134-core.c
359
list_for_each_safe(pos, n, &q->queue) {
drivers/media/pci/saa7134/saa7134-core.c
367
saa7134_buffer_timeout(&q->timeout); /* also calls timer_delete(&q->timeout) */
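
The saa7134-core lines above rotate DMA buffers through a small state machine: q->curr is the buffer being filled; on completion it is timestamped, numbered and returned to vb2, then the head of q->queue is promoted to become the new q->curr. A simplified sketch with stand-in types (sketch_buf, sketch_dmaq) and the DMA programming left as a comment:

#include <linux/list.h>
#include <linux/ktime.h>
#include <media/videobuf2-v4l2.h>

struct sketch_buf {
    struct vb2_v4l2_buffer vb2;
    struct list_head entry;
};

struct sketch_dmaq {
    struct sketch_buf *curr;        /* buffer the hardware is filling */
    struct list_head queue;         /* buffers waiting their turn */
    unsigned int seq_nr;
};

static void dmaq_finish_and_advance(struct sketch_dmaq *q,
                                    enum vb2_buffer_state state)
{
    if (q->curr) {
        q->curr->vb2.vb2_buf.timestamp = ktime_get_ns();
        q->curr->vb2.sequence = q->seq_nr++;
        vb2_buffer_done(&q->curr->vb2.vb2_buf, state);
        q->curr = NULL;
    }
    if (!list_empty(&q->queue)) {
        q->curr = list_entry(q->queue.next, struct sketch_buf, entry);
        list_del(&q->curr->entry);
        /* (re)program the DMA engine for q->curr here */
    }
}
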
drivers/media/pci/saa7134/saa7134-dvb.c
1223
struct vb2_queue *q;
drivers/media/pci/saa7134/saa7134-dvb.c
1240
q = &fe0->dvb.dvbq;
drivers/media/pci/saa7134/saa7134-dvb.c
1241
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/saa7134/saa7134-dvb.c
1242
q->io_modes = VB2_MMAP | VB2_READ;
drivers/media/pci/saa7134/saa7134-dvb.c
1243
q->drv_priv = &dev->ts_q;
drivers/media/pci/saa7134/saa7134-dvb.c
1244
q->ops = &saa7134_ts_qops;
drivers/media/pci/saa7134/saa7134-dvb.c
1245
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/saa7134/saa7134-dvb.c
1246
q->buf_struct_size = sizeof(struct saa7134_buf);
drivers/media/pci/saa7134/saa7134-dvb.c
1247
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/saa7134/saa7134-dvb.c
1248
q->lock = &dev->lock;
drivers/media/pci/saa7134/saa7134-dvb.c
1249
q->dev = &dev->pci->dev;
drivers/media/pci/saa7134/saa7134-dvb.c
1250
ret = vb2_queue_init(q);
drivers/media/pci/saa7134/saa7134-empress.c
244
struct vb2_queue *q;
drivers/media/pci/saa7134/saa7134-empress.c
270
q = &dev->empress_vbq;
drivers/media/pci/saa7134/saa7134-empress.c
271
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/saa7134/saa7134-empress.c
277
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/media/pci/saa7134/saa7134-empress.c
278
q->drv_priv = &dev->ts_q;
drivers/media/pci/saa7134/saa7134-empress.c
279
q->ops = &saa7134_empress_qops;
drivers/media/pci/saa7134/saa7134-empress.c
280
q->gfp_flags = GFP_DMA32;
drivers/media/pci/saa7134/saa7134-empress.c
281
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/saa7134/saa7134-empress.c
282
q->buf_struct_size = sizeof(struct saa7134_buf);
drivers/media/pci/saa7134/saa7134-empress.c
283
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/saa7134/saa7134-empress.c
284
q->lock = &dev->lock;
drivers/media/pci/saa7134/saa7134-empress.c
285
q->dev = &dev->pci->dev;
drivers/media/pci/saa7134/saa7134-empress.c
286
err = vb2_queue_init(q);
drivers/media/pci/saa7134/saa7134-empress.c
292
dev->empress_dev->queue = q;
drivers/media/pci/saa7134/saa7134-ts.c
106
int saa7134_ts_queue_setup(struct vb2_queue *q,
drivers/media/pci/saa7134/saa7134-ts.c
110
struct saa7134_dmaqueue *dmaq = q->drv_priv;
drivers/media/pci/saa7134/saa7134-vbi.c
128
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/saa7134/saa7134-vbi.c
132
struct saa7134_dmaqueue *dmaq = q->drv_priv;
drivers/media/pci/saa7134/saa7134-video.c
1639
struct vb2_queue *q;
drivers/media/pci/saa7134/saa7134-video.c
1695
q = &dev->video_vbq;
drivers/media/pci/saa7134/saa7134-video.c
1696
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/pci/saa7134/saa7134-video.c
1704
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/media/pci/saa7134/saa7134-video.c
1706
q->io_modes |= VB2_USERPTR;
drivers/media/pci/saa7134/saa7134-video.c
1707
q->drv_priv = &dev->video_q;
drivers/media/pci/saa7134/saa7134-video.c
1708
q->ops = &vb2_qops;
drivers/media/pci/saa7134/saa7134-video.c
1709
q->gfp_flags = GFP_DMA32;
drivers/media/pci/saa7134/saa7134-video.c
1710
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/saa7134/saa7134-video.c
1711
q->buf_struct_size = sizeof(struct saa7134_buf);
drivers/media/pci/saa7134/saa7134-video.c
1712
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/saa7134/saa7134-video.c
1713
q->lock = &dev->lock;
drivers/media/pci/saa7134/saa7134-video.c
1714
q->dev = &dev->pci->dev;
drivers/media/pci/saa7134/saa7134-video.c
1715
ret = vb2_queue_init(q);
drivers/media/pci/saa7134/saa7134-video.c
1720
q = &dev->vbi_vbq;
drivers/media/pci/saa7134/saa7134-video.c
1721
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/pci/saa7134/saa7134-video.c
1723
q->io_modes = VB2_MMAP | VB2_READ;
drivers/media/pci/saa7134/saa7134-video.c
1725
q->io_modes |= VB2_USERPTR;
drivers/media/pci/saa7134/saa7134-video.c
1726
q->drv_priv = &dev->vbi_q;
drivers/media/pci/saa7134/saa7134-video.c
1727
q->ops = &saa7134_vbi_qops;
drivers/media/pci/saa7134/saa7134-video.c
1728
q->gfp_flags = GFP_DMA32;
drivers/media/pci/saa7134/saa7134-video.c
1729
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/pci/saa7134/saa7134-video.c
1730
q->buf_struct_size = sizeof(struct saa7134_buf);
drivers/media/pci/saa7134/saa7134-video.c
1731
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/pci/saa7134/saa7134-video.c
1732
q->lock = &dev->lock;
drivers/media/pci/saa7134/saa7134-video.c
1733
q->dev = &dev->pci->dev;
drivers/media/pci/saa7134/saa7134-video.c
1734
ret = vb2_queue_init(q);
drivers/media/pci/saa7134/saa7134-video.c
750
static int queue_setup(struct vb2_queue *q,
drivers/media/pci/saa7134/saa7134-video.c
754
struct saa7134_dmaqueue *dmaq = q->drv_priv;
drivers/media/pci/saa7134/saa7134.h
753
int saa7134_buffer_queue(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
drivers/media/pci/saa7134/saa7134.h
755
void saa7134_buffer_finish(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
drivers/media/pci/saa7134/saa7134.h
757
void saa7134_buffer_next(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
drivers/media/pci/saa7134/saa7134.h
759
void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
drivers/media/pci/saa7134/saa7134.h
832
int saa7134_ts_queue_setup(struct vb2_queue *q,
drivers/media/pci/saa7164/saa7164-cmd.c
125
wait_queue_head_t *q = NULL;
drivers/media/pci/saa7164/saa7164-cmd.c
139
q = &dev->cmds[tRsp.seqno].wait;
drivers/media/pci/saa7164/saa7164-cmd.c
167
wake_up(q);
drivers/media/pci/saa7164/saa7164-cmd.c
246
wait_queue_head_t *q = NULL;
drivers/media/pci/saa7164/saa7164-cmd.c
259
q = &dev->cmds[seqno].wait;
drivers/media/pci/saa7164/saa7164-cmd.c
263
if (q) {
drivers/media/pci/saa7164/saa7164-cmd.c
278
wait_event_timeout(*q, dev->cmds[seqno].signalled,
drivers/media/pci/saa7164/saa7164-cmd.c
73
wait_queue_head_t *q = NULL;
drivers/media/pci/saa7164/saa7164-cmd.c
86
q = &dev->cmds[tRsp.seqno].wait;
drivers/media/pci/saa7164/saa7164-cmd.c
94
wake_up(q);
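
The three saa7164-cmd.c clusters are two halves of one handshake: the response path looks up the per-sequence wait queue and wakes it, while the submitter blocks on the same queue with a timeout. A stripped-down sketch under assumed names (my_cmd and the my_cmd_* functions are illustrative):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_cmd {
        wait_queue_head_t wait;         /* init_waitqueue_head() at setup time */
        bool signalled;
};

static void my_cmd_complete(struct my_cmd *cmd) /* response/IRQ side */
{
        cmd->signalled = true;
        wake_up(&cmd->wait);
}

static int my_cmd_wait(struct my_cmd *cmd)      /* submitting side */
{
        /* wait_event_timeout() returns 0 only if the timeout expired */
        if (!wait_event_timeout(cmd->wait, cmd->signalled,
                                msecs_to_jiffies(1000)))
                return -ETIMEDOUT;
        return 0;
}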
drivers/media/pci/saa7164/saa7164-dvb.c
195
struct list_head *p, *q;
drivers/media/pci/saa7164/saa7164-dvb.c
206
list_for_each_safe(p, q, &port->dmaqueue.list) {
drivers/media/pci/saa7164/saa7164-encoder.c
61
struct list_head *c, *n, *p, *q, *l, *v;
drivers/media/pci/saa7164/saa7164-encoder.c
77
list_for_each_safe(p, q, &port->list_buf_used.list) {
drivers/media/pci/saa7164/saa7164-vbi.c
30
struct list_head *c, *n, *p, *q, *l, *v;
drivers/media/pci/saa7164/saa7164-vbi.c
46
list_for_each_safe(p, q, &port->list_buf_used.list) {
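
In the saa7164 dvb/encoder/vbi hits, q is not a queue at all but the scratch cursor of list_for_each_safe(), which caches the next node so the current one can be unlinked mid-walk. A self-contained sketch with illustrative names (my_buf, my_flush):

#include <linux/list.h>
#include <linux/slab.h>

struct my_buf { struct list_head list; };

static void my_flush(struct list_head *head)
{
        struct list_head *p, *q;

        list_for_each_safe(p, q, head) {
                struct my_buf *buf = list_entry(p, struct my_buf, list);

                list_del(p);    /* safe: the iterator already saved q = p->next */
                kfree(buf);
        }
}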
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
655
static int solo_enc_queue_setup(struct vb2_queue *q,
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
708
static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
710
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
715
static void solo_enc_stop_streaming(struct vb2_queue *q)
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
717
struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
drivers/media/pci/solo6x10/solo6x10-v4l2.c
307
static int solo_queue_setup(struct vb2_queue *q,
drivers/media/pci/solo6x10/solo6x10-v4l2.c
311
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
drivers/media/pci/solo6x10/solo6x10-v4l2.c
322
static int solo_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/solo6x10/solo6x10-v4l2.c
324
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
drivers/media/pci/solo6x10/solo6x10-v4l2.c
330
static void solo_stop_streaming(struct vb2_queue *q)
drivers/media/pci/solo6x10/solo6x10-v4l2.c
332
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
drivers/media/pci/tw5864/tw5864-video.c
183
static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/pci/tw5864/tw5864-video.c
428
static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/tw5864/tw5864-video.c
430
struct tw5864_input *input = vb2_get_drv_priv(q);
drivers/media/pci/tw5864/tw5864-video.c
447
static void tw5864_stop_streaming(struct vb2_queue *q)
drivers/media/pci/tw5864/tw5864-video.c
450
struct tw5864_input *input = vb2_get_drv_priv(q);
drivers/media/pci/tw68/tw68-video.c
358
static int tw68_queue_setup(struct vb2_queue *q,
drivers/media/pci/tw68/tw68-video.c
362
struct tw68_dev *dev = vb2_get_drv_priv(q);
drivers/media/pci/tw68/tw68-video.c
363
unsigned int q_num_bufs = vb2_get_num_buffers(q);
drivers/media/pci/tw68/tw68-video.c
494
static int tw68_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/pci/tw68/tw68-video.c
496
struct tw68_dev *dev = vb2_get_drv_priv(q);
drivers/media/pci/tw68/tw68-video.c
505
static void tw68_stop_streaming(struct vb2_queue *q)
drivers/media/pci/tw68/tw68-video.c
507
struct tw68_dev *dev = vb2_get_drv_priv(q);
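
The solo6x10, tw5864 and tw68 callbacks above all recover their device state through vb2_get_drv_priv(), i.e. the drv_priv pointer stored at queue-init time. A hedged skeleton showing the three vb2_ops signatures involved; my_dev and the my_hw_* helpers are hypothetical:

#include <linux/errno.h>
#include <media/videobuf2-core.h>

struct my_dev { unsigned int image_size; };
static int my_hw_start(struct my_dev *dev);     /* hypothetical hw helpers */
static void my_hw_stop(struct my_dev *dev);

static int my_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
                          unsigned int *num_planes, unsigned int sizes[],
                          struct device *alloc_devs[])
{
        struct my_dev *dev = vb2_get_drv_priv(q);

        if (*num_planes)        /* VIDIOC_CREATE_BUFS: only validate sizes */
                return sizes[0] < dev->image_size ? -EINVAL : 0;
        *num_planes = 1;
        sizes[0] = dev->image_size;
        return 0;
}

static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
        return my_hw_start(vb2_get_drv_priv(q));
}

static void my_stop_streaming(struct vb2_queue *q)
{
        my_hw_stop(vb2_get_drv_priv(q));
        /* the driver must also hand every pending buffer back via
         * vb2_buffer_done(..., VB2_BUF_STATE_ERROR) before returning */
}

static const struct vb2_ops my_qops = {
        .queue_setup     = my_queue_setup,
        .start_streaming = my_start_streaming,
        .stop_streaming  = my_stop_streaming,
};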
drivers/media/platform/allegro-dvt/allegro-core.c
2873
struct vb2_queue *q = vb->vb2_queue;
drivers/media/platform/allegro-dvt/allegro-core.c
2875
if (V4L2_TYPE_IS_CAPTURE(q->type) &&
drivers/media/platform/allegro-dvt/allegro-core.c
2876
vb2_is_streaming(q) &&
drivers/media/platform/allegro-dvt/allegro-core.c
2894
static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/allegro-dvt/allegro-core.c
2896
struct allegro_channel *channel = vb2_get_drv_priv(q);
drivers/media/platform/allegro-dvt/allegro-core.c
2901
V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
drivers/media/platform/allegro-dvt/allegro-core.c
2903
v4l2_m2m_update_start_streaming_state(channel->fh.m2m_ctx, q);
drivers/media/platform/allegro-dvt/allegro-core.c
2905
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/allegro-dvt/allegro-core.c
2913
static void allegro_stop_streaming(struct vb2_queue *q)
drivers/media/platform/allegro-dvt/allegro-core.c
2915
struct allegro_channel *channel = vb2_get_drv_priv(q);
drivers/media/platform/allegro-dvt/allegro-core.c
2922
V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");
drivers/media/platform/allegro-dvt/allegro-core.c
2924
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/allegro-dvt/allegro-core.c
2949
v4l2_m2m_update_stop_streaming_state(channel->fh.m2m_ctx, q);
drivers/media/platform/allegro-dvt/allegro-core.c
2951
if (V4L2_TYPE_IS_OUTPUT(q->type) &&
drivers/media/platform/allegro-dvt/allegro-core.c
3496
vb2_clear_last_buffer_dequeued(&channel->fh.m2m_ctx->cap_q_ctx.q);
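
allegro-core.c runs the same stop path for both m2m directions and branches on q->type; the drain loop below is a reduced sketch of that idiom, assuming a my_ctx that embeds the struct v4l2_fh (as the real channel struct does via fh.m2m_ctx):

#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

struct my_ctx { struct v4l2_fh fh; };

static void my_m2m_stop_streaming(struct vb2_queue *q)
{
        struct my_ctx *ctx = vb2_get_drv_priv(q);
        struct vb2_v4l2_buffer *vbuf;

        for (;;) {
                if (V4L2_TYPE_IS_OUTPUT(q->type))
                        vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                else
                        vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
                if (!vbuf)
                        break;
                v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
        }
}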
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
545
static int c3_isp_vb2_queue_setup(struct vb2_queue *q,
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
551
struct c3_isp_capture *cap = vb2_get_drv_priv(q);
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
618
static int c3_isp_vb2_start_streaming(struct vb2_queue *q,
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
621
struct c3_isp_capture *cap = vb2_get_drv_priv(q);
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
659
static void c3_isp_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/amlogic/c3/isp/c3-isp-capture.c
661
struct c3_isp_capture *cap = vb2_get_drv_priv(q);
drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
729
static int c3_isp_params_vb2_queue_setup(struct vb2_queue *q,
drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
811
static void c3_isp_params_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
813
struct c3_isp_params *params = vb2_get_drv_priv(q);
drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c
145
static int c3_isp_stats_vb2_queue_setup(struct vb2_queue *q,
drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c
207
static void c3_isp_stats_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/amlogic/c3/isp/c3-isp-stats.c
209
struct c3_isp_stats *stats = vb2_get_drv_priv(q);
drivers/media/platform/amphion/vdec.c
358
struct vb2_queue *q;
drivers/media/platform/amphion/vdec.c
368
q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
drivers/media/platform/amphion/vdec.c
369
if (!list_empty(&q->done_list))
drivers/media/platform/amphion/vdec.c
525
struct vb2_queue *q;
drivers/media/platform/amphion/vdec.c
532
q = v4l2_m2m_get_vq(inst->fh.m2m_ctx, f->type);
drivers/media/platform/amphion/vdec.c
533
if (vb2_is_busy(q))
drivers/media/platform/amphion/venc.c
219
struct vb2_queue *q;
drivers/media/platform/amphion/venc.c
223
q = v4l2_m2m_get_vq(inst->fh.m2m_ctx, f->type);
drivers/media/platform/amphion/venc.c
224
if (vb2_is_busy(q))
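
The amphion vdec/venc hits show the standard S_FMT guard for m2m devices: fetch the vb2 queue matching the format's buffer type and refuse changes while it still owns buffers. Sketch with hypothetical my_inst/my_s_fmt:

#include <linux/errno.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

struct my_inst { struct v4l2_fh fh; };

static int my_s_fmt(struct my_inst *inst, struct v4l2_format *f)
{
        struct vb2_queue *q = v4l2_m2m_get_vq(inst->fh.m2m_ctx, f->type);

        if (vb2_is_busy(q))     /* buffers allocated: the format is locked */
                return -EBUSY;
        /* apply the new format here */
        return 0;
}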
drivers/media/platform/amphion/vpu_v4l2.c
116
struct vb2_queue *q;
drivers/media/platform/amphion/vpu_v4l2.c
121
q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
drivers/media/platform/amphion/vpu_v4l2.c
122
if (!list_empty(&q->done_list))
drivers/media/platform/amphion/vpu_v4l2.c
125
if (q->last_buffer_dequeued)
drivers/media/platform/amphion/vpu_v4l2.c
128
q->last_buffer_dequeued = true;
drivers/media/platform/amphion/vpu_v4l2.c
129
wake_up(&q->done_wq);
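
Here amphion reaches into vb2 internals: when it knows no further capture buffer will be produced, it sets q->last_buffer_dequeued itself and wakes q->done_wq, apparently so a process blocked in DQBUF wakes and sees the end-of-stream condition (-EPIPE) rather than sleeping forever.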
drivers/media/platform/amphion/vpu_v4l2.c
433
struct vb2_queue *q;
drivers/media/platform/amphion/vpu_v4l2.c
439
q = v4l2_m2m_get_src_vq(inst->fh.m2m_ctx);
drivers/media/platform/amphion/vpu_v4l2.c
441
q = v4l2_m2m_get_dst_vq(inst->fh.m2m_ctx);
drivers/media/platform/amphion/vpu_v4l2.c
443
return vb2_get_num_buffers(q);
drivers/media/platform/amphion/vpu_v4l2.c
552
struct vb2_queue *q = vb->vb2_queue;
drivers/media/platform/amphion/vpu_v4l2.c
566
if (list_empty(&q->done_list))
drivers/media/platform/amphion/vpu_v4l2.c
567
call_void_vop(inst, on_queue_empty, q->type);
drivers/media/platform/amphion/vpu_v4l2.c
588
static int vpu_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/amphion/vpu_v4l2.c
590
struct vpu_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/amphion/vpu_v4l2.c
591
struct vpu_format *fmt = vpu_get_format(inst, q->type);
drivers/media/platform/amphion/vpu_v4l2.c
598
vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
drivers/media/platform/amphion/vpu_v4l2.c
603
inst->id, vpu_type_name(q->type),
drivers/media/platform/amphion/vpu_v4l2.c
612
vb2_get_num_buffers(q));
drivers/media/platform/amphion/vpu_v4l2.c
613
vb2_clear_last_buffer_dequeued(q);
drivers/media/platform/amphion/vpu_v4l2.c
614
ret = call_vop(inst, start, q->type);
drivers/media/platform/amphion/vpu_v4l2.c
616
vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_QUEUED);
drivers/media/platform/amphion/vpu_v4l2.c
621
static void vpu_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/amphion/vpu_v4l2.c
623
struct vpu_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/amphion/vpu_v4l2.c
625
vpu_trace(inst->dev, "[%d] %s\n", inst->id, vpu_type_name(q->type));
drivers/media/platform/amphion/vpu_v4l2.c
627
call_void_vop(inst, stop, q->type);
drivers/media/platform/amphion/vpu_v4l2.c
628
vpu_vb2_buffers_return(inst, q->type, VB2_BUF_STATE_ERROR);
drivers/media/platform/amphion/vpu_v4l2.c
629
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/arm/mali-c55/mali-c55-capture.c
328
static int mali_c55_vb2_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/platform/arm/mali-c55/mali-c55-capture.c
332
struct mali_c55_cap_dev *cap_dev = q->drv_priv;
drivers/media/platform/arm/mali-c55/mali-c55-capture.c
583
static int mali_c55_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/arm/mali-c55/mali-c55-capture.c
585
struct mali_c55_cap_dev *cap_dev = q->drv_priv;
drivers/media/platform/arm/mali-c55/mali-c55-capture.c
635
static void mali_c55_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/arm/mali-c55/mali-c55-capture.c
637
struct mali_c55_cap_dev *cap_dev = q->drv_priv;
drivers/media/platform/arm/mali-c55/mali-c55-params.c
536
mali_c55_params_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/platform/arm/mali-c55/mali-c55-params.c
627
static int mali_c55_params_start_streaming(struct vb2_queue *q,
drivers/media/platform/arm/mali-c55/mali-c55-params.c
630
struct mali_c55_params *params = vb2_get_drv_priv(q);
drivers/media/platform/arm/mali-c55/mali-c55-params.c
662
static void mali_c55_params_stop_streaming(struct vb2_queue *q)
drivers/media/platform/arm/mali-c55/mali-c55-params.c
664
struct mali_c55_params *params = vb2_get_drv_priv(q);
drivers/media/platform/arm/mali-c55/mali-c55-stats.c
142
static int mali_c55_stats_start_streaming(struct vb2_queue *q,
drivers/media/platform/arm/mali-c55/mali-c55-stats.c
145
struct mali_c55_stats *stats = vb2_get_drv_priv(q);
drivers/media/platform/arm/mali-c55/mali-c55-stats.c
177
static void mali_c55_stats_stop_streaming(struct vb2_queue *q)
drivers/media/platform/arm/mali-c55/mali-c55-stats.c
179
struct mali_c55_stats *stats = vb2_get_drv_priv(q);
drivers/media/platform/arm/mali-c55/mali-c55-stats.c
95
mali_c55_stats_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/platform/aspeed/aspeed-video.c
1928
static int aspeed_video_queue_setup(struct vb2_queue *q,
drivers/media/platform/aspeed/aspeed-video.c
1934
struct aspeed_video *video = vb2_get_drv_priv(q);
drivers/media/platform/aspeed/aspeed-video.c
1959
static int aspeed_video_start_streaming(struct vb2_queue *q,
drivers/media/platform/aspeed/aspeed-video.c
1963
struct aspeed_video *video = vb2_get_drv_priv(q);
drivers/media/platform/aspeed/aspeed-video.c
1981
static void aspeed_video_stop_streaming(struct vb2_queue *q)
drivers/media/platform/aspeed/aspeed-video.c
1984
struct aspeed_video *video = vb2_get_drv_priv(q);
drivers/media/platform/atmel/atmel-isi.c
1186
struct vb2_queue *q;
drivers/media/platform/atmel/atmel-isi.c
1208
q = &isi->queue;
drivers/media/platform/atmel/atmel-isi.c
1234
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/atmel/atmel-isi.c
1235
q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
drivers/media/platform/atmel/atmel-isi.c
1236
q->lock = &isi->lock;
drivers/media/platform/atmel/atmel-isi.c
1237
q->drv_priv = isi;
drivers/media/platform/atmel/atmel-isi.c
1238
q->buf_struct_size = sizeof(struct frame_buffer);
drivers/media/platform/atmel/atmel-isi.c
1239
q->ops = &isi_video_qops;
drivers/media/platform/atmel/atmel-isi.c
1240
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/atmel/atmel-isi.c
1241
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/atmel/atmel-isi.c
1242
q->min_queued_buffers = 2;
drivers/media/platform/atmel/atmel-isi.c
1243
q->dev = &pdev->dev;
drivers/media/platform/atmel/atmel-isi.c
1245
ret = vb2_queue_init(q);
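
atmel-isi asks vb2 to defer the start_streaming callback until two buffers are queued (q->min_queued_buffers = 2), presumably so its DMA engine can ping-pong between buffers from the first frame; the unicam and microchip-isc queues below settle for one.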
drivers/media/platform/broadcom/bcm2835-unicam.c
2243
struct vb2_queue *q = &node->buffer_queue;
drivers/media/platform/broadcom/bcm2835-unicam.c
2254
q->type = type == UNICAM_IMAGE_NODE ? V4L2_BUF_TYPE_VIDEO_CAPTURE
drivers/media/platform/broadcom/bcm2835-unicam.c
2256
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/broadcom/bcm2835-unicam.c
2257
q->drv_priv = node;
drivers/media/platform/broadcom/bcm2835-unicam.c
2258
q->ops = &unicam_video_qops;
drivers/media/platform/broadcom/bcm2835-unicam.c
2259
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/broadcom/bcm2835-unicam.c
2260
q->buf_struct_size = sizeof(struct unicam_buffer);
drivers/media/platform/broadcom/bcm2835-unicam.c
2261
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/broadcom/bcm2835-unicam.c
2262
q->lock = &unicam->lock;
drivers/media/platform/broadcom/bcm2835-unicam.c
2263
q->min_queued_buffers = 1;
drivers/media/platform/broadcom/bcm2835-unicam.c
2264
q->dev = unicam->dev;
drivers/media/platform/broadcom/bcm2835-unicam.c
2266
ret = vb2_queue_init(q);
drivers/media/platform/broadcom/bcm2835-unicam.c
2278
vdev->queue = q;
drivers/media/platform/chips-media/coda/coda-common.c
1968
static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/chips-media/coda/coda-common.c
1970
struct coda_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/coda/coda-common.c
1981
coda_dbg(1, ctx, "start streaming %s\n", v4l2_type_names[q->type]);
drivers/media/platform/chips-media/coda/coda-common.c
1986
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
drivers/media/platform/chips-media/coda/coda-common.c
2089
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
drivers/media/platform/chips-media/coda/coda-common.c
2098
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
drivers/media/platform/chips-media/coda/coda-common.c
2112
static void coda_stop_streaming(struct vb2_queue *q)
drivers/media/platform/chips-media/coda/coda-common.c
2114
struct coda_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/coda/coda-common.c
2121
coda_dbg(1, ctx, "stop streaming %s\n", v4l2_type_names[q->type]);
drivers/media/platform/chips-media/coda/coda-common.c
2123
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
drivers/media/platform/chips-media/wave5/wave5-helper.c
230
void wave5_return_bufs(struct vb2_queue *q, u32 state)
drivers/media/platform/chips-media/wave5/wave5-helper.c
232
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-helper.c
238
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/chips-media/wave5/wave5-helper.h
30
void wave5_return_bufs(struct vb2_queue *q, u32 state);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1356
static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1358
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1362
dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1365
v4l2_m2m_update_start_streaming_state(m2m_ctx, q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1367
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && inst->state == VPU_INST_STATE_NONE) {
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1389
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1415
wave5_return_bufs(q, VB2_BUF_STATE_QUEUED);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1420
static int streamoff_output(struct vb2_queue *q)
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1422
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1474
static int streamoff_capture(struct vb2_queue *q)
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1476
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1516
static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1518
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1523
dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1541
v4l2_m2m_update_stop_streaming_state(m2m_ctx, q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1543
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1544
streamoff_output(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1546
streamoff_capture(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
1781
if (!m2m_ctx->cap_q_ctx.q.streaming) {
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
949
static int wave5_vpu_dec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
953
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
955
(q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? inst->src_fmt : inst->dst_fmt;
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
959
*num_buffers, *num_planes, q->type);
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
963
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
966
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1091
static int wave5_vpu_enc_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1095
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1097
(q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? inst->src_fmt : inst->dst_fmt;
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1101
*num_buffers, *num_planes, q->type);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1330
static int wave5_vpu_enc_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1332
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1337
v4l2_m2m_update_start_streaming_state(m2m_ctx, q);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1339
if (inst->state == VPU_INST_STATE_NONE && q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1373
(m2m_ctx->cap_q_ctx.q.streaming || q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1400
wave5_return_bufs(q, VB2_BUF_STATE_QUEUED);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1405
static void streamoff_output(struct vpu_instance *inst, struct vb2_queue *q)
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1417
static void streamoff_capture(struct vpu_instance *inst, struct vb2_queue *q)
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1432
static void wave5_vpu_enc_stop_streaming(struct vb2_queue *q)
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1434
struct vpu_instance *inst = vb2_get_drv_priv(q);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1442
dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, q->type);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1464
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1465
streamoff_output(inst, q);
drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
1467
streamoff_capture(inst, q);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1134
static void e5010_vb2_buffers_return(struct vb2_queue *q, enum vb2_buffer_state state)
drivers/media/platform/imagination/e5010-jpeg-enc.c
1137
struct e5010_context *ctx = vb2_get_drv_priv(q);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1139
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/imagination/e5010-jpeg-enc.c
1289
static int e5010_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/imagination/e5010-jpeg-enc.c
1291
struct e5010_context *ctx = vb2_get_drv_priv(q);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1294
struct e5010_q_data *queue = get_queue(ctx, q->type);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1296
v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1314
e5010_vb2_buffers_return(q, VB2_BUF_STATE_QUEUED);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1319
static void e5010_stop_streaming(struct vb2_queue *q)
drivers/media/platform/imagination/e5010-jpeg-enc.c
1321
struct e5010_context *ctx = vb2_get_drv_priv(q);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1323
e5010_vb2_buffers_return(q, VB2_BUF_STATE_ERROR);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1325
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/imagination/e5010-jpeg-enc.c
1326
v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/platform/imagination/e5010-jpeg-enc.c
1328
if (V4L2_TYPE_IS_OUTPUT(q->type) &&
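
The wave5 and e5010 failure paths make a vb2 contract visible: a start_streaming that fails must hand every buffer back as VB2_BUF_STATE_QUEUED, while stop_streaming returns pending buffers as VB2_BUF_STATE_ERROR; wave5_return_bufs() and e5010_vb2_buffers_return() take the state as a parameter for exactly this reason.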
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1115
struct mtk_jpeg_q_data *q = &ctx->out_q;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1119
q->pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1120
q->pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_601;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1121
q->pix_mp.quantization = V4L2_QUANTIZATION_FULL_RANGE;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1122
q->pix_mp.xfer_func = V4L2_XFER_FUNC_SRGB;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1124
q->fmt = mtk_jpeg_find_format(jpeg->variant->formats,
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1128
q->pix_mp.width = MTK_JPEG_MIN_WIDTH;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1129
q->pix_mp.height = MTK_JPEG_MIN_HEIGHT;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1130
mtk_jpeg_try_fmt_mplane(&q->pix_mp, q->fmt);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1132
q = &ctx->cap_q;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1133
q->fmt = mtk_jpeg_find_format(jpeg->variant->formats,
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1137
q->pix_mp.colorspace = V4L2_COLORSPACE_SRGB;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1138
q->pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_601;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1139
q->pix_mp.quantization = V4L2_QUANTIZATION_FULL_RANGE;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1140
q->pix_mp.xfer_func = V4L2_XFER_FUNC_SRGB;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1141
q->pix_mp.width = MTK_JPEG_MIN_WIDTH;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1142
q->pix_mp.height = MTK_JPEG_MIN_HEIGHT;
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
1144
mtk_jpeg_try_fmt_mplane(&q->pix_mp, q->fmt);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
665
static int mtk_jpeg_queue_setup(struct vb2_queue *q,
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
671
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
677
q->type, *num_buffers);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
679
q_data = mtk_jpeg_get_q_data(ctx, q->type);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
841
static void mtk_jpeg_enc_stop_streaming(struct vb2_queue *q)
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
843
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
846
while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
850
static void mtk_jpeg_dec_stop_streaming(struct vb2_queue *q)
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
852
struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
861
V4L2_TYPE_IS_CAPTURE(q->type)) {
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
868
} else if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
872
while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
389
static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
391
struct mtk_mdp_ctx *ctx = q->drv_priv;
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
411
static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
413
struct mtk_mdp_ctx *ctx = q->drv_priv;
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
416
vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
419
vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
132
static int mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
134
struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
140
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
143
if (V4L2_TYPE_IS_CAPTURE(q->type))
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
153
if ((V4L2_TYPE_IS_OUTPUT(q->type) && cap_streaming) ||
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
154
(V4L2_TYPE_IS_CAPTURE(q->type) && out_streaming)) {
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
190
static void mdp_m2m_stop_streaming(struct vb2_queue *q)
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
192
struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
195
vb = mdp_m2m_buf_remove(ctx, q->type);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
198
vb = mdp_m2m_buf_remove(ctx, q->type);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
202
static int mdp_m2m_queue_setup(struct vb2_queue *q,
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
207
struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
211
pix_mp = &ctx_get_frame(ctx, q->type)->format.fmt.pix_mp;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
423
u32 q;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
430
q = f->numerator / f->denominator;
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
431
*r = div_u64(((u64)f->numerator - q * f->denominator) <<
drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
433
return q;
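
mtk-mdp3-regs.c uses q for the integer quotient when splitting a fraction into an integer part plus a scaled binary fraction. A standalone version, assuming 32 fractional bits (the function name is hypothetical):

#include <linux/math64.h>

static u32 my_fract_split(u32 numerator, u32 denominator, u32 *frac)
{
        u32 q = numerator / denominator;

        /* remainder scaled to a 32-bit binary fraction: (n - q*d) << 32 / d */
        *frac = div_u64(((u64)numerator - (u64)q * denominator) << 32,
                        denominator);
        return q;
}

For a 30000/1001 frame rate this returns q = 29 with a remainder of 971, so *frac ends up near 0.97 * 2^32.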
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
487
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
497
vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
859
int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
861
struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
869
void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
872
struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
876
ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
878
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h
98
int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count);
drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h
99
void vb2ops_vdec_stop_streaming(struct vb2_queue *q);
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
850
static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
852
struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
866
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
867
if (!vb2_start_streaming_called(&ctx->m2m_ctx->cap_q_ctx.q))
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
870
if (!vb2_start_streaming_called(&ctx->m2m_ctx->out_q_ctx.q))
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
900
for (i = 0; i < vb2_get_num_buffers(q); ++i) {
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
901
struct vb2_buffer *buf = vb2_get_buffer(q, i);
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
909
ctx->id, i, q->type, (int)buf->state);
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
918
static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
920
struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
924
mtk_v4l2_venc_dbg(2, ctx, "[%d]-> type=%d", ctx->id, q->type);
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
926
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
965
if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
966
vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) ||
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
967
(q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
968
vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) {
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
970
ctx->id, q->type,
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
971
vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q),
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
972
vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q));
drivers/media/platform/microchip/microchip-isc-base.c
1757
struct vb2_queue *q = &isc->vb2_vidq;
drivers/media/platform/microchip/microchip-isc-base.c
1776
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/microchip/microchip-isc-base.c
1777
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/media/platform/microchip/microchip-isc-base.c
1778
q->drv_priv = isc;
drivers/media/platform/microchip/microchip-isc-base.c
1779
q->buf_struct_size = sizeof(struct isc_buffer);
drivers/media/platform/microchip/microchip-isc-base.c
1780
q->ops = &isc_vb2_ops;
drivers/media/platform/microchip/microchip-isc-base.c
1781
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/microchip/microchip-isc-base.c
1782
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/microchip/microchip-isc-base.c
1783
q->lock = &isc->lock;
drivers/media/platform/microchip/microchip-isc-base.c
1784
q->min_queued_buffers = 1;
drivers/media/platform/microchip/microchip-isc-base.c
1785
q->dev = isc->dev;
drivers/media/platform/microchip/microchip-isc-base.c
1787
ret = vb2_queue_init(q);
drivers/media/platform/microchip/microchip-isc-base.c
1817
vdev->queue = q;
drivers/media/platform/nuvoton/npcm-video.c
1443
static int npcm_video_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/platform/nuvoton/npcm-video.c
1447
struct npcm_video *video = vb2_get_drv_priv(q);
drivers/media/platform/nuvoton/npcm-video.c
1476
static int npcm_video_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/nuvoton/npcm-video.c
1478
struct npcm_video *video = vb2_get_drv_priv(q);
drivers/media/platform/nuvoton/npcm-video.c
1492
static void npcm_video_stop_streaming(struct vb2_queue *q)
drivers/media/platform/nuvoton/npcm-video.c
1494
struct npcm_video *video = vb2_get_drv_priv(q);
drivers/media/platform/nvidia/tegra-vde/h264.c
654
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
drivers/media/platform/nxp/dw100/dw100.c
503
static void dw100_return_all_buffers(struct vb2_queue *q,
drivers/media/platform/nxp/dw100/dw100.c
506
struct dw100_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/dw100/dw100.c
510
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/nxp/dw100/dw100.c
520
static int dw100_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/nxp/dw100/dw100.c
522
struct dw100_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/dw100/dw100.c
523
struct dw100_q_data *q_data = dw100_get_q_data(ctx, q->type);
drivers/media/platform/nxp/dw100/dw100.c
540
dw100_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
drivers/media/platform/nxp/dw100/dw100.c
544
static void dw100_stop_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/dw100/dw100.c
546
struct dw100_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/dw100/dw100.c
548
dw100_return_all_buffers(q, VB2_BUF_STATE_ERROR);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1643
vb2_clear_last_buffer_dequeued(&fh->m2m_ctx->cap_q_ctx.q);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1675
vb2_clear_last_buffer_dequeued(&fh->m2m_ctx->cap_q_ctx.q);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1680
static int mxc_jpeg_queue_setup(struct vb2_queue *q,
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1686
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1690
q_data = mxc_jpeg_get_q_data(ctx, q->type);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1710
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1716
static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1718
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1719
struct mxc_jpeg_q_data *q_data = mxc_jpeg_get_q_data(ctx, q->type);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1722
v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1724
if (ctx->mxc_jpeg->mode == MXC_JPEG_DECODE && V4L2_TYPE_IS_CAPTURE(q->type))
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1729
if (V4L2_TYPE_IS_CAPTURE(q->type))
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1741
static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1743
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1750
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1759
v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1763
if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1766
if (V4L2_TYPE_IS_OUTPUT(q->type) &&
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1850
static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q, u32 precision)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1854
bytesperline[0] = q->bytesperline[0];
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1855
bytesperline[1] = q->bytesperline[0]; /*imx-jpeg only support the same line pitch*/
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1861
if (q->fmt->fourcc == V4L2_PIX_FMT_JPEG) {
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1863
q->bytesperline[0] = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1864
q->bytesperline[1] = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1865
} else if (q->fmt->subsampling == V4L2_JPEG_CHROMA_SUBSAMPLING_420) {
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1870
q->bytesperline[0] = q->w_adjusted * DIV_ROUND_UP(precision, 8);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1871
q->bytesperline[1] = q->bytesperline[0];
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1872
} else if (q->fmt->subsampling == V4L2_JPEG_CHROMA_SUBSAMPLING_422) {
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1873
q->bytesperline[0] = q->w_adjusted * DIV_ROUND_UP(precision, 8) * 2;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1874
q->bytesperline[1] = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1875
} else if (q->fmt->subsampling == V4L2_JPEG_CHROMA_SUBSAMPLING_444) {
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1876
q->bytesperline[0] = q->w_adjusted * DIV_ROUND_UP(precision, 8) * q->fmt->nc;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1877
q->bytesperline[1] = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1880
q->bytesperline[0] = q->w_adjusted * DIV_ROUND_UP(precision, 8);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1881
q->bytesperline[1] = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1884
if (q->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1885
q->bytesperline[0] = max(q->bytesperline[0], bytesperline[0]);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1886
if (q->fmt->mem_planes > 1)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1887
q->bytesperline[1] = max(q->bytesperline[1], bytesperline[1]);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1891
static void mxc_jpeg_sizeimage(struct mxc_jpeg_q_data *q)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1893
if (q->fmt->fourcc == V4L2_PIX_FMT_JPEG) {
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1895
if (!q->sizeimage[0])
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1896
q->sizeimage[0] = 6 * q->w * q->h;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1897
q->sizeimage[1] = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1899
if (q->sizeimage[0] > MXC_JPEG_MAX_SIZEIMAGE)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1900
q->sizeimage[0] = MXC_JPEG_MAX_SIZEIMAGE;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1903
q->sizeimage[0] = ALIGN(q->sizeimage[0], 1024);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1905
q->sizeimage[0] = q->bytesperline[0] * q->h_adjusted;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1906
q->sizeimage[1] = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1907
if (q->fmt->subsampling == V4L2_JPEG_CHROMA_SUBSAMPLING_420)
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
1908
q->sizeimage[1] = q->sizeimage[0] / 2;
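
Worked numbers for the mxc-jpeg math above, with hypothetical dimensions: an 8-bit 4:2:0 frame with w_adjusted = 1920 and h_adjusted = 1088 gets bytesperline[0] = 1920 * DIV_ROUND_UP(8, 8) = 1920, the same pitch mirrored into bytesperline[1], sizeimage[0] = 1920 * 1088 = 2088960 bytes, and sizeimage[1] = sizeimage[0] / 2 = 1044480 bytes for the half-height chroma plane. The V4L2_PIX_FMT_JPEG branch instead estimates 6 * w * h, capped at MXC_JPEG_MAX_SIZEIMAGE and aligned to 1024.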
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2120
struct mxc_jpeg_q_data *q[2] = {out_q, cap_q};
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2132
q[i]->w = MXC_JPEG_DEFAULT_WIDTH;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2133
q[i]->h = MXC_JPEG_DEFAULT_HEIGHT;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2134
q[i]->w_adjusted = MXC_JPEG_DEFAULT_WIDTH;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2135
q[i]->h_adjusted = MXC_JPEG_DEFAULT_HEIGHT;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2136
q[i]->crop.left = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2137
q[i]->crop.top = 0;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2138
q[i]->crop.width = MXC_JPEG_DEFAULT_WIDTH;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2139
q[i]->crop.height = MXC_JPEG_DEFAULT_HEIGHT;
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2140
mxc_jpeg_bytesperline(q[i], q[i]->fmt->precision);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
2141
mxc_jpeg_sizeimage(q[i]);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
594
static void mxc_jpeg_bytesperline(struct mxc_jpeg_q_data *q, u32 precision);
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
595
static void mxc_jpeg_sizeimage(struct mxc_jpeg_q_data *q);
drivers/media/platform/nxp/imx-pxp.c
1568
static int pxp_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/nxp/imx-pxp.c
1570
struct pxp_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx-pxp.c
1571
struct pxp_q_data *q_data = get_q_data(ctx, q->type);
drivers/media/platform/nxp/imx-pxp.c
1577
static void pxp_stop_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx-pxp.c
1579
struct pxp_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx-pxp.c
1584
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/nxp/imx7-media-csi.c
1183
if (vb2_is_busy(&csi->q)) {
drivers/media/platform/nxp/imx7-media-csi.c
1544
struct vb2_queue *vq = &csi->q;
drivers/media/platform/nxp/imx7-media-csi.c
1667
vdev->queue = &csi->q;
drivers/media/platform/nxp/imx7-media-csi.c
1683
vq = &csi->q;
drivers/media/platform/nxp/imx7-media-csi.c
256
struct vb2_queue q; /* The videobuf2 queue */
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
191
static int mxc_isi_m2m_vb2_queue_setup(struct vb2_queue *q,
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
197
struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
199
mxc_isi_m2m_ctx_qdata(ctx, q->type);
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
238
static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
240
struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
302
static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
305
struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
307
mxc_isi_m2m_ctx_qdata(ctx, q->type);
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
314
static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
316
struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
320
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
331
static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
333
struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1019
static void mxc_isi_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1021
struct mxc_isi_video *video = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1029
static void mxc_isi_vb2_unprepare_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1031
struct mxc_isi_video *video = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1369
struct vb2_queue *q = &video->vb2_q;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1394
vdev->queue = q;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1405
memset(q, 0, sizeof(*q));
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1406
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1407
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1408
q->drv_priv = video;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1409
q->ops = &mxc_isi_vb2_qops;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1410
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1411
q->buf_struct_size = sizeof(struct mxc_isi_buffer);
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1412
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1413
q->min_queued_buffers = 2;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1414
q->lock = &video->lock;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1415
q->dev = pipe->isi->dev;
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
1417
ret = vb2_queue_init(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
884
static int mxc_isi_vb2_queue_setup(struct vb2_queue *q,
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
890
struct mxc_isi_video *video = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
940
static int mxc_isi_vb2_prepare_streaming(struct vb2_queue *q)
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
942
struct mxc_isi_video *video = vb2_get_drv_priv(q);
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
981
static int mxc_isi_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
983
struct mxc_isi_video *video = vb2_get_drv_priv(q);
drivers/media/platform/qcom/camss/camss-video.c
118
static int video_queue_setup(struct vb2_queue *q,
drivers/media/platform/qcom/camss/camss-video.c
122
struct camss_video *video = vb2_get_drv_priv(q);
drivers/media/platform/qcom/camss/camss-video.c
228
static int video_prepare_streaming(struct vb2_queue *q)
drivers/media/platform/qcom/camss/camss-video.c
230
struct camss_video *video = vb2_get_drv_priv(q);
drivers/media/platform/qcom/camss/camss-video.c
243
static int video_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/qcom/camss/camss-video.c
245
struct camss_video *video = vb2_get_drv_priv(q);
drivers/media/platform/qcom/camss/camss-video.c
291
static void video_stop_streaming(struct vb2_queue *q)
drivers/media/platform/qcom/camss/camss-video.c
293
struct camss_video *video = vb2_get_drv_priv(q);
drivers/media/platform/qcom/camss/camss-video.c
326
static void video_unprepare_streaming(struct vb2_queue *q)
drivers/media/platform/qcom/camss/camss-video.c
328
struct camss_video *video = vb2_get_drv_priv(q);
drivers/media/platform/qcom/camss/camss-video.c
680
struct vb2_queue *q;
drivers/media/platform/qcom/camss/camss-video.c
687
q = &video->vb2_q;
drivers/media/platform/qcom/camss/camss-video.c
688
q->drv_priv = video;
drivers/media/platform/qcom/camss/camss-video.c
689
q->mem_ops = &vb2_dma_sg_memops;
drivers/media/platform/qcom/camss/camss-video.c
690
q->ops = &msm_video_vb2_q_ops;
drivers/media/platform/qcom/camss/camss-video.c
691
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/qcom/camss/camss-video.c
692
q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ;
drivers/media/platform/qcom/camss/camss-video.c
693
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/qcom/camss/camss-video.c
694
q->buf_struct_size = sizeof(struct camss_buffer);
drivers/media/platform/qcom/camss/camss-video.c
695
q->dev = video->camss->dev;
drivers/media/platform/qcom/camss/camss-video.c
696
q->lock = &video->q_lock;
drivers/media/platform/qcom/camss/camss-video.c
697
ret = vb2_queue_init(q);
drivers/media/platform/qcom/iris/iris_buffer.c
701
struct vb2_queue *q;
drivers/media/platform/qcom/iris/iris_buffer.c
703
q = v4l2_m2m_get_src_vq(m2m_ctx);
drivers/media/platform/qcom/iris/iris_buffer.c
704
vb2_queue_error(q);
drivers/media/platform/qcom/iris/iris_buffer.c
705
q = v4l2_m2m_get_dst_vq(m2m_ctx);
drivers/media/platform/qcom/iris/iris_buffer.c
706
vb2_queue_error(q);
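
iris, like venus/helpers.c further down, uses a shared fatal-error idiom: put both m2m queues into the error state so userspace waiting on either side gets -EIO instead of hanging. A minimal sketch (my_fatal_error is a hypothetical wrapper):

#include <media/v4l2-mem2mem.h>

static void my_fatal_error(struct v4l2_m2m_ctx *m2m_ctx)
{
        vb2_queue_error(v4l2_m2m_get_src_vq(m2m_ctx)); /* OUTPUT side */
        vb2_queue_error(v4l2_m2m_get_dst_vq(m2m_ctx)); /* CAPTURE side */
}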
drivers/media/platform/qcom/iris/iris_ctrls.c
226
struct vb2_queue *q;
drivers/media/platform/qcom/iris/iris_ctrls.c
233
q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
drivers/media/platform/qcom/iris/iris_ctrls.c
234
if (vb2_is_streaming(q) &&
drivers/media/platform/qcom/iris/iris_ctrls.c
242
if (vb2_is_streaming(q)) {
drivers/media/platform/qcom/iris/iris_ctrls.c
848
struct vb2_queue *q;
drivers/media/platform/qcom/iris/iris_ctrls.c
851
q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
drivers/media/platform/qcom/iris/iris_ctrls.c
852
if (vb2_is_streaming(q)) {
drivers/media/platform/qcom/iris/iris_ctrls.c
976
struct vb2_queue *q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
drivers/media/platform/qcom/iris/iris_ctrls.c
982
if (vb2_is_streaming(q))
drivers/media/platform/qcom/iris/iris_vb2.c
107
inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/iris/iris_vb2.c
116
f = V4L2_TYPE_IS_OUTPUT(q->type) ? inst->fmt_src : inst->fmt_dst;
drivers/media/platform/qcom/iris/iris_vb2.c
153
int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/qcom/iris/iris_vb2.c
159
inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/iris/iris_vb2.c
167
if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
drivers/media/platform/qcom/iris/iris_vb2.c
168
!V4L2_TYPE_IS_CAPTURE(q->type)) {
drivers/media/platform/qcom/iris/iris_vb2.c
179
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/qcom/iris/iris_vb2.c
184
} else if (V4L2_TYPE_IS_CAPTURE(q->type)) {
drivers/media/platform/qcom/iris/iris_vb2.c
193
buf_type = iris_v4l2_type_to_driver(q->type);
drivers/media/platform/qcom/iris/iris_vb2.c
220
iris_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
drivers/media/platform/qcom/iris/iris_vb2.c
227
void iris_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/qcom/iris/iris_vb2.c
232
inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/iris/iris_vb2.c
234
if (V4L2_TYPE_IS_CAPTURE(q->type) && inst->state == IRIS_INST_INIT)
drivers/media/platform/qcom/iris/iris_vb2.c
239
if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
drivers/media/platform/qcom/iris/iris_vb2.c
240
!V4L2_TYPE_IS_CAPTURE(q->type))
drivers/media/platform/qcom/iris/iris_vb2.c
243
ret = iris_session_streamoff(inst, q->type);
drivers/media/platform/qcom/iris/iris_vb2.c
248
iris_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
drivers/media/platform/qcom/iris/iris_vb2.c
98
int iris_vb2_queue_setup(struct vb2_queue *q,
drivers/media/platform/qcom/iris/iris_vb2.h
10
int iris_vb2_queue_setup(struct vb2_queue *q,
drivers/media/platform/qcom/iris/iris_vb2.h
13
int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count);
drivers/media/platform/qcom/iris/iris_vb2.h
14
void iris_vb2_stop_streaming(struct vb2_queue *q);
drivers/media/platform/qcom/iris/iris_vdec.c
211
struct vb2_queue *q;
drivers/media/platform/qcom/iris/iris_vdec.c
214
q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
drivers/media/platform/qcom/iris/iris_vdec.c
216
if (vb2_is_busy(q))
drivers/media/platform/qcom/iris/iris_vdec.c
414
struct vb2_queue *q;
drivers/media/platform/qcom/iris/iris_vdec.c
424
q = v4l2_m2m_get_vq(inst->m2m_ctx, vb2->type);
drivers/media/platform/qcom/iris/iris_vdec.c
425
if (!vb2_is_streaming(q)) {
drivers/media/platform/qcom/iris/iris_venc.c
334
struct vb2_queue *q;
drivers/media/platform/qcom/iris/iris_venc.c
336
q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
drivers/media/platform/qcom/iris/iris_venc.c
338
if (vb2_is_busy(q))
drivers/media/platform/qcom/iris/iris_venc.c
561
struct vb2_queue *q;
drivers/media/platform/qcom/iris/iris_venc.c
571
q = v4l2_m2m_get_vq(inst->m2m_ctx, vb2->type);
drivers/media/platform/qcom/iris/iris_venc.c
572
if (!vb2_is_streaming(q)) {
drivers/media/platform/qcom/venus/helpers.c
1544
void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
drivers/media/platform/qcom/venus/helpers.c
1546
struct venus_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/venus/helpers.c
1576
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/qcom/venus/helpers.c
1592
struct vb2_queue *q;
drivers/media/platform/qcom/venus/helpers.c
1594
q = v4l2_m2m_get_src_vq(m2m_ctx);
drivers/media/platform/qcom/venus/helpers.c
1595
vb2_queue_error(q);
drivers/media/platform/qcom/venus/helpers.c
1596
q = v4l2_m2m_get_dst_vq(m2m_ctx);
drivers/media/platform/qcom/venus/helpers.c
1597
vb2_queue_error(q);
drivers/media/platform/qcom/venus/helpers.h
25
void venus_helper_vb2_stop_streaming(struct vb2_queue *q);
drivers/media/platform/qcom/venus/vdec.c
1186
static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/qcom/venus/vdec.c
1188
struct venus_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/venus/vdec.c
1193
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
drivers/media/platform/qcom/venus/vdec.c
1220
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
drivers/media/platform/qcom/venus/vdec.c
1283
static void vdec_stop_streaming(struct vb2_queue *q)
drivers/media/platform/qcom/venus/vdec.c
1285
struct venus_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/venus/vdec.c
1292
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
drivers/media/platform/qcom/venus/vdec.c
1297
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
drivers/media/platform/qcom/venus/vdec.c
1304
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/qcom/venus/vdec.c
329
struct vb2_queue *q;
drivers/media/platform/qcom/venus/vdec.c
331
q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
drivers/media/platform/qcom/venus/vdec.c
333
if (vb2_is_busy(q))
drivers/media/platform/qcom/venus/vdec.c
919
static int vdec_queue_setup(struct vb2_queue *q,
drivers/media/platform/qcom/venus/vdec.c
923
struct venus_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/venus/vdec.c
931
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
drivers/media/platform/qcom/venus/vdec.c
935
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
drivers/media/platform/qcom/venus/vdec.c
939
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
drivers/media/platform/qcom/venus/vdec.c
943
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
drivers/media/platform/qcom/venus/vdec.c
976
switch (q->type) {
drivers/media/platform/qcom/venus/venc.c
1082
static int venc_queue_setup(struct vb2_queue *q,
drivers/media/platform/qcom/venus/venc.c
1086
struct venus_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/venus/venc.c
1092
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
drivers/media/platform/qcom/venus/venc.c
1096
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
drivers/media/platform/qcom/venus/venc.c
1100
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
drivers/media/platform/qcom/venus/venc.c
1104
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
drivers/media/platform/qcom/venus/venc.c
1136
switch (q->type) {
drivers/media/platform/qcom/venus/venc.c
1249
static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/qcom/venus/venc.c
1251
struct venus_inst *inst = vb2_get_drv_priv(q);
drivers/media/platform/qcom/venus/venc.c
1256
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/qcom/venus/venc.c
1309
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
drivers/media/platform/qcom/venus/venc.c
1310
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/media/platform/qcom/venus/venc.c
245
struct vb2_queue *q;
drivers/media/platform/qcom/venus/venc.c
247
q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
drivers/media/platform/qcom/venus/venc.c
249
if (vb2_is_busy(q))
drivers/media/platform/qcom/venus/venc.c
563
vb2_clear_last_buffer_dequeued(&inst->fh.m2m_ctx->cap_q_ctx.q);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1400
struct vb2_queue *q = &node->queue;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1414
q->type = node->buf_type;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1415
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1416
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1417
q->drv_priv = node;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1418
q->ops = &pispbe_node_queue_ops;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1419
q->buf_struct_size = sizeof(struct pispbe_buffer);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1420
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1421
q->dev = pispbe->dev;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1423
q->lock = &node->queue_lock;
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
1425
ret = vb2_queue_init(q);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
776
static int pispbe_node_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
780
struct pispbe_node *node = vb2_get_drv_priv(q);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
870
static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
872
struct pispbe_node *node = vb2_get_drv_priv(q);
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
906
static void pispbe_node_stop_streaming(struct vb2_queue *q)
drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
908
struct pispbe_node *node = vb2_get_drv_priv(q);
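The pispbe entries above, like most capture drivers that follow in this listing (rp1-cfe, rcar-vin, renesas-ceu, rzg2l-cru, sh_vou, rkcif, rkisp1, ...), fill the same vb2_queue fields before calling vb2_queue_init(). A condensed sketch of that shared pattern; the "example_" names are hypothetical, the field choices mirror the listed drivers:

#include <linux/videodev2.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

struct example_buffer {
	struct vb2_v4l2_buffer vb;	/* must come first: vb2 allocates this */
	struct list_head list;		/* driver-side buffer queueing */
};

static int example_init_queue(struct vb2_queue *q, void *priv,
			      struct mutex *lock, struct device *dev,
			      const struct vb2_ops *ops)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = priv;		/* returned later by vb2_get_drv_priv() */
	q->ops = ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct example_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_queued_buffers = 1;	/* buffers required before start_streaming() */
	q->lock = lock;			/* serialises the queue ioctls */
	q->dev = dev;			/* device doing the DMA allocations */

	return vb2_queue_init(q);
}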
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1425
struct vb2_queue *q = &node->buffer_queue;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1428
if (vb2_is_busy(q))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1569
struct vb2_queue *q = &node->buffer_queue;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1574
if (vb2_is_busy(q))
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1945
struct vb2_queue *q;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1986
q = &node->buffer_queue;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1987
q->type = node_supports_image(node) ? node->vid_fmt.type :
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1989
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1990
q->drv_priv = node;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1991
q->ops = &cfe_video_qops;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1992
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1993
q->buf_struct_size = id == FE_CONFIG ? sizeof(struct cfe_config_buffer)
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1995
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1996
q->lock = &node->lock;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1997
q->min_queued_buffers = 1;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1998
q->min_reqbufs_allocation = 3;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
1999
q->dev = &cfe->pdev->dev;
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2001
ret = vb2_queue_init(q);
drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
2019
vdev->queue = q;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1450
struct vb2_queue *q = &vin->queue;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1467
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1468
q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1469
q->lock = &vin->lock;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1470
q->drv_priv = vin;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1471
q->buf_struct_size = sizeof(struct rvin_buffer);
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1472
q->ops = &rvin_qops;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1473
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1474
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1475
q->min_queued_buffers = 4;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1476
q->dev = vin->dev;
drivers/media/platform/renesas/rcar-vin/rcar-dma.c
1478
ret = vb2_queue_init(q);
drivers/media/platform/renesas/rcar_drif.c
922
struct vb2_queue *q = &sdr->vb_queue;
drivers/media/platform/renesas/rcar_drif.c
925
if (vb2_is_busy(q))
drivers/media/platform/renesas/rcar_fdp1.c
1919
static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/renesas/rcar_fdp1.c
1921
struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/renesas/rcar_fdp1.c
1922
struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
drivers/media/platform/renesas/rcar_fdp1.c
1924
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/renesas/rcar_fdp1.c
1958
static void fdp1_stop_streaming(struct vb2_queue *q)
drivers/media/platform/renesas/rcar_fdp1.c
1960
struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/renesas/rcar_fdp1.c
1965
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/renesas/rcar_fdp1.c
1977
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/renesas/renesas-ceu.c
1388
struct vb2_queue *q = &ceudev->vb2_vq;
drivers/media/platform/renesas/renesas-ceu.c
1393
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/renesas/renesas-ceu.c
1394
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/renesas/renesas-ceu.c
1395
q->drv_priv = ceudev;
drivers/media/platform/renesas/renesas-ceu.c
1396
q->ops = &ceu_vb2_ops;
drivers/media/platform/renesas/renesas-ceu.c
1397
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/renesas/renesas-ceu.c
1398
q->buf_struct_size = sizeof(struct ceu_buffer);
drivers/media/platform/renesas/renesas-ceu.c
1399
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/renesas/renesas-ceu.c
1400
q->min_queued_buffers = 2;
drivers/media/platform/renesas/renesas-ceu.c
1401
q->lock = &ceudev->mlock;
drivers/media/platform/renesas/renesas-ceu.c
1402
q->dev = ceudev->v4l2_dev.dev;
drivers/media/platform/renesas/renesas-ceu.c
1404
ret = vb2_queue_init(q);
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
852
struct vb2_queue *q = &cru->queue;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
872
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
873
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
874
q->lock = &cru->lock;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
875
q->drv_priv = cru;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
876
q->buf_struct_size = sizeof(struct rzg2l_cru_buffer);
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
877
q->ops = &rzg2l_cru_qops;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
878
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
879
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
880
q->min_queued_buffers = 4;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
881
q->dev = cru->dev;
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
883
ret = vb2_queue_init(q);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
172
static int rzv2h_ivc_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
176
struct rzv2h_ivc *ivc = vb2_get_drv_priv(q);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
260
static int rzv2h_ivc_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
262
struct rzv2h_ivc *ivc = vb2_get_drv_priv(q);
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
292
static void rzv2h_ivc_stop_streaming(struct vb2_queue *q)
drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
294
struct rzv2h_ivc *ivc = vb2_get_drv_priv(q);
drivers/media/platform/renesas/sh_vou.c
1225
struct vb2_queue *q;
drivers/media/platform/renesas/sh_vou.c
1290
q = &vou_dev->queue;
drivers/media/platform/renesas/sh_vou.c
1291
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
drivers/media/platform/renesas/sh_vou.c
1292
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
drivers/media/platform/renesas/sh_vou.c
1293
q->drv_priv = vou_dev;
drivers/media/platform/renesas/sh_vou.c
1294
q->buf_struct_size = sizeof(struct sh_vou_buffer);
drivers/media/platform/renesas/sh_vou.c
1295
q->ops = &sh_vou_qops;
drivers/media/platform/renesas/sh_vou.c
1296
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/renesas/sh_vou.c
1297
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/renesas/sh_vou.c
1298
q->min_queued_buffers = 2;
drivers/media/platform/renesas/sh_vou.c
1299
q->lock = &vou_dev->fop_lock;
drivers/media/platform/renesas/sh_vou.c
1300
q->dev = &pdev->dev;
drivers/media/platform/renesas/sh_vou.c
1301
ret = vb2_queue_init(q);
drivers/media/platform/renesas/sh_vou.c
1305
vdev->queue = q;
drivers/media/platform/rockchip/rga/rga-buf.c
179
static void rga_buf_return_buffers(struct vb2_queue *q,
drivers/media/platform/rockchip/rga/rga-buf.c
182
struct rga_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/rockchip/rga/rga-buf.c
186
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/rockchip/rga/rga-buf.c
196
static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/rockchip/rga/rga-buf.c
198
struct rga_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/rockchip/rga/rga-buf.c
204
rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
drivers/media/platform/rockchip/rga/rga-buf.c
208
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/rockchip/rga/rga-buf.c
216
static void rga_buf_stop_streaming(struct vb2_queue *q)
drivers/media/platform/rockchip/rga/rga-buf.c
218
struct rga_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/rockchip/rga/rga-buf.c
221
rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
511
static int rkcif_stream_init_vb2_queue(struct vb2_queue *q,
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
514
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
515
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
516
q->drv_priv = stream;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
517
q->ops = &rkcif_stream_vb2_ops;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
518
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
519
q->buf_struct_size = sizeof(struct rkcif_buffer);
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
520
q->min_queued_buffers = CIF_REQ_BUFS_MIN;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
521
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
522
q->lock = &stream->vlock;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
523
q->dev = stream->rkcif->dev;
drivers/media/platform/rockchip/rkcif/rkcif-stream.c
525
return vb2_queue_init(q);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1532
struct vb2_queue *q;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1552
q = &node->buf_queue;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1553
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1554
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1555
q->drv_priv = cap;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1556
q->ops = &rkisp1_vb2_ops;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1557
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1558
q->buf_struct_size = sizeof(struct rkisp1_buffer);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1559
q->min_queued_buffers = 1;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1560
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1561
q->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1562
q->dev = cap->rkisp1->dev;
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1563
ret = vb2_queue_init(q);
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
1570
vdev->queue = q;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2744
static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2749
node = container_of(q, struct rkisp1_vdev_node, buf_queue);
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2751
q->type = V4L2_BUF_TYPE_META_OUTPUT;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2752
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2753
q->drv_priv = params;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2754
q->ops = &rkisp1_params_vb2_ops;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2755
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2756
q->buf_struct_size = sizeof(struct rkisp1_params_buffer);
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2757
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2758
q->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
2760
return vb2_queue_init(q);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
157
rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
161
node = container_of(q, struct rkisp1_vdev_node, buf_queue);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
163
q->type = V4L2_BUF_TYPE_META_CAPTURE;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
164
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
165
q->drv_priv = stats;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
166
q->ops = &rkisp1_stats_vb2_ops;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
167
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
168
q->buf_struct_size = sizeof(struct rkisp1_buffer);
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
169
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
170
q->lock = &node->vlock;
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
172
return vb2_queue_init(q);
drivers/media/platform/rockchip/rkvdec/rkvdec-h264-common.c
78
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
drivers/media/platform/rockchip/rkvdec/rkvdec-hevc-common.c
425
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
355
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1030
static void rkvdec_stop_streaming(struct vb2_queue *q)
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1032
struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1034
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/rockchip/rkvdec/rkvdec.c
1046
rkvdec_queue_cleanup(q, VB2_BUF_STATE_ERROR);
drivers/media/platform/rockchip/rkvdec/rkvdec.c
977
static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/rockchip/rkvdec/rkvdec.c
979
struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/rockchip/rkvdec/rkvdec.c
984
if (V4L2_TYPE_IS_CAPTURE(q->type))
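The rkvdec pair above follows the vb2 streaming contract that also shapes the rga, vpe and vicodec entries in this listing: a failing start_streaming() must hand back the already queued buffers in the QUEUED state (so userspace can retry), while stop_streaming() returns everything as ERROR. A hedged sketch with hypothetical "example_" names:

#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

struct example_ctx {
	struct v4l2_fh fh;	/* fh.m2m_ctx set up at open() time */
};

static void example_return_buffers(struct vb2_queue *q,
				   enum vb2_buffer_state state)
{
	struct example_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *vbuf;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vbuf)
			break;
		v4l2_m2m_buf_done(vbuf, state);
	}
}

static int example_hw_prepare(struct vb2_queue *q)
{
	return 0;	/* placeholder for real hardware/session setup */
}

static int example_start_streaming(struct vb2_queue *q, unsigned int count)
{
	int ret = example_hw_prepare(q);

	if (ret)
		example_return_buffers(q, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void example_stop_streaming(struct vb2_queue *q)
{
	example_return_buffers(q, VB2_BUF_STATE_ERROR);
}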
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
56
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
58
struct gsc_ctx *ctx = q->drv_priv;
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
78
static void gsc_m2m_stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
80
struct gsc_ctx *ctx = q->drv_priv;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1715
struct vb2_queue *q = &fimc->vid_cap.vbq;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1740
vfd->queue = q;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1753
memset(q, 0, sizeof(*q));
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1754
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1755
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1756
q->drv_priv = ctx;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1757
q->ops = &fimc_capture_qops;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1758
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1759
q->buf_struct_size = sizeof(struct fimc_vid_buffer);
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1760
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1761
q->lock = &fimc->lock;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1762
q->dev = &fimc->pdev->dev;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
1764
ret = vb2_queue_init(q);
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
259
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
261
struct fimc_ctx *ctx = q->drv_priv;
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
290
static void stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/exynos4-is/fimc-capture.c
292
struct fimc_ctx *ctx = q->drv_priv;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
119
static void isp_video_capture_stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
121
struct fimc_isp *isp = vb2_get_drv_priv(q);
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
572
struct vb2_queue *q = &isp->video_capture.vb_queue;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
592
memset(q, 0, sizeof(*q));
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
593
q->type = type;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
594
q->io_modes = VB2_MMAP | VB2_USERPTR;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
595
q->ops = &isp_video_capture_qops;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
596
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
597
q->buf_struct_size = sizeof(struct isp_video_buf);
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
598
q->drv_priv = isp;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
599
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
600
q->lock = &isp->video_lock;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
601
q->dev = &isp->pdev->dev;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
603
ret = vb2_queue_init(q);
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
610
vdev->queue = q;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
76
static int isp_video_capture_start_streaming(struct vb2_queue *q,
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
79
struct fimc_isp *isp = vb2_get_drv_priv(q);
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1241
struct vb2_queue *q = &fimc->vb_queue;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1256
vfd->queue = q;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1263
memset(q, 0, sizeof(*q));
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1264
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1265
q->io_modes = VB2_MMAP | VB2_USERPTR;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1266
q->ops = &fimc_lite_qops;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1267
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1268
q->buf_struct_size = sizeof(struct flite_buffer);
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1269
q->drv_priv = fimc;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1270
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1271
q->lock = &fimc->lock;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1272
q->dev = &fimc->pdev->dev;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
1274
ret = vb2_queue_init(q);
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
305
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
307
struct fimc_lite *fimc = q->drv_priv;
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
339
static void stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/exynos4-is/fimc-lite.c
341
struct fimc_lite *fimc = q->drv_priv;
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
73
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
75
struct fimc_ctx *ctx = q->drv_priv;
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
80
static void stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
82
struct fimc_ctx *ctx = q->drv_priv;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1101
struct vb2_queue *q = &vp->vb_queue;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1121
memset(q, 0, sizeof(*q));
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1122
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1123
q->io_modes = VB2_MMAP | VB2_USERPTR;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1124
q->ops = &s3c_camif_qops;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1125
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1126
q->buf_struct_size = sizeof(struct camif_buffer);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1127
q->drv_priv = vp;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1128
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1129
q->lock = &vp->camif->lock;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1130
q->dev = camif->v4l2_dev.dev;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
1132
ret = vb2_queue_init(q);
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2561
static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2563
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2568
static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2570
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
2578
q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
787
struct vb2_queue *q;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
877
q = &ctx->vq_dst;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
878
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
879
q->drv_priv = ctx;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
880
q->lock = &dev->mfc_mutex;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
882
q->io_modes = VB2_MMAP;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
883
q->ops = get_dec_queue_ops();
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
885
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
886
q->ops = get_enc_queue_ops();
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
895
q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
896
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
897
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
898
ret = vb2_queue_init(q);
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
904
q = &ctx->vq_src;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
905
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
906
q->drv_priv = ctx;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
907
q->lock = &dev->mfc_mutex;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
909
q->io_modes = VB2_MMAP;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
910
q->ops = get_dec_queue_ops();
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
912
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
913
q->ops = get_enc_queue_ops();
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
924
q->allow_zero_bytesused = 1;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
930
q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
931
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
932
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
933
ret = vb2_queue_init(q);
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
1069
static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
1071
struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
1085
static void s5p_mfc_stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
1088
struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
1103
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
1119
} else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2558
static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2560
struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2564
(q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2572
if (q->memory != V4L2_MEMORY_DMABUF) {
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2588
static void s5p_mfc_stop_streaming(struct vb2_queue *q)
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2591
struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2603
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
2608
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
501
static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
503
struct bdisp_ctx *ctx = q->drv_priv;
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
510
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
524
static void bdisp_stop_streaming(struct vb2_queue *q)
drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
526
struct bdisp_ctx *ctx = q->drv_priv;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1120
struct vb2_queue *q = vb->vb2_queue;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1121
struct delta_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1300
struct vb2_queue *q = vb->vb2_queue;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1301
struct delta_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1307
static int delta_vb2_au_start_streaming(struct vb2_queue *q,
drivers/media/platform/st/sti/delta/delta-v4l2.c
1310
struct delta_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1401
static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
drivers/media/platform/st/sti/delta/delta-v4l2.c
1403
struct delta_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1466
struct vb2_queue *q = vb->vb2_queue;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1467
struct delta_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1512
struct vb2_queue *q = vb->vb2_queue;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1513
struct delta_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1531
static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
drivers/media/platform/st/sti/delta/delta-v4l2.c
1533
struct delta_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1585
struct vb2_queue *q;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1591
q = src_vq;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1592
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1593
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1594
q->drv_priv = ctx;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1596
q->buf_struct_size = sizeof(struct delta_au);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1597
q->ops = &delta_vb2_au_ops;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1598
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1599
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1600
q->lock = &delta->lock;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1601
q->dev = delta->dev;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1603
ret = vb2_queue_init(q);
drivers/media/platform/st/sti/delta/delta-v4l2.c
1608
q = dst_vq;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1609
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1610
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1611
q->drv_priv = ctx;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1613
q->buf_struct_size = sizeof(struct delta_frame)
drivers/media/platform/st/sti/delta/delta-v4l2.c
1615
q->ops = &delta_vb2_frame_ops;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1616
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1617
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1618
q->lock = &delta->lock;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1619
q->dev = delta->dev;
drivers/media/platform/st/sti/delta/delta-v4l2.c
1621
return vb2_queue_init(q);
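The delta-v4l2 block above shows the mem2mem variant of queue setup: v4l2_m2m_ctx_init() hands the driver both queues through one queue_init callback. A minimal sketch; "example_" names are hypothetical, and real drivers also set buf_struct_size, lock and dev as the delta entries do:

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>

static const struct vb2_ops example_qops = {
	/* real drivers fill queue_setup, buf_queue, start/stop_streaming */
};

static int example_queue_init(void *priv, struct vb2_queue *src_vq,
			      struct vb2_queue *dst_vq)
{
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = priv;
	src_vq->ops = &example_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = priv;
	dst_vq->ops = &example_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;	/* m2m copies src timestamps */
	return vb2_queue_init(dst_vq);
}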
drivers/media/platform/st/sti/hva/hva-v4l2.c
1009
if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
drivers/media/platform/st/sti/hva/hva-v4l2.c
1012
if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
drivers/media/platform/st/sti/hva/hva-v4l2.c
1089
vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
drivers/media/platform/st/sti/hva/hva-v4l2.c
1091
vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
drivers/media/platform/st/sti/hva/hva-v4l2.c
1094
vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
drivers/media/platform/st/sti/hva/hva-v4l2.c
1095
vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
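The hva entries above query two distinct vb2 states worth keeping apart: vb2_is_streaming() is true as soon as VIDIOC_STREAMON succeeded, while vb2_start_streaming_called() only becomes true once enough buffers were queued for the driver's start_streaming() callback to actually run. A small sketch over an m2m context:

#include <media/v4l2-mem2mem.h>

/* true only when both directions have really started the hardware */
static bool example_both_pipes_running(struct v4l2_m2m_ctx *m2m_ctx)
{
	return vb2_start_streaming_called(&m2m_ctx->out_q_ctx.q) &&
	       vb2_start_streaming_called(&m2m_ctx->cap_q_ctx.q);
}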
drivers/media/platform/st/stm32/dma2d/dma2d.c
160
static int dma2d_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/st/stm32/dma2d/dma2d.c
162
struct dma2d_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/stm32/dma2d/dma2d.c
163
struct dma2d_frame *f = get_frame(ctx, q->type);
drivers/media/platform/st/stm32/dma2d/dma2d.c
169
static void dma2d_stop_streaming(struct vb2_queue *q)
drivers/media/platform/st/stm32/dma2d/dma2d.c
171
struct dma2d_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/st/stm32/dma2d/dma2d.c
175
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/st/stm32/stm32-dcmi.c
1888
struct vb2_queue *q;
drivers/media/platform/st/stm32/stm32-dcmi.c
1968
q = &dcmi->queue;
drivers/media/platform/st/stm32/stm32-dcmi.c
2020
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/st/stm32/stm32-dcmi.c
2021
q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
drivers/media/platform/st/stm32/stm32-dcmi.c
2022
q->lock = &dcmi->lock;
drivers/media/platform/st/stm32/stm32-dcmi.c
2023
q->drv_priv = dcmi;
drivers/media/platform/st/stm32/stm32-dcmi.c
2024
q->buf_struct_size = sizeof(struct dcmi_buf);
drivers/media/platform/st/stm32/stm32-dcmi.c
2025
q->ops = &dcmi_video_qops;
drivers/media/platform/st/stm32/stm32-dcmi.c
2026
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/st/stm32/stm32-dcmi.c
2027
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/st/stm32/stm32-dcmi.c
2028
q->min_queued_buffers = 2;
drivers/media/platform/st/stm32/stm32-dcmi.c
2029
q->allow_cache_hints = 1;
drivers/media/platform/st/stm32/stm32-dcmi.c
2030
q->dev = &pdev->dev;
drivers/media/platform/st/stm32/stm32-dcmi.c
2032
ret = vb2_queue_init(q);
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
865
struct vb2_queue *q;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
893
q = &vcap->queue;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
894
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
895
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
896
q->lock = &vcap->lock;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
897
q->drv_priv = vcap;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
898
q->buf_struct_size = sizeof(struct dcmipp_buf);
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
899
q->ops = &dcmipp_bytecap_qops;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
900
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
901
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
902
q->min_queued_buffers = 1;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
903
q->dev = dev;
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
912
ret = vb2_queue_init(q);
drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-bytecap.c
941
vdev->queue = q;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
401
struct vb2_queue *q = &csi->queue;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
412
q->min_queued_buffers = 3;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
413
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
414
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
415
q->lock = &csi->lock;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
416
q->drv_priv = csi;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
417
q->buf_struct_size = sizeof(struct sun4i_csi_buffer);
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
418
q->ops = &sun4i_csi_qops;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
419
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
420
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
421
q->dev = csi->dev;
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
423
ret = vb2_queue_init(q);
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1663
static int hdmirx_init_vb2_queue(struct vb2_queue *q,
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1669
q->type = buf_type;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1670
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1671
q->drv_priv = stream;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1672
q->ops = &hdmirx_vb2_ops;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1673
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1674
q->buf_struct_size = sizeof(struct hdmirx_buffer);
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1675
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1676
q->lock = &stream->vlock;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1677
q->dev = hdmirx_dev->dev;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1678
q->min_queued_buffers = 1;
drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
1680
return vb2_queue_init(q);
drivers/media/platform/ti/am437x/am437x-vpfe.c
2209
struct vb2_queue *q;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2226
q = &vpfe->buffer_queue;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2227
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2228
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2229
q->drv_priv = vpfe;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2230
q->ops = &vpfe_video_qops;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2231
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2232
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
drivers/media/platform/ti/am437x/am437x-vpfe.c
2233
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2234
q->lock = &vpfe->lock;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2235
q->min_queued_buffers = 1;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2236
q->dev = vpfe->pdev;
drivers/media/platform/ti/am437x/am437x-vpfe.c
2238
err = vb2_queue_init(q);
drivers/media/platform/ti/am437x/am437x-vpfe.c
2253
vdev->queue = q;
drivers/media/platform/ti/cal/cal-video.c
1045
struct vb2_queue *q = &ctx->vb_vidq;
drivers/media/platform/ti/cal/cal-video.c
1054
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/ti/cal/cal-video.c
1055
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/ti/cal/cal-video.c
1056
q->drv_priv = ctx;
drivers/media/platform/ti/cal/cal-video.c
1057
q->buf_struct_size = sizeof(struct cal_buffer);
drivers/media/platform/ti/cal/cal-video.c
1058
q->ops = &cal_video_qops;
drivers/media/platform/ti/cal/cal-video.c
1059
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/ti/cal/cal-video.c
1060
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/ti/cal/cal-video.c
1061
q->lock = &ctx->mutex;
drivers/media/platform/ti/cal/cal-video.c
1062
q->min_queued_buffers = 3;
drivers/media/platform/ti/cal/cal-video.c
1063
q->dev = ctx->cal->dev;
drivers/media/platform/ti/cal/cal-video.c
1065
ret = vb2_queue_init(q);
drivers/media/platform/ti/cal/cal-video.c
1074
vfd->queue = q;
drivers/media/platform/ti/cal/cal-video.c
245
struct vb2_queue *q = &ctx->vb_vidq;
drivers/media/platform/ti/cal/cal-video.c
253
if (vb2_is_busy(q)) {
drivers/media/platform/ti/davinci/vpif_capture.c
1399
struct vb2_queue *q;
drivers/media/platform/ti/davinci/vpif_capture.c
1421
q = &common->buffer_queue;
drivers/media/platform/ti/davinci/vpif_capture.c
1422
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/ti/davinci/vpif_capture.c
1423
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/platform/ti/davinci/vpif_capture.c
1424
q->drv_priv = ch;
drivers/media/platform/ti/davinci/vpif_capture.c
1425
q->ops = &video_qops;
drivers/media/platform/ti/davinci/vpif_capture.c
1426
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/ti/davinci/vpif_capture.c
1427
q->buf_struct_size = sizeof(struct vpif_cap_buffer);
drivers/media/platform/ti/davinci/vpif_capture.c
1428
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/ti/davinci/vpif_capture.c
1429
q->min_queued_buffers = 1;
drivers/media/platform/ti/davinci/vpif_capture.c
1430
q->lock = &common->lock;
drivers/media/platform/ti/davinci/vpif_capture.c
1431
q->dev = vpif_dev;
drivers/media/platform/ti/davinci/vpif_capture.c
1433
err = vb2_queue_init(q);
drivers/media/platform/ti/davinci/vpif_capture.c
1449
vdev->queue = q;
drivers/media/platform/ti/davinci/vpif_capture.c
71
struct vb2_queue *q = vb->vb2_queue;
drivers/media/platform/ti/davinci/vpif_capture.c
72
struct channel_obj *ch = vb2_get_drv_priv(q);
drivers/media/platform/ti/davinci/vpif_display.c
1122
struct vb2_queue *q;
drivers/media/platform/ti/davinci/vpif_display.c
1162
q = &common->buffer_queue;
drivers/media/platform/ti/davinci/vpif_display.c
1163
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
drivers/media/platform/ti/davinci/vpif_display.c
1164
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/platform/ti/davinci/vpif_display.c
1165
q->drv_priv = ch;
drivers/media/platform/ti/davinci/vpif_display.c
1166
q->ops = &video_qops;
drivers/media/platform/ti/davinci/vpif_display.c
1167
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/ti/davinci/vpif_display.c
1168
q->buf_struct_size = sizeof(struct vpif_disp_buffer);
drivers/media/platform/ti/davinci/vpif_display.c
1169
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/ti/davinci/vpif_display.c
1170
q->min_queued_buffers = 1;
drivers/media/platform/ti/davinci/vpif_display.c
1171
q->lock = &common->lock;
drivers/media/platform/ti/davinci/vpif_display.c
1172
q->dev = vpif_dev;
drivers/media/platform/ti/davinci/vpif_display.c
1173
err = vb2_queue_init(q);
drivers/media/platform/ti/davinci/vpif_display.c
1193
vdev->queue = q;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
344
struct vb2_queue *q = &csi->vidq;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
347
if (vb2_is_busy(q))
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
746
static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
750
struct ti_csi2rx_dev *csi = vb2_get_drv_priv(q);
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
919
struct vb2_queue *q = &csi->vidq;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
922
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
923
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
924
q->drv_priv = csi;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
925
q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
926
q->ops = &csi_vb2_qops;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
927
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
928
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
929
q->dev = dmaengine_get_dma_device(csi->dma.chan);
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
930
q->lock = &csi->mutex;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
931
q->min_queued_buffers = 1;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
932
q->allow_cache_hints = 1;
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
934
ret = vb2_queue_init(q);
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
938
csi->vdev.queue = q;
drivers/media/platform/ti/omap3isp/ispvideo.h
202
#define isp_video_queue_to_isp_video_fh(q) \
drivers/media/platform/ti/omap3isp/ispvideo.h
203
container_of(q, struct isp_video_fh, queue)
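The omap3isp macro above is the standard container_of idiom: the vb2_queue is embedded (not pointed to) in a per-filehandle structure, so any callback that only receives the queue can recover its owner without extra bookkeeping. Illustrative sketch:

#include <linux/container_of.h>
#include <media/videobuf2-core.h>

struct example_fh {
	/* ... other per-open state ... */
	struct vb2_queue queue;		/* embedded, as in isp_video_fh */
};

static struct example_fh *example_fh_from_queue(struct vb2_queue *q)
{
	return container_of(q, struct example_fh, queue);
}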
drivers/media/platform/ti/vpe/vip.c
3032
struct vb2_queue *q;
drivers/media/platform/ti/vpe/vip.c
3059
q = &stream->vb_vidq;
drivers/media/platform/ti/vpe/vip.c
3060
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/platform/ti/vpe/vip.c
3061
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/platform/ti/vpe/vip.c
3062
q->drv_priv = stream;
drivers/media/platform/ti/vpe/vip.c
3063
q->buf_struct_size = sizeof(struct vip_buffer);
drivers/media/platform/ti/vpe/vip.c
3064
q->ops = &vip_video_qops;
drivers/media/platform/ti/vpe/vip.c
3065
q->mem_ops = &vb2_dma_contig_memops;
drivers/media/platform/ti/vpe/vip.c
3066
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/platform/ti/vpe/vip.c
3067
q->lock = &dev->mutex;
drivers/media/platform/ti/vpe/vip.c
3068
q->min_queued_buffers = 2;
drivers/media/platform/ti/vpe/vip.c
3069
q->dev = dev->v4l2_dev.dev;
drivers/media/platform/ti/vpe/vip.c
3071
ret = vb2_queue_init(q);
drivers/media/platform/ti/vpe/vip.c
3098
vfd->queue = q;
drivers/media/platform/ti/vpe/vip.c
3135
struct list_head *pos, *q;
drivers/media/platform/ti/vpe/vip.c
3142
list_for_each_safe(pos, q, &stream->dropq) {
drivers/media/platform/ti/vpe/vpe.c
2121
static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
drivers/media/platform/ti/vpe/vpe.c
2128
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/ti/vpe/vpe.c
2144
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/platform/ti/vpe/vpe.c
2174
static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/ti/vpe/vpe.c
2176
struct vpe_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/ti/vpe/vpe.c
2183
vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
drivers/media/platform/ti/vpe/vpe.c
2196
static void vpe_stop_streaming(struct vb2_queue *q)
drivers/media/platform/ti/vpe/vpe.c
2198
struct vpe_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/ti/vpe/vpe.c
2203
vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
drivers/media/platform/verisilicon/hantro_drv.c
49
struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
drivers/media/platform/verisilicon/hantro_drv.c
52
buf = vb2_find_buffer(q, ts);
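The hantro_drv.c pair above resolves a reference frame from a timestamp: the stateless decoder UAPI names reference buffers by the timestamp userspace assigned them, and vb2_find_buffer() maps that value back to a buffer on the CAPTURE queue. Sketch with hypothetical names:

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static struct vb2_buffer *example_find_ref(struct v4l2_m2m_ctx *m2m_ctx,
					   u64 timestamp)
{
	struct vb2_queue *q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/* returns NULL if no buffer carries this timestamp */
	return vb2_find_buffer(q, timestamp);
}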
drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
84
struct v4l2_ctrl_mpeg2_quantisation *q;
drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
86
q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
87
hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
179
const struct v4l2_vp8_quantization *q = &hdr->quant;
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
185
hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
188
u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
199
hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
200
hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
201
hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
202
hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
203
hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
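Both VP8 backends in this listing (hantro_g1 above, rockchip_vpu2 below) program the same six fields of struct v4l2_vp8_quantization, clamping each per-segment index into VP8's valid 0..127 quantiser range. A hedged sketch of just the clamp step, with the register write left out:

#include <linux/minmax.h>
#include <linux/videodev2.h>

static u32 example_segment_quant(const struct v4l2_vp8_quantization *q,
				 const struct v4l2_vp8_segment *seg,
				 unsigned int i)
{
	/* quant_update[] is a signed delta on the base index y_ac_qi;
	 * 0..127 is the VP8 quantiser index range */
	return clamp(q->y_ac_qi + seg->quant_update[i], 0, 127);
}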
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
95
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
drivers/media/platform/verisilicon/hantro_postproc.c
185
struct vb2_queue *queue = &m2m_ctx->cap_q_ctx.q;
drivers/media/platform/verisilicon/hantro_postproc.c
250
struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
drivers/media/platform/verisilicon/hantro_v4l2.c
488
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false;
drivers/media/platform/verisilicon/hantro_v4l2.c
495
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
drivers/media/platform/verisilicon/hantro_v4l2.c
759
vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
drivers/media/platform/verisilicon/hantro_v4l2.c
910
static bool hantro_vq_is_coded(struct vb2_queue *q)
drivers/media/platform/verisilicon/hantro_v4l2.c
912
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/verisilicon/hantro_v4l2.c
914
return ctx->is_encoder != V4L2_TYPE_IS_OUTPUT(q->type);
drivers/media/platform/verisilicon/hantro_v4l2.c
917
static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/platform/verisilicon/hantro_v4l2.c
919
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/verisilicon/hantro_v4l2.c
922
v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/platform/verisilicon/hantro_v4l2.c
924
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/verisilicon/hantro_v4l2.c
929
if (hantro_vq_is_coded(q)) {
drivers/media/platform/verisilicon/hantro_v4l2.c
932
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/verisilicon/hantro_v4l2.c
960
hantro_return_bufs(struct vb2_queue *q,
drivers/media/platform/verisilicon/hantro_v4l2.c
963
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/verisilicon/hantro_v4l2.c
977
static void hantro_stop_streaming(struct vb2_queue *q)
drivers/media/platform/verisilicon/hantro_v4l2.c
979
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/platform/verisilicon/hantro_v4l2.c
981
if (hantro_vq_is_coded(q)) {
drivers/media/platform/verisilicon/hantro_v4l2.c
992
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/platform/verisilicon/hantro_v4l2.c
993
hantro_return_bufs(q, v4l2_m2m_src_buf_remove);
drivers/media/platform/verisilicon/hantro_v4l2.c
995
hantro_return_bufs(q, v4l2_m2m_dst_buf_remove);
drivers/media/platform/verisilicon/hantro_v4l2.c
997
v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/platform/verisilicon/hantro_v4l2.c
999
if (V4L2_TYPE_IS_OUTPUT(q->type) &&
drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
4198
static int rockchip_av1_get_q_ctx(int q)
drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
4200
if (q <= 20)
drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
4202
if (q <= 60)
drivers/media/platform/verisilicon/rockchip_av1_entropymode.c
4204
if (q <= 120)
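rockchip_av1_get_q_ctx() above buckets the AV1 base quantiser into coarse context classes by simple thresholding. The comparisons come from the listing; the return values in this sketch (0..3) are an assumed completion for illustration only:

static int example_get_q_ctx(int q)
{
	if (q <= 20)
		return 0;	/* very low quantiser */
	if (q <= 60)
		return 1;
	if (q <= 120)
		return 2;
	return 3;		/* high quantiser */
}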
drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
86
struct v4l2_ctrl_mpeg2_quantisation *q;
drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
88
q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
89
hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
317
const struct v4l2_vp8_quantization *q = &hdr->quant;
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
323
hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
326
u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
337
hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
338
hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
339
hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
340
hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
341
hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
drivers/media/radio/radio-gemtek.c
153
int i, bit, q, mute;
drivers/media/radio/radio-gemtek.c
160
for (i = 0, q = gt->bu2614data; i < 32; i++, q >>= 1) {
drivers/media/radio/radio-gemtek.c
161
bit = (q & 1) ? GEMTEK_DA : 0;
drivers/media/radio/radio-gemtek.c
257
int i, q;
drivers/media/radio/radio-gemtek.c
259
q = inb_p(io); /* Read bus contents before probing. */
drivers/media/radio/radio-gemtek.c
269
outb_p(q >> 5, io); /* Write bus contents back. */
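The radio-gemtek loop above bit-bangs the 32-bit BU2614 tuner word out LSB-first over an ISA port, shifting q right each iteration and masking the low bit onto the data pin. A sketch of that shape; outb_p() is real, while the pin masks and the clock strobe sequence here are assumptions, not the listed driver's:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_DATA	0x01	/* hypothetical data pin */
#define EXAMPLE_CLK	0x02	/* hypothetical clock pin */

static void example_shift_out(unsigned int io, u32 word)
{
	int i;

	for (i = 0; i < 32; i++, word >>= 1) {
		u8 bit = (word & 1) ? EXAMPLE_DATA : 0;

		outb_p(bit, io);		/* present the data bit */
		outb_p(bit | EXAMPLE_CLK, io);	/* latch on the rising edge */
		outb_p(bit, io);		/* release the clock */
	}
}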
drivers/media/test-drivers/vicodec/vicodec-core.c
1215
if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
drivers/media/test-drivers/vicodec/vicodec-core.c
1228
vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
drivers/media/test-drivers/vicodec/vicodec-core.c
1249
if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
drivers/media/test-drivers/vicodec/vicodec-core.c
1262
vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
drivers/media/test-drivers/vicodec/vicodec-core.c
1511
static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
drivers/media/test-drivers/vicodec/vicodec-core.c
1513
struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/test-drivers/vicodec/vicodec-core.c
1517
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/test-drivers/vicodec/vicodec-core.c
1523
if (ctx->is_stateless && V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/test-drivers/vicodec/vicodec-core.c
1554
static int vicodec_start_streaming(struct vb2_queue *q,
drivers/media/test-drivers/vicodec/vicodec-core.c
1557
struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/test-drivers/vicodec/vicodec-core.c
1558
struct vicodec_q_data *q_data = get_q_data(ctx, q->type);
drivers/media/test-drivers/vicodec/vicodec-core.c
1569
v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/test-drivers/vicodec/vicodec-core.c
1573
if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
drivers/media/test-drivers/vicodec/vicodec-core.c
1574
(V4L2_TYPE_IS_CAPTURE(q->type) && ctx->is_enc))
drivers/media/test-drivers/vicodec/vicodec-core.c
1579
vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
drivers/media/test-drivers/vicodec/vicodec-core.c
1605
vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
drivers/media/test-drivers/vicodec/vicodec-core.c
1642
static void vicodec_stop_streaming(struct vb2_queue *q)
drivers/media/test-drivers/vicodec/vicodec-core.c
1644
struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/test-drivers/vicodec/vicodec-core.c
1646
vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
drivers/media/test-drivers/vicodec/vicodec-core.c
1648
v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
drivers/media/test-drivers/vicodec/vicodec-core.c
1650
if (V4L2_TYPE_IS_OUTPUT(q->type) &&
drivers/media/test-drivers/vicodec/vicodec-core.c
1654
if (!ctx->is_enc && V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/test-drivers/vicodec/vicodec-core.c
1657
if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
drivers/media/test-drivers/vicodec/vicodec-core.c
1658
(V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) {
drivers/media/test-drivers/vicodec/vicodec-core.c
1666
if (V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) {
drivers/media/test-drivers/vicodec/vicodec-core.c
724
!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q))
drivers/media/test-drivers/vim2m.c
1249
static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/test-drivers/vim2m.c
1251
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/test-drivers/vim2m.c
1252
struct vim2m_q_data *q_data = get_q_data(ctx, q->type);
drivers/media/test-drivers/vim2m.c
1257
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/test-drivers/vim2m.c
1264
static void vim2m_stop_streaming(struct vb2_queue *q)
drivers/media/test-drivers/vim2m.c
1266
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
drivers/media/test-drivers/vim2m.c
1272
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/test-drivers/vimc/vimc-capture.c
396
struct vb2_queue *q;
drivers/media/test-drivers/vimc/vimc-capture.c
417
q = &vcapture->queue;
drivers/media/test-drivers/vimc/vimc-capture.c
418
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/test-drivers/vimc/vimc-capture.c
419
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/test-drivers/vimc/vimc-capture.c
421
q->io_modes |= VB2_USERPTR;
drivers/media/test-drivers/vimc/vimc-capture.c
422
q->drv_priv = vcapture;
drivers/media/test-drivers/vimc/vimc-capture.c
423
q->buf_struct_size = sizeof(struct vimc_capture_buffer);
drivers/media/test-drivers/vimc/vimc-capture.c
424
q->ops = &vimc_capture_qops;
drivers/media/test-drivers/vimc/vimc-capture.c
425
q->mem_ops = vimc_allocator == VIMC_ALLOCATOR_DMA_CONTIG
drivers/media/test-drivers/vimc/vimc-capture.c
427
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/test-drivers/vimc/vimc-capture.c
428
q->min_reqbufs_allocation = 2;
drivers/media/test-drivers/vimc/vimc-capture.c
429
q->lock = &vcapture->lock;
drivers/media/test-drivers/vimc/vimc-capture.c
430
q->dev = v4l2_dev->dev;
drivers/media/test-drivers/vimc/vimc-capture.c
432
ret = vb2_queue_init(q);
drivers/media/test-drivers/vimc/vimc-capture.c
465
vdev->queue = q;
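
The vimc-capture hits above are the canonical vb2_queue bring-up: fill in the queue fields, validate them with vb2_queue_init(), then hand the queue to the video_device. A condensed sketch of the same sequence (struct and lock names hypothetical):

    struct vb2_queue *q = &dev->queue;
    int ret;

    q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    q->io_modes = VB2_MMAP | VB2_DMABUF;
    q->drv_priv = dev;                      /* read back via vb2_get_drv_priv() */
    q->buf_struct_size = sizeof(struct my_buffer);  /* per-buffer driver state */
    q->ops = &my_qops;                      /* the driver's vb2_ops */
    q->mem_ops = &vb2_vmalloc_memops;
    q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
    q->min_reqbufs_allocation = 2;
    q->lock = &dev->lock;                   /* serialises queue ioctls */
    q->dev = dev->v4l2_dev.dev;

    ret = vb2_queue_init(q);                /* validates the fields above */
    if (ret)
            return ret;
    vdev->queue = q;
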
drivers/media/test-drivers/visl/visl-dec.c
336
struct vb2_queue *out_q = &ctx->fh.m2m_ctx->out_q_ctx.q;
drivers/media/test-drivers/visl/visl-dec.c
337
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
drivers/media/test-drivers/visl/visl-dec.c
64
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
57
TP_PROTO(const struct v4l2_ctrl_mpeg2_quantisation *q),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
58
TP_ARGS(q),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
59
TP_STRUCT__entry(__field_struct(struct v4l2_ctrl_mpeg2_quantisation, q)),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
60
TP_fast_assign(__entry->q = *q;),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
63
__print_array(__entry->q.intra_quantiser_matrix,
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
64
ARRAY_SIZE(__entry->q.intra_quantiser_matrix),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
65
sizeof(__entry->q.intra_quantiser_matrix[0])),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
66
__print_array(__entry->q.non_intra_quantiser_matrix,
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
67
ARRAY_SIZE(__entry->q.non_intra_quantiser_matrix),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
68
sizeof(__entry->q.non_intra_quantiser_matrix[0])),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
69
__print_array(__entry->q.chroma_intra_quantiser_matrix,
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
70
ARRAY_SIZE(__entry->q.chroma_intra_quantiser_matrix),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
71
sizeof(__entry->q.chroma_intra_quantiser_matrix[0])),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
72
__print_array(__entry->q.chroma_non_intra_quantiser_matrix,
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
73
ARRAY_SIZE(__entry->q.chroma_non_intra_quantiser_matrix),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
74
sizeof(__entry->q.chroma_non_intra_quantiser_matrix[0]))
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
89
TP_PROTO(const struct v4l2_ctrl_mpeg2_quantisation *q),
drivers/media/test-drivers/visl/visl-trace-mpeg2.h
90
TP_ARGS(q)
drivers/media/test-drivers/vivid/vivid-core.c
890
struct vb2_queue *q,
drivers/media/test-drivers/vivid/vivid-core.c
904
q->type = buf_type;
drivers/media/test-drivers/vivid/vivid-core.c
905
q->io_modes = VB2_MMAP | VB2_DMABUF;
drivers/media/test-drivers/vivid/vivid-core.c
906
q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ;
drivers/media/test-drivers/vivid/vivid-core.c
915
q->max_num_buffers = MAX_VID_CAP_BUFFERS;
drivers/media/test-drivers/vivid/vivid-core.c
917
q->max_num_buffers = 1024;
drivers/media/test-drivers/vivid/vivid-core.c
919
q->max_num_buffers = 32768;
drivers/media/test-drivers/vivid/vivid-core.c
922
q->io_modes |= VB2_USERPTR;
drivers/media/test-drivers/vivid/vivid-core.c
923
q->drv_priv = dev;
drivers/media/test-drivers/vivid/vivid-core.c
924
q->buf_struct_size = sizeof(struct vivid_buffer);
drivers/media/test-drivers/vivid/vivid-core.c
925
q->ops = ops;
drivers/media/test-drivers/vivid/vivid-core.c
926
q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
drivers/media/test-drivers/vivid/vivid-core.c
928
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/test-drivers/vivid/vivid-core.c
929
q->min_reqbufs_allocation = min_reqbufs_allocation;
drivers/media/test-drivers/vivid/vivid-core.c
930
q->lock = &dev->mutex;
drivers/media/test-drivers/vivid/vivid-core.c
931
q->dev = dev->v4l2_dev.dev;
drivers/media/test-drivers/vivid/vivid-core.c
932
q->supports_requests = supports_requests[dev->inst];
drivers/media/test-drivers/vivid/vivid-core.c
933
q->requires_requests = supports_requests[dev->inst] >= 2;
drivers/media/test-drivers/vivid/vivid-core.c
934
q->allow_cache_hints = (cache_hints[dev->inst] == 1);
drivers/media/test-drivers/vivid/vivid-core.c
936
return vb2_queue_init(q);
drivers/media/test-drivers/vivid/vivid-sdr-cap.c
477
struct vb2_queue *q = &dev->vb_sdr_cap_q;
drivers/media/test-drivers/vivid/vivid-sdr-cap.c
480
if (vb2_is_busy(q))
drivers/media/test-drivers/vivid/vivid-vid-cap.c
672
struct vb2_queue *q = &dev->vb_vid_cap_q;
drivers/media/test-drivers/vivid/vivid-vid-cap.c
681
if (vb2_is_busy(q)) {
drivers/media/test-drivers/vivid/vivid-vid-out.c
448
struct vb2_queue *q = &dev->vb_vid_out_q;
drivers/media/test-drivers/vivid/vivid-vid-out.c
456
if (vb2_is_busy(q) &&
drivers/media/test-drivers/vivid/vivid-vid-out.c
471
if (vb2_is_busy(q))
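
The vivid hits above (and the airspy/msi2500/s2255/stk1160 ones below) all apply the same guard: once vb2_is_busy() reports that buffers are allocated, a format or timings change is refused with -EBUSY. A minimal sketch, assuming a hypothetical s_fmt handler:

    static int my_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
    {
            struct my_dev *dev = video_drvdata(file);
            struct vb2_queue *q = &dev->vb_vidq;

            if (vb2_is_busy(q))     /* buffers exist: geometry is locked */
                    return -EBUSY;
            /* ... apply the new format ... */
            return 0;
    }
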
drivers/media/tuners/max2165.c
153
u32 q, f = 0;
drivers/media/tuners/max2165.c
159
q = dividend / divisor;
drivers/media/tuners/max2165.c
160
remainder = dividend - q * divisor;
drivers/media/tuners/max2165.c
171
*quotient = q;
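
The max2165 fragment performs a truncating division and then derives the remainder from the quotient instead of a second % operation. Worked with small numbers: dividend = 10, divisor = 3 gives q = 3 and remainder = 10 - 3 * 3 = 1.

    u32 dividend = 10, divisor = 3;
    u32 q = dividend / divisor;              /* 3 */
    u32 remainder = dividend - q * divisor;  /* 1, same as dividend % divisor */
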
drivers/media/usb/airspy/airspy.c
645
struct vb2_queue *q = &s->vb_queue;
drivers/media/usb/airspy/airspy.c
648
if (vb2_is_busy(q))
drivers/media/usb/au0828/au0828-video.c
1802
struct vb2_queue *q;
drivers/media/usb/au0828/au0828-video.c
1805
q = &dev->vb_vidq;
drivers/media/usb/au0828/au0828-video.c
1806
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/usb/au0828/au0828-video.c
1807
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/usb/au0828/au0828-video.c
1808
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/au0828/au0828-video.c
1809
q->drv_priv = dev;
drivers/media/usb/au0828/au0828-video.c
1810
q->buf_struct_size = sizeof(struct au0828_buffer);
drivers/media/usb/au0828/au0828-video.c
1811
q->ops = &au0828_video_qops;
drivers/media/usb/au0828/au0828-video.c
1812
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/au0828/au0828-video.c
1814
rc = vb2_queue_init(q);
drivers/media/usb/au0828/au0828-video.c
1819
q = &dev->vb_vbiq;
drivers/media/usb/au0828/au0828-video.c
1820
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/usb/au0828/au0828-video.c
1821
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/usb/au0828/au0828-video.c
1822
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/au0828/au0828-video.c
1823
q->drv_priv = dev;
drivers/media/usb/au0828/au0828-video.c
1824
q->buf_struct_size = sizeof(struct au0828_buffer);
drivers/media/usb/au0828/au0828-video.c
1825
q->ops = &au0828_vbi_qops;
drivers/media/usb/au0828/au0828-video.c
1826
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/au0828/au0828-video.c
1828
rc = vb2_queue_init(q);
drivers/media/usb/au0828/au0828-video.c
290
struct vb2_queue *q = vb->vb2_buf.vb2_queue;
drivers/media/usb/au0828/au0828-video.c
295
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
drivers/media/usb/cx231xx/cx231xx-417.c
1724
struct vb2_queue *q;
drivers/media/usb/cx231xx/cx231xx-417.c
1762
q = &dev->mpegq;
drivers/media/usb/cx231xx/cx231xx-417.c
1763
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/usb/cx231xx/cx231xx-417.c
1764
q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/media/usb/cx231xx/cx231xx-417.c
1765
q->drv_priv = dev;
drivers/media/usb/cx231xx/cx231xx-417.c
1766
q->buf_struct_size = sizeof(struct cx231xx_buffer);
drivers/media/usb/cx231xx/cx231xx-417.c
1767
q->ops = &cx231xx_video_qops;
drivers/media/usb/cx231xx/cx231xx-417.c
1768
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/cx231xx/cx231xx-417.c
1769
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/cx231xx/cx231xx-417.c
1770
q->min_queued_buffers = 1;
drivers/media/usb/cx231xx/cx231xx-417.c
1771
q->lock = &dev->lock;
drivers/media/usb/cx231xx/cx231xx-417.c
1772
err = vb2_queue_init(q);
drivers/media/usb/cx231xx/cx231xx-417.c
1775
dev->v4l_device.queue = q;
drivers/media/usb/cx231xx/cx231xx-video.c
1756
struct vb2_queue *q;
drivers/media/usb/cx231xx/cx231xx-video.c
1804
q = &dev->vidq;
drivers/media/usb/cx231xx/cx231xx-video.c
1805
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/usb/cx231xx/cx231xx-video.c
1806
q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/media/usb/cx231xx/cx231xx-video.c
1807
q->drv_priv = dev;
drivers/media/usb/cx231xx/cx231xx-video.c
1808
q->buf_struct_size = sizeof(struct cx231xx_buffer);
drivers/media/usb/cx231xx/cx231xx-video.c
1809
q->ops = &cx231xx_video_qops;
drivers/media/usb/cx231xx/cx231xx-video.c
1810
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/cx231xx/cx231xx-video.c
1811
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/cx231xx/cx231xx-video.c
1812
q->min_queued_buffers = 1;
drivers/media/usb/cx231xx/cx231xx-video.c
1813
q->lock = &dev->lock;
drivers/media/usb/cx231xx/cx231xx-video.c
1814
ret = vb2_queue_init(q);
drivers/media/usb/cx231xx/cx231xx-video.c
1817
dev->vdev.queue = q;
drivers/media/usb/cx231xx/cx231xx-video.c
1864
q = &dev->vbiq;
drivers/media/usb/cx231xx/cx231xx-video.c
1865
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/usb/cx231xx/cx231xx-video.c
1866
q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/media/usb/cx231xx/cx231xx-video.c
1867
q->drv_priv = dev;
drivers/media/usb/cx231xx/cx231xx-video.c
1868
q->buf_struct_size = sizeof(struct cx231xx_buffer);
drivers/media/usb/cx231xx/cx231xx-video.c
1869
q->ops = &cx231xx_vbi_qops;
drivers/media/usb/cx231xx/cx231xx-video.c
1870
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/cx231xx/cx231xx-video.c
1871
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/cx231xx/cx231xx-video.c
1872
q->min_queued_buffers = 1;
drivers/media/usb/cx231xx/cx231xx-video.c
1873
q->lock = &dev->lock;
drivers/media/usb/cx231xx/cx231xx-video.c
1874
ret = vb2_queue_init(q);
drivers/media/usb/cx231xx/cx231xx-video.c
1877
dev->vbi_dev.queue = q;
drivers/media/usb/dvb-usb/cxusb-analog.c
39
static int cxusb_medion_v_queue_setup(struct vb2_queue *q,
drivers/media/usb/dvb-usb/cxusb-analog.c
45
struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
drivers/media/usb/dvb-usb/cxusb-analog.c
772
static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
drivers/media/usb/dvb-usb/cxusb-analog.c
775
struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
drivers/media/usb/dvb-usb/cxusb-analog.c
898
static void cxusb_medion_v_stop_streaming(struct vb2_queue *q)
drivers/media/usb/dvb-usb/cxusb-analog.c
900
struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
drivers/media/usb/em28xx/em28xx-video.c
1237
struct vb2_queue *q;
drivers/media/usb/em28xx/em28xx-video.c
1241
q = &v4l2->vb_vidq;
drivers/media/usb/em28xx/em28xx-video.c
1242
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/usb/em28xx/em28xx-video.c
1243
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/usb/em28xx/em28xx-video.c
1244
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/em28xx/em28xx-video.c
1245
q->drv_priv = dev;
drivers/media/usb/em28xx/em28xx-video.c
1246
q->buf_struct_size = sizeof(struct em28xx_buffer);
drivers/media/usb/em28xx/em28xx-video.c
1247
q->ops = &em28xx_video_qops;
drivers/media/usb/em28xx/em28xx-video.c
1248
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/em28xx/em28xx-video.c
1250
rc = vb2_queue_init(q);
drivers/media/usb/em28xx/em28xx-video.c
1255
q = &v4l2->vb_vbiq;
drivers/media/usb/em28xx/em28xx-video.c
1256
q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
drivers/media/usb/em28xx/em28xx-video.c
1257
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR;
drivers/media/usb/em28xx/em28xx-video.c
1258
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/em28xx/em28xx-video.c
1259
q->drv_priv = dev;
drivers/media/usb/em28xx/em28xx-video.c
1260
q->buf_struct_size = sizeof(struct em28xx_buffer);
drivers/media/usb/em28xx/em28xx-video.c
1261
q->ops = &em28xx_vbi_qops;
drivers/media/usb/em28xx/em28xx-video.c
1262
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/em28xx/em28xx-video.c
1264
rc = vb2_queue_init(q);
drivers/media/usb/go7007/go7007-fw.c
290
static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q)
drivers/media/usb/go7007/go7007-fw.c
304
buf[p++] = (default_intra_quant_table[zz[i]] * q) >> 3;
drivers/media/usb/go7007/go7007-fw.c
930
int q = 0;
drivers/media/usb/go7007/go7007-fw.c
937
q > 0 ? sgop_expt_addr * q :
drivers/media/usb/go7007/go7007-fw.c
939
q > 0 ? sgop_expt_addr * q :
drivers/media/usb/go7007/go7007-fw.c
941
q > 0 ? sgop_expt_addr * q :
drivers/media/usb/go7007/go7007-fw.c
943
q > 0 ? sgop_expt_addr * q :
drivers/media/usb/go7007/go7007-fw.c
946
u32 calc_q = q > 0 ? q : cplx[0] / sgop_expt_addr;
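
In go7007-fw.c, mjpeg_frame_header() scales a reference quantisation table by q/8, so q = 8 emits the table unchanged and larger q coarsens the quantisation. One worked entry, with an illustrative table value:

    unsigned char entry = 16;                /* hypothetical table entry */
    int q = 12;
    unsigned char scaled = (entry * q) >> 3; /* 16 * 12 / 8 = 24 */
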
drivers/media/usb/go7007/go7007-v4l2.c
343
static int go7007_queue_setup(struct vb2_queue *q,
drivers/media/usb/go7007/go7007-v4l2.c
397
static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/media/usb/go7007/go7007-v4l2.c
399
struct go7007 *go = vb2_get_drv_priv(q);
drivers/media/usb/go7007/go7007-v4l2.c
425
static void go7007_stop_streaming(struct vb2_queue *q)
drivers/media/usb/go7007/go7007-v4l2.c
427
struct go7007 *go = vb2_get_drv_priv(q);
drivers/media/usb/gspca/gspca.c
1450
struct vb2_queue *q;
drivers/media/usb/gspca/gspca.c
1510
q = &gspca_dev->queue;
drivers/media/usb/gspca/gspca.c
1511
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/usb/gspca/gspca.c
1512
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
drivers/media/usb/gspca/gspca.c
1513
q->drv_priv = gspca_dev;
drivers/media/usb/gspca/gspca.c
1514
q->buf_struct_size = sizeof(struct gspca_buffer);
drivers/media/usb/gspca/gspca.c
1515
q->ops = &gspca_qops;
drivers/media/usb/gspca/gspca.c
1516
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/gspca/gspca.c
1517
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/gspca/gspca.c
1518
q->min_queued_buffers = 2;
drivers/media/usb/gspca/gspca.c
1519
q->lock = &gspca_dev->usb_lock;
drivers/media/usb/gspca/gspca.c
1520
ret = vb2_queue_init(q);
drivers/media/usb/gspca/gspca.c
1523
gspca_dev->vdev.queue = q;
drivers/media/usb/gspca/topro.c
1439
static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
drivers/media/usb/gspca/topro.c
1444
gspca_dbg(gspca_dev, D_STREAM, "q %d -> %d\n", sd->quality, q);
drivers/media/usb/gspca/topro.c
1445
sd->quality = q;
drivers/media/usb/gspca/topro.c
1446
if (q > 16)
drivers/media/usb/gspca/topro.c
1447
q = 16;
drivers/media/usb/gspca/topro.c
1449
jpeg_set_qual(sd->jpeg_hdr, jpeg_q[q]);
drivers/media/usb/gspca/topro.c
1452
DQT[q], sizeof DQT[0]);
drivers/media/usb/gspca/topro.c
1456
static void setquality(struct gspca_dev *gspca_dev, s32 q)
drivers/media/usb/gspca/topro.c
1460
if (q != 16)
drivers/media/usb/gspca/topro.c
1461
q = 15 - q;
drivers/media/usb/gspca/topro.c
1465
reg_w(gspca_dev, TP6800_R79_QUALITY, q);
drivers/media/usb/gspca/topro.c
1468
if (q == 15 && sd->bridge == BRIDGE_TP6810) {
drivers/media/usb/hackrf/hackrf.c
917
struct vb2_queue *q;
drivers/media/usb/hackrf/hackrf.c
924
q = &dev->rx_vb2_queue;
drivers/media/usb/hackrf/hackrf.c
926
q = &dev->tx_vb2_queue;
drivers/media/usb/hackrf/hackrf.c
928
if (vb2_is_busy(q))
drivers/media/usb/hdpvr/hdpvr-video.c
102
static int hdpvr_free_queue(struct list_head *q)
drivers/media/usb/hdpvr/hdpvr-video.c
109
for (p = q->next; p != q;) {
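
hdpvr_free_queue() walks a list_head ring by hand: p starts at q->next and the loop terminates when p wraps back around to the head q, freeing each node along the way. A hedged sketch of the same walk using the safe iterator (node type hypothetical), which caches the next pointer before the current node is freed:

    struct my_buffer {
            struct list_head list;
    };

    static void my_free_queue(struct list_head *q)
    {
            struct my_buffer *buf, *tmp;

            list_for_each_entry_safe(buf, tmp, q, list) {
                    list_del(&buf->list);
                    kfree(buf);
            }
    }
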
drivers/media/usb/msi2500/msi2500.c
921
struct vb2_queue *q = &dev->vb_queue;
drivers/media/usb/msi2500/msi2500.c
927
if (vb2_is_busy(q))
drivers/media/usb/s2255/s2255drv.c
1096
struct vb2_queue *q = &vc->vb_vidq;
drivers/media/usb/s2255/s2255drv.c
1102
if (vb2_is_busy(q))
drivers/media/usb/s2255/s2255drv.c
1588
struct vb2_queue *q;
drivers/media/usb/s2255/s2255drv.c
1622
q = &vc->vb_vidq;
drivers/media/usb/s2255/s2255drv.c
1623
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/usb/s2255/s2255drv.c
1624
q->io_modes = VB2_MMAP | VB2_READ | VB2_USERPTR;
drivers/media/usb/s2255/s2255drv.c
1625
q->drv_priv = vc;
drivers/media/usb/s2255/s2255drv.c
1626
q->lock = &vc->vb_lock;
drivers/media/usb/s2255/s2255drv.c
1627
q->buf_struct_size = sizeof(struct s2255_buffer);
drivers/media/usb/s2255/s2255drv.c
1628
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/s2255/s2255drv.c
1629
q->ops = &s2255_video_qops;
drivers/media/usb/s2255/s2255drv.c
1630
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/s2255/s2255drv.c
1631
ret = vb2_queue_init(q);
drivers/media/usb/s2255/s2255drv.c
1639
vc->vdev.queue = q;
drivers/media/usb/s2255/s2255drv.c
811
struct vb2_queue *q = &vc->vb_vidq;
drivers/media/usb/s2255/s2255drv.c
825
if (vb2_is_busy(q)) {
drivers/media/usb/stk1160/stk1160-v4l.c
483
struct vb2_queue *q = &dev->vb_vidq;
drivers/media/usb/stk1160/stk1160-v4l.c
487
if (vb2_is_busy(q))
drivers/media/usb/stk1160/stk1160-v4l.c
518
struct vb2_queue *q = &dev->vb_vidq;
drivers/media/usb/stk1160/stk1160-v4l.c
523
if (vb2_is_busy(q))
drivers/media/usb/stk1160/stk1160-v4l.c
777
struct vb2_queue *q;
drivers/media/usb/stk1160/stk1160-v4l.c
779
q = &dev->vb_vidq;
drivers/media/usb/stk1160/stk1160-v4l.c
780
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/media/usb/stk1160/stk1160-v4l.c
781
q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
drivers/media/usb/stk1160/stk1160-v4l.c
782
q->drv_priv = dev;
drivers/media/usb/stk1160/stk1160-v4l.c
783
q->buf_struct_size = sizeof(struct stk1160_buffer);
drivers/media/usb/stk1160/stk1160-v4l.c
784
q->ops = &stk1160_video_qops;
drivers/media/usb/stk1160/stk1160-v4l.c
785
q->mem_ops = &vb2_vmalloc_memops;
drivers/media/usb/stk1160/stk1160-v4l.c
786
q->lock = &dev->vb_queue_lock;
drivers/media/usb/stk1160/stk1160-v4l.c
787
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/media/usb/stk1160/stk1160-v4l.c
789
rc = vb2_queue_init(q);
drivers/media/usb/uvc/uvc_driver.c
2532
#define UVC_INFO_QUIRK(q) (kernel_ulong_t)&(struct uvc_device_info){.quirks = q}
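
UVC_INFO_QUIRK builds an anonymous compound literal of struct uvc_device_info and smuggles its address through the match table's kernel_ulong_t driver_info field. A hypothetical usb_device_id entry showing how it is consumed (the vendor/product IDs are illustrative, not taken from the real table):

    static const struct usb_device_id my_ids[] = {
            { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
              .idVendor    = 0x046d,          /* illustrative */
              .idProduct   = 0x08c2,          /* illustrative */
              .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX) },
            { }
    };
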
drivers/media/v4l2-core/v4l2-ctrls-core.c
653
static int validate_av1_quantization(struct v4l2_av1_quantization *q)
drivers/media/v4l2-core/v4l2-ctrls-core.c
655
if (q->flags > GENMASK(2, 0))
drivers/media/v4l2-core/v4l2-ctrls-core.c
658
if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
659
q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
660
q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
661
q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
662
q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
663
q->delta_q_res > GENMASK(1, 0))
drivers/media/v4l2-core/v4l2-ctrls-core.c
666
if (q->qm_y > GENMASK(3, 0) ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
667
q->qm_u > GENMASK(3, 0) ||
drivers/media/v4l2-core/v4l2-ctrls-core.c
668
q->qm_v > GENMASK(3, 0))
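
validate_av1_quantization() bounds each field against GENMASK(h, l), the all-ones mask between bits l and h: GENMASK(2, 0) = 0x7, GENMASK(1, 0) = 0x3 and GENMASK(3, 0) = 0xf, so flags may use three bits, delta_q_res two, and each qm_* index four. The bounding idiom in isolation:

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Reject any value that sets bits outside the low three. */
    static int validate_flags(u32 flags)
    {
            if (flags > GENMASK(2, 0))      /* i.e. flags > 0x7 */
                    return -EINVAL;
            return 0;
    }
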
drivers/media/v4l2-core/v4l2-mc.c
314
int v4l_vb2q_enable_media_source(struct vb2_queue *q)
drivers/media/v4l2-core/v4l2-mc.c
316
struct v4l2_fh *fh = q->owner;
drivers/media/v4l2-core/v4l2-mem2mem.c
1259
ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
drivers/media/v4l2-core/v4l2-mem2mem.c
1267
if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
drivers/media/v4l2-core/v4l2-mem2mem.c
1271
m2m_ctx->q_lock = out_q_ctx->q.lock;
drivers/media/v4l2-core/v4l2-mem2mem.c
1285
vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
drivers/media/v4l2-core/v4l2-mem2mem.c
1286
vb2_queue_release(&m2m_ctx->out_q_ctx.q);
drivers/media/v4l2-core/v4l2-mem2mem.c
129
return &get_queue_ctx(m2m_ctx, type)->q;
drivers/media/v4l2-core/v4l2-mem2mem.c
1351
out_q_ctx.q);
drivers/media/v4l2-core/v4l2-mem2mem.c
1395
struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type);
drivers/media/v4l2-core/v4l2-mem2mem.c
1397
if (q->type != remove->type)
drivers/media/v4l2-core/v4l2-mem2mem.c
1400
return vb2_core_remove_bufs(q, remove->index, remove->count);
drivers/media/v4l2-core/v4l2-mem2mem.c
301
if (!m2m_ctx->out_q_ctx.q.streaming ||
drivers/media/v4l2-core/v4l2-mem2mem.c
302
(!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) {
drivers/media/v4l2-core/v4l2-mem2mem.c
351
if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
drivers/media/v4l2-core/v4l2-mem2mem.c
495
WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
drivers/media/v4l2-core/v4l2-mem2mem.c
689
struct vb2_queue *q)
drivers/media/v4l2-core/v4l2-mem2mem.c
692
if (V4L2_TYPE_IS_OUTPUT(q->type))
drivers/media/v4l2-core/v4l2-mem2mem.c
702
struct vb2_queue *q)
drivers/media/v4l2-core/v4l2-mem2mem.c
704
if (V4L2_TYPE_IS_OUTPUT(q->type)) {
drivers/media/v4l2-core/v4l2-mem2mem.c
730
struct vb2_queue *q)
drivers/media/v4l2-core/v4l2-mem2mem.c
736
if (WARN_ON(q->is_output))
drivers/media/v4l2-core/v4l2-mem2mem.c
738
if (list_empty(&q->queued_list))
drivers/media/v4l2-core/v4l2-mem2mem.c
741
vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
drivers/media/v4l2-core/v4l2-mem2mem.c
750
atomic_inc(&q->owned_by_drv_count);
drivers/media/v4l2-core/v4l2-mem2mem.c
881
ret = vb2_streamoff(&q_ctx->q, type);
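
The v4l2-mem2mem hits sketch the contract around v4l2_m2m_ctx_init(): the driver's queue_init callback configures both vb2 queues, and, as the WARN_ON comparing out_q_ctx->q.lock with cap_q_ctx->q.lock above shows, both queues must share one lock so the core can adopt it as m2m_ctx->q_lock. A hedged sketch of such a callback (context and ops names hypothetical):

    static int my_queue_init(void *priv, struct vb2_queue *src_vq,
                             struct vb2_queue *dst_vq)
    {
            struct my_ctx *ctx = priv;
            int ret;

            src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
            src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
            src_vq->drv_priv = ctx;
            src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
            src_vq->ops = &my_qops;
            src_vq->mem_ops = &vb2_vmalloc_memops;
            src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
            src_vq->lock = &ctx->dev->mutex;        /* must equal dst_vq->lock */
            ret = vb2_queue_init(src_vq);
            if (ret)
                    return ret;

            dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
            dst_vq->drv_priv = ctx;
            dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
            dst_vq->ops = &my_qops;
            dst_vq->mem_ops = &vb2_vmalloc_memops;
            dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
            dst_vq->lock = &ctx->dev->mutex;
            return vb2_queue_init(dst_vq);
    }
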
drivers/misc/ntsync.c
1001
struct ntsync_q_entry *entry = &q->entries[i];
drivers/misc/ntsync.c
1011
signaled = atomic_read(&q->signaled);
drivers/misc/ntsync.c
1016
ret = q->ownerdead ? -EOWNERDEAD : 0;
drivers/misc/ntsync.c
1024
kfree(q);
drivers/misc/ntsync.c
1031
struct ntsync_q *q;
drivers/misc/ntsync.c
1039
ret = setup_wait(dev, &args, true, &q);
drivers/misc/ntsync.c
1048
struct ntsync_q_entry *entry = &q->entries[i];
drivers/misc/ntsync.c
1061
struct ntsync_q_entry *entry = &q->entries[args.count];
drivers/misc/ntsync.c
1071
try_wake_all(dev, q, NULL);
drivers/misc/ntsync.c
1081
struct ntsync_obj *obj = q->entries[args.count].obj;
drivers/misc/ntsync.c
1083
if (atomic_read(&q->signaled) == -1) {
drivers/misc/ntsync.c
1092
ret = ntsync_schedule(q, &args);
drivers/misc/ntsync.c
1099
struct ntsync_q_entry *entry = &q->entries[i];
drivers/misc/ntsync.c
1116
struct ntsync_q_entry *entry = &q->entries[args.count];
drivers/misc/ntsync.c
1127
signaled = atomic_read(&q->signaled);
drivers/misc/ntsync.c
1132
ret = q->ownerdead ? -EOWNERDEAD : 0;
drivers/misc/ntsync.c
1140
kfree(q);
drivers/misc/ntsync.c
254
static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
drivers/misc/ntsync.c
257
__u32 count = q->count;
drivers/misc/ntsync.c
267
if (q->entries[i].obj != locked_obj)
drivers/misc/ntsync.c
268
dev_lock_obj(dev, q->entries[i].obj);
drivers/misc/ntsync.c
272
if (!is_signaled(q->entries[i].obj, q->owner)) {
drivers/misc/ntsync.c
278
if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
drivers/misc/ntsync.c
280
struct ntsync_obj *obj = q->entries[i].obj;
drivers/misc/ntsync.c
288
q->ownerdead = true;
drivers/misc/ntsync.c
291
obj->u.mutex.owner = q->owner;
drivers/misc/ntsync.c
299
wake_up_process(q->task);
drivers/misc/ntsync.c
303
if (q->entries[i].obj != locked_obj)
drivers/misc/ntsync.c
304
dev_unlock_obj(dev, q->entries[i].obj);
drivers/misc/ntsync.c
316
try_wake_all(dev, entry->q, obj);
drivers/misc/ntsync.c
327
struct ntsync_q *q = entry->q;
drivers/misc/ntsync.c
333
if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
drivers/misc/ntsync.c
335
wake_up_process(q->task);
drivers/misc/ntsync.c
348
struct ntsync_q *q = entry->q;
drivers/misc/ntsync.c
353
if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner)
drivers/misc/ntsync.c
356
if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
drivers/misc/ntsync.c
358
q->ownerdead = true;
drivers/misc/ntsync.c
361
mutex->u.mutex.owner = q->owner;
drivers/misc/ntsync.c
362
wake_up_process(q->task);
drivers/misc/ntsync.c
375
struct ntsync_q *q = entry->q;
drivers/misc/ntsync.c
381
if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
drivers/misc/ntsync.c
384
wake_up_process(q->task);
drivers/misc/ntsync.c
828
static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
drivers/misc/ntsync.c
847
if (atomic_read(&q->signaled) != -1) {
drivers/misc/ntsync.c
868
struct ntsync_q *q;
drivers/misc/ntsync.c
887
q = kmalloc_flex(*q, entries, total_count);
drivers/misc/ntsync.c
888
if (!q)
drivers/misc/ntsync.c
890
q->task = current;
drivers/misc/ntsync.c
891
q->owner = args->owner;
drivers/misc/ntsync.c
892
atomic_set(&q->signaled, -1);
drivers/misc/ntsync.c
893
q->all = all;
drivers/misc/ntsync.c
894
q->ownerdead = false;
drivers/misc/ntsync.c
895
q->count = count;
drivers/misc/ntsync.c
898
struct ntsync_q_entry *entry = &q->entries[i];
drivers/misc/ntsync.c
907
if (obj == q->entries[j].obj) {
drivers/misc/ntsync.c
915
entry->q = q;
drivers/misc/ntsync.c
919
*ret_q = q;
drivers/misc/ntsync.c
924
put_obj(q->entries[j].obj);
drivers/misc/ntsync.c
925
kfree(q);
drivers/misc/ntsync.c
948
struct ntsync_q *q;
drivers/misc/ntsync.c
956
ret = setup_wait(dev, &args, false, &q);
drivers/misc/ntsync.c
967
struct ntsync_q_entry *entry = &q->entries[i];
drivers/misc/ntsync.c
984
struct ntsync_obj *obj = q->entries[i].obj;
drivers/misc/ntsync.c
986
if (atomic_read(&q->signaled) != -1)
drivers/misc/ntsync.c
99
struct ntsync_q *q;
drivers/misc/ntsync.c
996
ret = ntsync_schedule(q, &args);
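
All three ntsync wake paths (semaphore, mutex and event) resolve wake races the same way: q->signaled starts at -1 when the wait is set up, and whichever waker first wins atomic_try_cmpxchg(&q->signaled, &signaled, entry->index) owns the wake; later wakers observe the changed value and back off. The idiom in isolation:

    int signaled = -1;      /* expected value; set at wait setup time */

    if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
            /* We won the race: consume the object, then wake the sleeper. */
            wake_up_process(q->task);
    }
    /* else: another object already satisfied this wait; do nothing. */
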
drivers/misc/sgi-gru/grukservices.c
991
#define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))
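
ALIGNUP is the classic power-of-two round-up: add q - 1, then clear the low bits with the mask ~(q - 1). Worked for p = 0x1003, q = 16: 0x1003 + 0xf = 0x1012, and 0x1012 & ~0xf = 0x1010. (Note that q appears unparenthesised in ~(q - 1), so the macro is only safe for simple arguments.)

    unsigned long p = 0x1003, q = 16;
    void *aligned = (void *)((p + q - 1) & ~(q - 1));   /* (void *)0x1010 */
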
drivers/misc/uacce/uacce.c
101
ret = uacce->ops->ioctl(q, cmd, arg);
drivers/misc/uacce/uacce.c
120
static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
drivers/misc/uacce/uacce.c
138
q->handle = handle;
drivers/misc/uacce/uacce.c
139
q->pasid = pasid;
drivers/misc/uacce/uacce.c
143
static void uacce_unbind_queue(struct uacce_queue *q)
drivers/misc/uacce/uacce.c
145
if (!q->handle)
drivers/misc/uacce/uacce.c
147
iommu_sva_unbind_device(q->handle);
drivers/misc/uacce/uacce.c
148
q->handle = NULL;
drivers/misc/uacce/uacce.c
154
struct uacce_queue *q;
drivers/misc/uacce/uacce.c
161
q = kzalloc_obj(struct uacce_queue);
drivers/misc/uacce/uacce.c
162
if (!q)
drivers/misc/uacce/uacce.c
172
ret = uacce_bind_queue(uacce, q);
drivers/misc/uacce/uacce.c
176
q->uacce = uacce;
drivers/misc/uacce/uacce.c
179
ret = uacce->ops->get_queue(uacce, q->pasid, q);
drivers/misc/uacce/uacce.c
184
init_waitqueue_head(&q->wait);
drivers/misc/uacce/uacce.c
185
filep->private_data = q;
drivers/misc/uacce/uacce.c
186
q->state = UACCE_Q_INIT;
drivers/misc/uacce/uacce.c
187
q->mapping = filep->f_mapping;
drivers/misc/uacce/uacce.c
188
mutex_init(&q->mutex);
drivers/misc/uacce/uacce.c
189
list_add(&q->list, &uacce->queues);
drivers/misc/uacce/uacce.c
195
uacce_unbind_queue(q);
drivers/misc/uacce/uacce.c
197
kfree(q);
drivers/misc/uacce/uacce.c
204
struct uacce_queue *q = filep->private_data;
drivers/misc/uacce/uacce.c
205
struct uacce_device *uacce = q->uacce;
drivers/misc/uacce/uacce.c
208
uacce_put_queue(q);
drivers/misc/uacce/uacce.c
209
uacce_unbind_queue(q);
drivers/misc/uacce/uacce.c
21
static bool uacce_queue_is_valid(struct uacce_queue *q)
drivers/misc/uacce/uacce.c
210
list_del(&q->list);
drivers/misc/uacce/uacce.c
212
kfree(q);
drivers/misc/uacce/uacce.c
219
struct uacce_queue *q = vma->vm_private_data;
drivers/misc/uacce/uacce.c
222
struct uacce_qfile_region *qfr = q->qfrs[vma->vm_pgoff];
drivers/misc/uacce/uacce.c
224
mutex_lock(&q->mutex);
drivers/misc/uacce/uacce.c
225
q->qfrs[vma->vm_pgoff] = NULL;
drivers/misc/uacce/uacce.c
226
mutex_unlock(&q->mutex);
drivers/misc/uacce/uacce.c
23
return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
drivers/misc/uacce/uacce.c
243
struct uacce_queue *q = filep->private_data;
drivers/misc/uacce/uacce.c
244
struct uacce_device *uacce = q->uacce;
drivers/misc/uacce/uacce.c
26
static int uacce_start_queue(struct uacce_queue *q)
drivers/misc/uacce/uacce.c
260
vma->vm_private_data = q;
drivers/misc/uacce/uacce.c
263
mutex_lock(&q->mutex);
drivers/misc/uacce/uacce.c
264
if (!uacce_queue_is_valid(q)) {
drivers/misc/uacce/uacce.c
269
if (q->qfrs[type]) {
drivers/misc/uacce/uacce.c
282
ret = uacce->ops->mmap(q, vma, qfr);
drivers/misc/uacce/uacce.c
292
q->qfrs[type] = qfr;
drivers/misc/uacce/uacce.c
293
mutex_unlock(&q->mutex);
drivers/misc/uacce/uacce.c
298
mutex_unlock(&q->mutex);
drivers/misc/uacce/uacce.c
30
if (q->state != UACCE_Q_INIT)
drivers/misc/uacce/uacce.c
305
struct uacce_queue *q = file->private_data;
drivers/misc/uacce/uacce.c
306
struct uacce_device *uacce = q->uacce;
drivers/misc/uacce/uacce.c
309
mutex_lock(&q->mutex);
drivers/misc/uacce/uacce.c
310
if (!uacce_queue_is_valid(q))
drivers/misc/uacce/uacce.c
313
poll_wait(file, &q->wait, wait);
drivers/misc/uacce/uacce.c
315
if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
drivers/misc/uacce/uacce.c
319
mutex_unlock(&q->mutex);
drivers/misc/uacce/uacce.c
33
if (q->uacce->ops->start_queue) {
drivers/misc/uacce/uacce.c
34
ret = q->uacce->ops->start_queue(q);
drivers/misc/uacce/uacce.c
39
q->state = UACCE_Q_STARTED;
drivers/misc/uacce/uacce.c
43
static int uacce_stop_queue(struct uacce_queue *q)
drivers/misc/uacce/uacce.c
45
struct uacce_device *uacce = q->uacce;
drivers/misc/uacce/uacce.c
47
if (q->state != UACCE_Q_STARTED)
drivers/misc/uacce/uacce.c
51
uacce->ops->stop_queue(q);
drivers/misc/uacce/uacce.c
53
q->state = UACCE_Q_INIT;
drivers/misc/uacce/uacce.c
574
struct uacce_queue *q, *next_q;
drivers/misc/uacce/uacce.c
58
static void uacce_put_queue(struct uacce_queue *q)
drivers/misc/uacce/uacce.c
586
list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
drivers/misc/uacce/uacce.c
591
mutex_lock(&q->mutex);
drivers/misc/uacce/uacce.c
592
uacce_put_queue(q);
drivers/misc/uacce/uacce.c
593
mutex_unlock(&q->mutex);
drivers/misc/uacce/uacce.c
594
uacce_unbind_queue(q);
drivers/misc/uacce/uacce.c
60
struct uacce_device *uacce = q->uacce;
drivers/misc/uacce/uacce.c
600
unmap_mapping_range(q->mapping, 0, 0, 1);
drivers/misc/uacce/uacce.c
62
uacce_stop_queue(q);
drivers/misc/uacce/uacce.c
64
if (q->state != UACCE_Q_INIT)
drivers/misc/uacce/uacce.c
68
uacce->ops->put_queue(q);
drivers/misc/uacce/uacce.c
70
q->state = UACCE_Q_ZOMBIE;
drivers/misc/uacce/uacce.c
76
struct uacce_queue *q = filep->private_data;
drivers/misc/uacce/uacce.c
77
struct uacce_device *uacce = q->uacce;
drivers/misc/uacce/uacce.c
89
if (!uacce_queue_is_valid(q))
drivers/misc/uacce/uacce.c
94
ret = uacce_start_queue(q);
drivers/misc/uacce/uacce.c
97
ret = uacce_stop_queue(q);
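
Taken together, the uacce hits describe a small per-queue state machine: a freshly allocated queue starts in UACCE_Q_INIT, uacce_start_queue() moves INIT to STARTED, uacce_stop_queue() moves STARTED back to INIT, and uacce_put_queue() retires the queue to UACCE_Q_ZOMBIE; uacce_queue_is_valid() admits only INIT and STARTED. A compressed sketch of the transitions (locking and hardware ops elided):

    /* INIT --start--> STARTED --stop--> INIT --put--> ZOMBIE */

    static bool my_queue_is_valid(struct uacce_queue *q)
    {
            return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
    }

    static int my_stop_queue(struct uacce_queue *q)
    {
            if (q->state != UACCE_Q_STARTED)
                    return 0;
            q->state = UACCE_Q_INIT;        /* STARTED -> INIT */
            return 0;
    }
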
drivers/misc/vmw_vmci/vmci_queue_pair.c
248
static void qp_free_queue(void *q, u64 size)
drivers/misc/vmw_vmci/vmci_queue_pair.c
250
struct vmci_queue *queue = q;
drivers/mmc/core/block.c
1526
struct request_queue *q = req->q;
drivers/mmc/core/block.c
1569
blk_mq_run_hw_queues(q, true);
drivers/mmc/core/block.c
1596
struct request_queue *q = req->q;
drivers/mmc/core/block.c
1597
struct mmc_queue *mq = q->queuedata;
drivers/mmc/core/block.c
1605
else if (likely(!blk_should_fake_timeout(req->q)))
drivers/mmc/core/block.c
1854
struct mmc_queue *mq = req->q->queuedata;
drivers/mmc/core/block.c
1945
pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
drivers/mmc/core/block.c
2116
struct request_queue *q = req->q;
drivers/mmc/core/block.c
2117
struct mmc_queue *mq = q->queuedata;
drivers/mmc/core/block.c
2142
else if (likely(!blk_should_fake_timeout(req->q)))
drivers/mmc/core/block.c
2148
struct mmc_queue *mq = req->q->queuedata;
drivers/mmc/core/block.c
2153
else if (likely(!blk_should_fake_timeout(req->q)))
drivers/mmc/core/block.c
2207
} else if (likely(!blk_should_fake_timeout(req->q))) {
drivers/mmc/core/block.c
2273
struct request_queue *q = req->q;
drivers/mmc/core/block.c
2274
struct mmc_queue *mq = q->queuedata;
drivers/mmc/core/block.c
3053
struct list_head *pos, *q;
drivers/mmc/core/block.c
3058
list_for_each_safe(pos, q, &md->rpmbs) {
drivers/mmc/core/block.c
3064
list_for_each_safe(pos, q, &md->part) {
drivers/mmc/core/crypto.c
22
void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
drivers/mmc/core/crypto.c
25
blk_crypto_register(&host->crypto_profile, q);
drivers/mmc/core/crypto.h
19
void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host);
drivers/mmc/core/crypto.h
29
static inline void mmc_crypto_setup_queue(struct request_queue *q,
drivers/mmc/core/queue.c
122
struct request_queue *q = req->q;
drivers/mmc/core/queue.c
123
struct mmc_queue *mq = q->queuedata;
drivers/mmc/core/queue.c
140
struct request_queue *q = mq->queue;
drivers/mmc/core/queue.c
163
blk_mq_run_hw_queues(q, true);
drivers/mmc/core/queue.c
233
struct request_queue *q = req->q;
drivers/mmc/core/queue.c
234
struct mmc_queue *mq = q->queuedata;
drivers/mmc/core/queue.c
491
struct request_queue *q = mq->queue;
drivers/mmc/core/queue.c
497
if (blk_queue_quiesced(q))
drivers/mmc/core/queue.c
498
blk_mq_unquiesce_queue(q);
drivers/mmc/core/queue.c
86
struct request_queue *q = req->q;
drivers/mmc/core/queue.c
87
struct mmc_queue *mq = q->queuedata;
drivers/mmc/core/queue.c
99
struct mmc_queue *mq = req->q->queuedata;
drivers/mmc/host/renesas_sdhi.h
44
#define sdhi_has_quirk(p, q) ((p)->quirks && (p)->quirks->q)
drivers/mmc/host/sdhci-esdhc-mcf.c
227
int i, q, ri, rq;
drivers/mmc/host/sdhci-esdhc-mcf.c
256
for (q = 1; q < 17; ++q) {
drivers/mmc/host/sdhci-esdhc-mcf.c
257
int finale = result / q;
drivers/mmc/host/sdhci-esdhc-mcf.c
264
rq = q;
drivers/mtd/ubi/block.c
183
struct ubiblock *dev = req->q->queuedata;
drivers/net/dsa/ocelot/felix_vsc9959.c
2003
struct list_head *pos, *q, *last;
drivers/net/dsa/ocelot/felix_vsc9959.c
2011
list_for_each_safe(pos, q, &psfp->sfi_list) {
drivers/net/dsa/ocelot/felix_vsc9959.c
2038
struct list_head *pos, *q, *last;
drivers/net/dsa/ocelot/felix_vsc9959.c
2046
list_for_each_safe(pos, q, &psfp->sfi_list) {
drivers/net/ethernet/airoha/airoha_eth.c
1003
irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
drivers/net/ethernet/airoha/airoha_eth.c
1005
if (!irq_q->q)
drivers/net/ethernet/airoha/airoha_eth.c
1008
memset(irq_q->q, 0xff, size * sizeof(u32));
drivers/net/ethernet/airoha/airoha_eth.c
1042
static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
drivers/net/ethernet/airoha/airoha_eth.c
1044
struct airoha_eth *eth = q->qdma->eth;
drivers/net/ethernet/airoha/airoha_eth.c
1047
spin_lock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
1048
for (i = 0; i < q->ndesc; i++) {
drivers/net/ethernet/airoha/airoha_eth.c
1049
struct airoha_queue_entry *e = &q->entry[i];
drivers/net/ethernet/airoha/airoha_eth.c
1059
list_add_tail(&e->list, &q->tx_list);
drivers/net/ethernet/airoha/airoha_eth.c
1060
q->queued--;
drivers/net/ethernet/airoha/airoha_eth.c
1062
spin_unlock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
1927
struct airoha_queue *q;
drivers/net/ethernet/airoha/airoha_eth.c
1965
q = &qdma->q_tx[qid];
drivers/net/ethernet/airoha/airoha_eth.c
1966
if (WARN_ON_ONCE(!q->ndesc))
drivers/net/ethernet/airoha/airoha_eth.c
1969
spin_lock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
1974
if (q->queued + nr_frags >= q->ndesc) {
drivers/net/ethernet/airoha/airoha_eth.c
1977
spin_unlock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
1984
e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
drivers/net/ethernet/airoha/airoha_eth.c
1986
index = e - q->entry;
drivers/net/ethernet/airoha/airoha_eth.c
1989
struct airoha_qdma_desc *desc = &q->desc[index];
drivers/net/ethernet/airoha/airoha_eth.c
2004
e = list_first_entry(&q->tx_list, struct airoha_queue_entry,
drivers/net/ethernet/airoha/airoha_eth.c
2006
index = e - q->entry;
drivers/net/ethernet/airoha/airoha_eth.c
2022
q->queued += i;
drivers/net/ethernet/airoha/airoha_eth.c
2032
if (q->ndesc - q->queued < q->free_thr)
drivers/net/ethernet/airoha/airoha_eth.c
2035
spin_unlock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
2046
list_move_tail(&e->list, &q->tx_list);
drivers/net/ethernet/airoha/airoha_eth.c
2049
spin_unlock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
2743
int q;
drivers/net/ethernet/airoha/airoha_eth.c
2745
for_each_set_bit(q, port->qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS)
drivers/net/ethernet/airoha/airoha_eth.c
2746
airoha_tc_remove_htb_queue(port, q);
drivers/net/ethernet/airoha/airoha_eth.c
297
int q;
drivers/net/ethernet/airoha/airoha_eth.c
308
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
309
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
drivers/net/ethernet/airoha/airoha_eth.c
312
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
313
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
drivers/net/ethernet/airoha/airoha_eth.c
316
for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
317
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
drivers/net/ethernet/airoha/airoha_eth.c
319
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
320
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
drivers/net/ethernet/airoha/airoha_eth.c
323
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
drivers/net/ethernet/airoha/airoha_eth.c
324
if (q < pse_port_num_queues[FE_PSE_PORT_PPE1])
drivers/net/ethernet/airoha/airoha_eth.c
325
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
drivers/net/ethernet/airoha/airoha_eth.c
328
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
drivers/net/ethernet/airoha/airoha_eth.c
331
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
332
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
drivers/net/ethernet/airoha/airoha_eth.c
335
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
drivers/net/ethernet/airoha/airoha_eth.c
336
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
drivers/net/ethernet/airoha/airoha_eth.c
338
for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
339
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
drivers/net/ethernet/airoha/airoha_eth.c
343
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
drivers/net/ethernet/airoha/airoha_eth.c
344
if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
drivers/net/ethernet/airoha/airoha_eth.c
346
q,
drivers/net/ethernet/airoha/airoha_eth.c
350
q, 0);
drivers/net/ethernet/airoha/airoha_eth.c
354
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
355
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
drivers/net/ethernet/airoha/airoha_eth.c
358
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
drivers/net/ethernet/airoha/airoha_eth.c
359
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
drivers/net/ethernet/airoha/airoha_eth.c
540
static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
drivers/net/ethernet/airoha/airoha_eth.c
542
struct airoha_qdma *qdma = q->qdma;
drivers/net/ethernet/airoha/airoha_eth.c
543
int qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
546
while (q->queued < q->ndesc - 1) {
drivers/net/ethernet/airoha/airoha_eth.c
547
struct airoha_queue_entry *e = &q->entry[q->head];
drivers/net/ethernet/airoha/airoha_eth.c
548
struct airoha_qdma_desc *desc = &q->desc[q->head];
drivers/net/ethernet/airoha/airoha_eth.c
553
page = page_pool_dev_alloc_frag(q->page_pool, &offset,
drivers/net/ethernet/airoha/airoha_eth.c
554
q->buf_size);
drivers/net/ethernet/airoha/airoha_eth.c
558
q->head = (q->head + 1) % q->ndesc;
drivers/net/ethernet/airoha/airoha_eth.c
559
q->queued++;
drivers/net/ethernet/airoha/airoha_eth.c
564
e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
drivers/net/ethernet/airoha/airoha_eth.c
569
val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
drivers/net/ethernet/airoha/airoha_eth.c
578
FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
drivers/net/ethernet/airoha/airoha_eth.c
604
static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
drivers/net/ethernet/airoha/airoha_eth.c
606
enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
drivers/net/ethernet/airoha/airoha_eth.c
607
struct airoha_qdma *qdma = q->qdma;
drivers/net/ethernet/airoha/airoha_eth.c
609
int qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
613
struct airoha_queue_entry *e = &q->entry[q->tail];
drivers/net/ethernet/airoha/airoha_eth.c
614
struct airoha_qdma_desc *desc = &q->desc[q->tail];
drivers/net/ethernet/airoha/airoha_eth.c
624
q->tail = (q->tail + 1) % q->ndesc;
drivers/net/ethernet/airoha/airoha_eth.c
625
q->queued--;
drivers/net/ethernet/airoha/airoha_eth.c
628
SKB_WITH_OVERHEAD(q->buf_size), dir);
drivers/net/ethernet/airoha/airoha_eth.c
631
data_len = q->skb ? q->buf_size
drivers/net/ethernet/airoha/airoha_eth.c
632
: SKB_WITH_OVERHEAD(q->buf_size);
drivers/net/ethernet/airoha/airoha_eth.c
641
if (!q->skb) { /* first buffer */
drivers/net/ethernet/airoha/airoha_eth.c
642
q->skb = napi_build_skb(e->buf, q->buf_size);
drivers/net/ethernet/airoha/airoha_eth.c
643
if (!q->skb)
drivers/net/ethernet/airoha/airoha_eth.c
646
__skb_put(q->skb, len);
drivers/net/ethernet/airoha/airoha_eth.c
647
skb_mark_for_recycle(q->skb);
drivers/net/ethernet/airoha/airoha_eth.c
648
q->skb->dev = port->dev;
drivers/net/ethernet/airoha/airoha_eth.c
649
q->skb->protocol = eth_type_trans(q->skb, port->dev);
drivers/net/ethernet/airoha/airoha_eth.c
650
q->skb->ip_summed = CHECKSUM_UNNECESSARY;
drivers/net/ethernet/airoha/airoha_eth.c
651
skb_record_rx_queue(q->skb, qid);
drivers/net/ethernet/airoha/airoha_eth.c
653
struct skb_shared_info *shinfo = skb_shinfo(q->skb);
drivers/net/ethernet/airoha/airoha_eth.c
659
skb_add_rx_frag(q->skb, nr_frags, page,
drivers/net/ethernet/airoha/airoha_eth.c
661
q->buf_size);
drivers/net/ethernet/airoha/airoha_eth.c
678
skb_dst_set_noref(q->skb,
drivers/net/ethernet/airoha/airoha_eth.c
684
skb_set_hash(q->skb, jhash_1word(hash, 0),
drivers/net/ethernet/airoha/airoha_eth.c
689
airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
drivers/net/ethernet/airoha/airoha_eth.c
693
napi_gro_receive(&q->napi, q->skb);
drivers/net/ethernet/airoha/airoha_eth.c
694
q->skb = NULL;
drivers/net/ethernet/airoha/airoha_eth.c
697
if (q->skb) {
drivers/net/ethernet/airoha/airoha_eth.c
698
dev_kfree_skb(q->skb);
drivers/net/ethernet/airoha/airoha_eth.c
699
q->skb = NULL;
drivers/net/ethernet/airoha/airoha_eth.c
701
page_pool_put_full_page(q->page_pool, page, true);
drivers/net/ethernet/airoha/airoha_eth.c
704
airoha_qdma_fill_rx_queue(q);
drivers/net/ethernet/airoha/airoha_eth.c
711
struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
drivers/net/ethernet/airoha/airoha_eth.c
715
cur = airoha_qdma_rx_process(q, budget - done);
drivers/net/ethernet/airoha/airoha_eth.c
720
struct airoha_qdma *qdma = q->qdma;
drivers/net/ethernet/airoha/airoha_eth.c
721
int i, qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
737
static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
drivers/net/ethernet/airoha/airoha_eth.c
748
.napi = &q->napi,
drivers/net/ethernet/airoha/airoha_eth.c
751
int qid = q - &qdma->q_rx[0], thr;
drivers/net/ethernet/airoha/airoha_eth.c
754
q->buf_size = PAGE_SIZE / 2;
drivers/net/ethernet/airoha/airoha_eth.c
755
q->ndesc = ndesc;
drivers/net/ethernet/airoha/airoha_eth.c
756
q->qdma = qdma;
drivers/net/ethernet/airoha/airoha_eth.c
758
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
drivers/net/ethernet/airoha/airoha_eth.c
760
if (!q->entry)
drivers/net/ethernet/airoha/airoha_eth.c
763
q->page_pool = page_pool_create(&pp_params);
drivers/net/ethernet/airoha/airoha_eth.c
764
if (IS_ERR(q->page_pool)) {
drivers/net/ethernet/airoha/airoha_eth.c
765
int err = PTR_ERR(q->page_pool);
drivers/net/ethernet/airoha/airoha_eth.c
767
q->page_pool = NULL;
drivers/net/ethernet/airoha/airoha_eth.c
771
q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
drivers/net/ethernet/airoha/airoha_eth.c
773
if (!q->desc)
drivers/net/ethernet/airoha/airoha_eth.c
776
netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
drivers/net/ethernet/airoha/airoha_eth.c
787
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
drivers/net/ethernet/airoha/airoha_eth.c
790
airoha_qdma_fill_rx_queue(q);
drivers/net/ethernet/airoha/airoha_eth.c
795
static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
drivers/net/ethernet/airoha/airoha_eth.c
797
struct airoha_qdma *qdma = q->qdma;
drivers/net/ethernet/airoha/airoha_eth.c
799
int qid = q - &qdma->q_rx[0];
drivers/net/ethernet/airoha/airoha_eth.c
801
while (q->queued) {
drivers/net/ethernet/airoha/airoha_eth.c
802
struct airoha_queue_entry *e = &q->entry[q->tail];
drivers/net/ethernet/airoha/airoha_eth.c
803
struct airoha_qdma_desc *desc = &q->desc[q->tail];
drivers/net/ethernet/airoha/airoha_eth.c
807
page_pool_get_dma_dir(q->page_pool));
drivers/net/ethernet/airoha/airoha_eth.c
808
page_pool_put_full_page(q->page_pool, page, false);
drivers/net/ethernet/airoha/airoha_eth.c
818
q->tail = (q->tail + 1) % q->ndesc;
drivers/net/ethernet/airoha/airoha_eth.c
819
q->queued--;
drivers/net/ethernet/airoha/airoha_eth.c
822
q->head = q->tail;
drivers/net/ethernet/airoha/airoha_eth.c
824
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->tail));
drivers/net/ethernet/airoha/airoha_eth.c
867
u32 qid, val = irq_q->q[head];
drivers/net/ethernet/airoha/airoha_eth.c
870
struct airoha_queue *q;
drivers/net/ethernet/airoha/airoha_eth.c
877
irq_q->q[head] = 0xff; /* mark as done */
drivers/net/ethernet/airoha/airoha_eth.c
886
q = &qdma->q_tx[qid];
drivers/net/ethernet/airoha/airoha_eth.c
887
if (!q->ndesc)
drivers/net/ethernet/airoha/airoha_eth.c
891
if (index >= q->ndesc)
drivers/net/ethernet/airoha/airoha_eth.c
894
spin_lock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
896
if (!q->queued)
drivers/net/ethernet/airoha/airoha_eth.c
899
desc = &q->desc[index];
drivers/net/ethernet/airoha/airoha_eth.c
906
e = &q->entry[index];
drivers/net/ethernet/airoha/airoha_eth.c
912
list_add_tail(&e->list, &q->tx_list);
drivers/net/ethernet/airoha/airoha_eth.c
916
q->queued--;
drivers/net/ethernet/airoha/airoha_eth.c
925
q->ndesc - q->queued >= q->free_thr)
drivers/net/ethernet/airoha/airoha_eth.c
931
spin_unlock_bh(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
951
static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
drivers/net/ethernet/airoha/airoha_eth.c
955
int i, qid = q - &qdma->q_tx[0];
drivers/net/ethernet/airoha/airoha_eth.c
958
spin_lock_init(&q->lock);
drivers/net/ethernet/airoha/airoha_eth.c
959
q->ndesc = size;
drivers/net/ethernet/airoha/airoha_eth.c
960
q->qdma = qdma;
drivers/net/ethernet/airoha/airoha_eth.c
961
q->free_thr = 1 + MAX_SKB_FRAGS;
drivers/net/ethernet/airoha/airoha_eth.c
962
INIT_LIST_HEAD(&q->tx_list);
drivers/net/ethernet/airoha/airoha_eth.c
964
q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
drivers/net/ethernet/airoha/airoha_eth.c
966
if (!q->entry)
drivers/net/ethernet/airoha/airoha_eth.c
969
q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
drivers/net/ethernet/airoha/airoha_eth.c
971
if (!q->desc)
drivers/net/ethernet/airoha/airoha_eth.c
974
for (i = 0; i < q->ndesc; i++) {
drivers/net/ethernet/airoha/airoha_eth.c
977
list_add_tail(&q->entry[i].list, &q->tx_list);
drivers/net/ethernet/airoha/airoha_eth.c
978
WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
drivers/net/ethernet/airoha/airoha_eth.h
210
u32 *q;
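
The airoha_eth hits are a textbook single-producer ring: the fill path advances q->head modulo q->ndesc and bumps q->queued, the completion path advances q->tail and decrements it, and filling stops at ndesc - 1 so head can never catch tail. The index arithmetic in isolation (buffer/DMA setup elided):

    /* Produce: refill RX descriptors, keeping one slot free. */
    while (q->queued < q->ndesc - 1) {
            struct airoha_queue_entry *e = &q->entry[q->head];

            /* ... attach a fresh buffer to e and q->desc[q->head] ... */
            q->head = (q->head + 1) % q->ndesc;
            q->queued++;
    }

    /* Consume: retire the descriptor at the tail. */
    q->tail = (q->tail + 1) % q->ndesc;
    q->queued--;
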
drivers/net/ethernet/airoha/airoha_npu.c
581
static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
drivers/net/ethernet/airoha/airoha_npu.c
589
static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
drivers/net/ethernet/airoha/airoha_npu.c
591
regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
drivers/net/ethernet/airoha/airoha_npu.c
594
static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
drivers/net/ethernet/airoha/airoha_npu.c
596
regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
drivers/net/ethernet/amazon/ena/ena_netdev.h
102
#define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
drivers/net/ethernet/amazon/ena/ena_netdev.h
95
#define ENA_IO_TXQ_IDX(q) (2 * (q))
drivers/net/ethernet/amazon/ena/ena_netdev.h
96
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
drivers/net/ethernet/amazon/ena/ena_netdev.h
97
#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
drivers/net/ethernet/amazon/ena/ena_netdev.h
98
#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
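
The ena macros interleave TX and RX indices: combined queue q maps to TXQ 2q and RXQ 2q + 1, and the *_TO_COMBINED_IDX macros invert that. Worked for q = 3:

    int q = 3;
    int txq = 2 * q;                /* ENA_IO_TXQ_IDX(3) == 6 */
    int rxq = 2 * q + 1;            /* ENA_IO_RXQ_IDX(3) == 7 */
    int from_tx = txq / 2;          /* ENA_IO_TXQ_IDX_TO_COMBINED_IDX(6) == 3 */
    int from_rx = (rxq - 1) / 2;    /* ENA_IO_RXQ_IDX_TO_COMBINED_IDX(7) == 3 */
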
drivers/net/ethernet/amd/pds_core/adminq.c
104
q_info = &q->info[q->tail_idx];
drivers/net/ethernet/amd/pds_core/adminq.c
105
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/amd/pds_core/adminq.c
162
struct pdsc_queue *q = &qcq->q;
drivers/net/ethernet/amd/pds_core/adminq.c
172
avail = q->tail_idx;
drivers/net/ethernet/amd/pds_core/adminq.c
173
if (q->head_idx >= avail)
drivers/net/ethernet/amd/pds_core/adminq.c
174
avail += q->num_descs - q->head_idx - 1;
drivers/net/ethernet/amd/pds_core/adminq.c
176
avail -= q->head_idx + 1;
drivers/net/ethernet/amd/pds_core/adminq.c
200
index = q->head_idx;
drivers/net/ethernet/amd/pds_core/adminq.c
201
q_info = &q->info[index];
drivers/net/ethernet/amd/pds_core/adminq.c
207
q->head_idx, q->tail_idx);
drivers/net/ethernet/amd/pds_core/adminq.c
212
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/amd/pds_core/adminq.c
215
q->hw_type, q->dbval | q->head_idx);
drivers/net/ethernet/amd/pds_core/adminq.c
250
wc = &pdsc->adminqcq.q.info[index].completion;
drivers/net/ethernet/amd/pds_core/adminq.c
73
struct pdsc_queue *q = &qcq->q;
drivers/net/ethernet/amd/pds_core/adminq.c
94
if (q->tail_idx == q->head_idx)
drivers/net/ethernet/amd/pds_core/core.c
127
PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
drivers/net/ethernet/amd/pds_core/core.c
157
vfree(qcq->q.info);
drivers/net/ethernet/amd/pds_core/core.c
162
static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
drivers/net/ethernet/amd/pds_core/core.c
167
q->base = base;
drivers/net/ethernet/amd/pds_core/core.c
168
q->base_pa = base_pa;
drivers/net/ethernet/amd/pds_core/core.c
170
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
drivers/net/ethernet/amd/pds_core/core.c
171
cur->desc = base + (i * q->desc_size);
drivers/net/ethernet/amd/pds_core/core.c
199
qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
drivers/net/ethernet/amd/pds_core/core.c
200
if (!qcq->q.info) {
drivers/net/ethernet/amd/pds_core/core.c
209
qcq->q.type = type;
drivers/net/ethernet/amd/pds_core/core.c
210
qcq->q.index = index;
drivers/net/ethernet/amd/pds_core/core.c
211
qcq->q.num_descs = num_descs;
drivers/net/ethernet/amd/pds_core/core.c
212
qcq->q.desc_size = desc_size;
drivers/net/ethernet/amd/pds_core/core.c
213
qcq->q.tail_idx = 0;
drivers/net/ethernet/amd/pds_core/core.c
214
qcq->q.head_idx = 0;
drivers/net/ethernet/amd/pds_core/core.c
215
qcq->q.pid = pid;
drivers/net/ethernet/amd/pds_core/core.c
216
snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);
drivers/net/ethernet/amd/pds_core/core.c
248
pdsc_q_map(&qcq->q, q_base, q_base_pa);
drivers/net/ethernet/amd/pds_core/core.c
269
pdsc_q_map(&qcq->q, q_base, q_base_pa);
drivers/net/ethernet/amd/pds_core/core.c
285
qcq->cq.bound_q = &qcq->q;
drivers/net/ethernet/amd/pds_core/core.c
298
vfree(qcq->q.info);
drivers/net/ethernet/amd/pds_core/core.c
354
cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
drivers/net/ethernet/amd/pds_core/core.c
355
cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);
drivers/net/ethernet/amd/pds_core/core.c
386
pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
drivers/net/ethernet/amd/pds_core/core.c
387
pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
drivers/net/ethernet/amd/pds_core/core.c
388
pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);
drivers/net/ethernet/amd/pds_core/core.c
390
pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
drivers/net/ethernet/amd/pds_core/core.c
391
pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
drivers/net/ethernet/amd/pds_core/core.c
392
pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);
drivers/net/ethernet/amd/pds_core/core.h
127
struct pdsc_queue q;
drivers/net/ethernet/amd/pds_core/debugfs.c
111
struct pdsc_queue *q = &qcq->q;
drivers/net/ethernet/amd/pds_core/debugfs.c
114
qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry);
drivers/net/ethernet/amd/pds_core/debugfs.c
129
debugfs_create_u32("index", 0400, q_dentry, &q->index);
drivers/net/ethernet/amd/pds_core/debugfs.c
130
debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
drivers/net/ethernet/amd/pds_core/debugfs.c
131
debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
drivers/net/ethernet/amd/pds_core/debugfs.c
132
debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
drivers/net/ethernet/amd/pds_core/debugfs.c
134
debugfs_create_u16("tail", 0400, q_dentry, &q->tail_idx);
drivers/net/ethernet/amd/pds_core/debugfs.c
135
debugfs_create_u16("head", 0400, q_dentry, &q->head_idx);
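
The adminq credit computation above (avail = q->tail_idx, then a wrap adjustment against q->head_idx) sizes the free region of a ring where head chases tail, always leaving one slot unused so a full ring stays distinguishable from an empty one. Worked for num_descs = 16, head = 14, tail = 3: avail = 3 + 16 - 14 - 1 = 4.

    u16 num_descs = 16, head = 14, tail = 3;
    u16 avail = tail;

    if (head >= avail)
            avail += num_descs - head - 1;  /* wrapped: 3 + 16 - 14 - 1 = 4 */
    else
            avail -= head + 1;              /* contiguous gap */
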
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
116
unsigned int tc, q;
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
138
for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) {
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
139
rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q);
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
140
if (HW_ATL2_RX_Q_TC_MAP_ADR(q) !=
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
141
HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) {
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
143
HW_ATL2_RX_Q_TC_MAP_ADR(q),
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
148
tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q);
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
149
if (HW_ATL2_TX_Q_TC_MAP_ADR(q) !=
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
150
HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) {
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
152
HW_ATL2_TX_Q_TC_MAP_ADR(q),
drivers/net/ethernet/asix/ax88796c_main.c
244
ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
drivers/net/ethernet/asix/ax88796c_main.c
257
if (skb_queue_empty(q))
drivers/net/ethernet/asix/ax88796c_main.c
260
skb = skb_peek(q);
drivers/net/ethernet/asix/ax88796c_main.c
298
skb_unlink(skb, q);
drivers/net/ethernet/broadcom/bcmsysport.c
2270
unsigned int q, port;
drivers/net/ethernet/broadcom/bcmsysport.c
2276
q = BRCM_TAG_GET_QUEUE(queue);
drivers/net/ethernet/broadcom/bcmsysport.c
2278
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
drivers/net/ethernet/broadcom/bcmsysport.c
2308
unsigned int q, qp, port;
drivers/net/ethernet/broadcom/bcmsysport.c
2336
for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
drivers/net/ethernet/broadcom/bcmsysport.c
2337
q++) {
drivers/net/ethernet/broadcom/bcmsysport.c
2338
ring = &priv->tx_rings[q];
drivers/net/ethernet/broadcom/bcmsysport.c
2363
unsigned int q, qp, port;
drivers/net/ethernet/broadcom/bcmsysport.c
2369
for (q = 0; q < dev->num_tx_queues; q++) {
drivers/net/ethernet/broadcom/bcmsysport.c
2370
ring = &priv->tx_rings[q];
drivers/net/ethernet/broadcom/bcmsysport.c
432
unsigned int q;
drivers/net/ethernet/broadcom/bcmsysport.c
434
for (q = 0; q < priv->netdev->num_tx_queues; q++) {
drivers/net/ethernet/broadcom/bcmsysport.c
435
ring = &priv->tx_rings[q];
drivers/net/ethernet/broadcom/bcmsysport.c
986
unsigned int q;
drivers/net/ethernet/broadcom/bcmsysport.c
988
for (q = 0; q < priv->netdev->num_tx_queues; q++)
drivers/net/ethernet/broadcom/bcmsysport.c
989
bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
14382
struct list_head *pos, *q;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
14390
list_for_each_safe(pos, q, &bnx2x_prev_list) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5390
int q, rc;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5416
for_each_eth_queue(bp, q) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5418
fp = &bp->fp[q];
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5425
q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
5444
q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1411
struct bnx2x_vf_queue *q)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1413
u8 cl_id = vfq_cl_id(vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1421
bnx2x_init_queue_obj(bp, &q->sp_obj,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1422
cl_id, &q->cid, 1, func_id,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1428
q->sp_initialized = false;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1432
vf->abs_vfid, q->sp_obj.func_id, q->cid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
168
struct bnx2x_vf_queue *q,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
185
init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
186
init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
189
init_p->cxts[0] = q->cxt;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
195
setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2067
struct bnx2x_vf_queue *q = vfq_get(vf, i);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2069
if (!q) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2074
q->index = i;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2075
q->cxt = &((base_cxt + i)->eth);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2076
q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2079
vf->abs_vfid, i, q->index, q->cid, q->cxt);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
2082
bnx2x_vfq_init(bp, vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
219
rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
220
rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
230
setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
385
static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
387
return vf->igu_base_id + q->index;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
390
static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
395
return vfq_cl_id(vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
398
static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
400
return vfq_cl_id(vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
447
struct bnx2x_vf_queue *q,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1539
struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1545
if (bnx2x_vfq_is_leading(q))
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1546
bnx2x_leading_vfq_init(bp, vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1564
q->sb_idx = setup_q->txq.vf_sb;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1585
q->index, q->sb_idx);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1597
q->sb_idx = setup_q->rxq.vf_sb;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1627
if (bnx2x_vfq_is_leading(q)) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1635
q->index, q->sb_idx);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1638
bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1640
rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
545
struct bnx2x_vf_queue *q)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
547
u8 cl_id = vfq_cl_id(vf, q);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
551
bnx2x_init_mac_obj(bp, &q->mac_obj,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
552
cl_id, q->cid, func_id,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
560
bnx2x_init_vlan_obj(bp, &q->vlan_obj,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
561
cl_id, q->cid, func_id,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
569
bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
570
cl_id, q->cid, func_id,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
580
q->cid, func_id, func_id,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
588
bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
597
q->is_leading = true;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
598
q->sp_initialized = true;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3479
unsigned int q;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3483
for (q = 0; q <= priv->hw_params->tx_queues; q++)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3484
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3488
for (q = 0; q <= priv->hw_params->tx_queues; q++)
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3489
int1_enable |= (1 << q);
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3599
unsigned int q;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3602
for (q = 0; q <= priv->hw_params->tx_queues; q++) {
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3603
tx_stats = &priv->tx_rings[q].stats64;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3618
for (q = 0; q <= priv->hw_params->rx_queues; q++) {
drivers/net/ethernet/broadcom/genet/bcmgenet.c
3619
rx_stats = &priv->rx_rings[q].stats64;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
601
u32 offset = 0, f_length = 0, f, q;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
690
q = 0;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
692
q = priv->hw_params->rx_queues + 1;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
695
q = fs->ring_cookie;
drivers/net/ethernet/broadcom/genet/bcmgenet.c
696
bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
drivers/net/ethernet/broadcom/genet/bcmgenet.h
434
#define DMA_PRIO_REG_INDEX(q) ((q) / 6)
drivers/net/ethernet/broadcom/genet/bcmgenet.h
435
#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT)
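
The bcmgenet macros pack six per-queue priority fields into each register: the register index is q / 6 and the in-register shift is (q % 6) times the field width. A quick compile-and-assert check of that layout (the 3-bit width below stands in for DMA_RING_BUF_PRIORITY_SHIFT and is an assumption, not the driver's value):

    #include <assert.h>

    #define PRIO_SHIFT 3                         /* assumed field width per queue */
    #define PRIO_REG_INDEX(q) ((q) / 6)
    #define PRIO_REG_SHIFT(q) (((q) % 6) * PRIO_SHIFT)

    int main(void)
    {
        assert(PRIO_REG_INDEX(0) == 0 && PRIO_REG_SHIFT(0) == 0);
        assert(PRIO_REG_INDEX(5) == 0 && PRIO_REG_SHIFT(5) == 15);
        assert(PRIO_REG_INDEX(6) == 1 && PRIO_REG_SHIFT(6) == 0);  /* wraps to next register */
        return 0;
    }
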
drivers/net/ethernet/brocade/bna/bfi_enet.h
165
struct bfi_enet_q q;
drivers/net/ethernet/brocade/bna/bfi_enet.h
171
struct bfi_enet_q q;
drivers/net/ethernet/brocade/bna/bfi_enet.h
177
struct bfi_enet_q q;
drivers/net/ethernet/brocade/bna/bfi_enet.h
422
struct bfi_enet_txq q;
drivers/net/ethernet/brocade/bna/bna.h
238
static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
drivers/net/ethernet/brocade/bna/bna.h
242
list_for_each_entry(mac, q, qe)
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1635
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1643
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
1664
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3101
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
drivers/net/ethernet/brocade/bna/bna_tx_rx.c
3102
cfg_req->q_cfg[i].q.priority = txq->priority;
drivers/net/ethernet/cadence/macb_main.c
1851
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
1854
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
1872
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
drivers/net/ethernet/cadence/macb_main.c
2073
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2076
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
drivers/net/ethernet/cadence/macb_main.c
2483
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2486
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
2536
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2553
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
2564
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2567
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
2602
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2627
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
2628
queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
drivers/net/ethernet/cadence/macb_main.c
2629
queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
drivers/net/ethernet/cadence/macb_main.c
2631
queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
drivers/net/ethernet/cadence/macb_main.c
2632
queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
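
In the macb setup above, one coherent allocation is carved into per-queue rings by advancing the CPU pointer and the DMA address by the same per-queue stride, so both views of queue q's ring stay in step. A standalone sketch of that slicing (dma_addr_t is modelled as a plain 64-bit integer here, and the stride is a caller-supplied size):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;                 /* stand-in for the kernel type */

    struct ring_slice {
        void *ring;                              /* CPU view */
        dma_addr_t ring_dma;                     /* device view */
    };

    /* Carve one coherent allocation into per-queue rings: the CPU pointer
     * and the DMA address must advance by the same stride so both views
     * of each queue's ring refer to the same memory. */
    static void slice_rings(struct ring_slice *qs, unsigned int nqueues,
                            void *base, dma_addr_t base_dma, size_t per_queue)
    {
        unsigned int q;

        for (q = 0; q < nqueues; q++) {
            qs[q].ring = (char *)base + per_queue * q;
            qs[q].ring_dma = base_dma + per_queue * q;
        }
    }
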
drivers/net/ethernet/cadence/macb_main.c
2684
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2687
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
2725
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2746
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
2831
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
2837
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
2838
if (q)
drivers/net/ethernet/cadence/macb_main.c
3032
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
3054
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
3085
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
3100
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
3104
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
3107
netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
drivers/net/ethernet/cadence/macb_main.c
3155
unsigned int i, q, idx;
drivers/net/ethernet/cadence/macb_main.c
3176
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
drivers/net/ethernet/cadence/macb_main.c
3249
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
3257
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
3260
q, queue_statistics[i].stat_string);
drivers/net/ethernet/cadence/macb_main.c
4332
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
4343
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
4588
unsigned int hw_q, q;
drivers/net/ethernet/cadence/macb_main.c
460
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
4601
for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) {
drivers/net/ethernet/cadence/macb_main.c
4602
queue = &bp->queues[q];
drivers/net/ethernet/cadence/macb_main.c
4634
queue->irq = platform_get_irq(pdev, q);
drivers/net/ethernet/cadence/macb_main.c
4645
q++;
drivers/net/ethernet/cadence/macb_main.c
470
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
4757
struct macb_queue *q = &lp->queues[0];
drivers/net/ethernet/cadence/macb_main.c
4759
q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
drivers/net/ethernet/cadence/macb_main.c
4762
&q->rx_ring_dma, GFP_KERNEL);
drivers/net/ethernet/cadence/macb_main.c
4763
if (!q->rx_ring)
drivers/net/ethernet/cadence/macb_main.c
4766
q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
drivers/net/ethernet/cadence/macb_main.c
4769
&q->rx_buffers_dma, GFP_KERNEL);
drivers/net/ethernet/cadence/macb_main.c
4770
if (!q->rx_buffers) {
drivers/net/ethernet/cadence/macb_main.c
4774
q->rx_ring, q->rx_ring_dma);
drivers/net/ethernet/cadence/macb_main.c
4775
q->rx_ring = NULL;
drivers/net/ethernet/cadence/macb_main.c
4784
struct macb_queue *q = &lp->queues[0];
drivers/net/ethernet/cadence/macb_main.c
4786
if (q->rx_ring) {
drivers/net/ethernet/cadence/macb_main.c
4790
q->rx_ring, q->rx_ring_dma);
drivers/net/ethernet/cadence/macb_main.c
4791
q->rx_ring = NULL;
drivers/net/ethernet/cadence/macb_main.c
4794
if (q->rx_buffers) {
drivers/net/ethernet/cadence/macb_main.c
4798
q->rx_buffers, q->rx_buffers_dma);
drivers/net/ethernet/cadence/macb_main.c
4799
q->rx_buffers = NULL;
drivers/net/ethernet/cadence/macb_main.c
4806
struct macb_queue *q = &lp->queues[0];
drivers/net/ethernet/cadence/macb_main.c
4816
addr = q->rx_buffers_dma;
drivers/net/ethernet/cadence/macb_main.c
4818
desc = macb_rx_desc(q, i);
drivers/net/ethernet/cadence/macb_main.c
4828
q->rx_tail = 0;
drivers/net/ethernet/cadence/macb_main.c
4831
macb_writel(lp, RBQP, q->rx_ring_dma);
drivers/net/ethernet/cadence/macb_main.c
4965
struct macb_queue *q = &lp->queues[0];
drivers/net/ethernet/cadence/macb_main.c
4971
desc = macb_rx_desc(q, q->rx_tail);
drivers/net/ethernet/cadence/macb_main.c
4973
p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
drivers/net/ethernet/cadence/macb_main.c
4995
if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
drivers/net/ethernet/cadence/macb_main.c
4996
q->rx_tail = 0;
drivers/net/ethernet/cadence/macb_main.c
4998
q->rx_tail++;
drivers/net/ethernet/cadence/macb_main.c
5000
desc = macb_rx_desc(q, q->rx_tail);
drivers/net/ethernet/cadence/macb_main.c
5780
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
5817
for (q = 0, queue = bp->queues; q < bp->num_queues;
drivers/net/ethernet/cadence/macb_main.c
5818
++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
5883
for (q = 0, queue = bp->queues; q < bp->num_queues;
drivers/net/ethernet/cadence/macb_main.c
5884
++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
5918
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
5970
for (q = 0, queue = bp->queues; q < bp->num_queues;
drivers/net/ethernet/cadence/macb_main.c
5971
++q, ++queue) {
drivers/net/ethernet/cadence/macb_main.c
657
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
661
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
drivers/net/ethernet/cadence/macb_main.c
757
int q;
drivers/net/ethernet/cadence/macb_main.c
759
for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
drivers/net/ethernet/cadence/macb_main.c
773
unsigned int q;
drivers/net/ethernet/cadence/macb_main.c
800
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
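
The loop header repeated throughout the macb excerpts advances an index and a pointer in lockstep: `q` numbers the queue (for register offsets, IRQ lookup, netdev tx queues) while `queue` walks the array directly, avoiding a re-index on every use. A minimal sketch of the dual-cursor idiom (the mq/mport types and the per-queue work are hypothetical):

    struct mq {
        int irq;                                 /* illustrative per-queue state */
    };

    struct mport {
        struct mq queues[8];
        unsigned int num_queues;
    };

    static void for_each_queue(struct mport *bp)
    {
        struct mq *queue;
        unsigned int q;

        /* q gives the queue number, queue the element: both advance together. */
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
            queue->irq = (int)q;                 /* stand-in per-queue work */
    }
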
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
335
#define CN23XX_SLI_PKT_MBOX_INT(q) \
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
336
(CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
338
#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
340
((q) * CN23XX_SLI_MBOX_OFFSET + \
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
214
#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
215
(CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
217
#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
219
((q) * CN23XX_SLI_MBOX_OFFSET + \
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
226
#define CN23XX_VF_SLI_INT_SUM(q) \
drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
227
(CN23XX_VF_SLI_INT_SUM_START + ((q) * CN23XX_VF_IQ_OFFSET))
drivers/net/ethernet/cavium/liquidio/lio_core.c
467
int q, q_no;
drivers/net/ethernet/cavium/liquidio/lio_core.c
469
for (q = 0; q < oct->num_oqs; q++) {
drivers/net/ethernet/cavium/liquidio/lio_core.c
470
q_no = lio->linfo.rxpciq[q].s.q_no;
drivers/net/ethernet/cavium/liquidio/lio_core.c
821
int q, q_no;
drivers/net/ethernet/cavium/liquidio/lio_core.c
839
for (q = 0; q < num_oqs; q++) {
drivers/net/ethernet/cavium/liquidio/lio_core.c
840
q_no = lio->linfo.rxpciq[q].s.q_no;
drivers/net/ethernet/cavium/liquidio/lio_core.c
843
__func__, q, q_no);
drivers/net/ethernet/cavium/liquidio/lio_core.c
883
for (q = 0; q < num_iqs; q++) {
drivers/net/ethernet/cavium/liquidio/lio_core.c
886
retval = octeon_setup_iq(octeon_dev, ifidx, q,
drivers/net/ethernet/cavium/liquidio/lio_core.c
887
lio->linfo.txpciq[q], num_tx_descs,
drivers/net/ethernet/cavium/liquidio/lio_core.c
888
netdev_get_tx_queue(netdev, q));
drivers/net/ethernet/cavium/liquidio/lio_core.c
901
ioq_vector = &octeon_dev->ioq_vector[q];
drivers/net/ethernet/cavium/liquidio/lio_main.c
460
int q, iq;
drivers/net/ethernet/cavium/liquidio/lio_main.c
463
for (q = 0; q < numqs; q++) {
drivers/net/ethernet/cavium/liquidio/lio_main.c
464
iq = lio->linfo.txpciq[q %
drivers/net/ethernet/cavium/liquidio/lio_main.c
468
if (__netif_subqueue_stopped(lio->netdev, q)) {
drivers/net/ethernet/cavium/liquidio/lio_main.c
469
netif_wake_subqueue(lio->netdev, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
369
int mbox, key, stat, q;
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
402
for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
403
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
404
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
405
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
406
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
407
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
408
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
409
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
410
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
411
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
412
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
416
for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
417
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
419
NIC_QSET_RQ_0_7_STAT_0_1, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
421
p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
424
for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
425
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
426
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
427
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
428
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
429
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
430
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
431
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
432
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
437
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
439
p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
442
for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
443
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
444
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
445
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
446
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
447
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
448
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
450
NIC_QSET_RBDR_0_1_STATUS0, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
452
NIC_QSET_RBDR_0_1_STATUS1, q);
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
454
p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
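
The nicvf_ethtool loops above produce a flat register dump: `i` is a running write index into the output buffer while `q` walks the queues, and every queue contributes the same fixed register list. A compact model of that layout (queue_reg_read() is a stub and the register identifiers are arbitrary, not the NIC_QSET_* addresses):

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_QUEUES 8

    static uint64_t queue_reg_read(unsigned int reg, unsigned int q)  /* MMIO stub */
    {
        return ((uint64_t)reg << 8) | q;
    }

    /* Fill a flat buffer with one value per (queue, register) pair:
     * all registers for queue 0, then all for queue 1, and so on. */
    static size_t dump_queue_regs(uint64_t *p, const unsigned int *regs,
                                  size_t nregs, unsigned int nqueues)
    {
        size_t i = 0, r;
        unsigned int q;

        for (q = 0; q < nqueues; q++)
            for (r = 0; r < nregs; r++)
                p[i++] = queue_reg_read(regs[r], q);
        return i;                                /* number of entries written */
    }

    int main(void)
    {
        static const unsigned int regs[] = { 0x10, 0x14, 0x18 };
        uint64_t dump[3 * MAX_QUEUES];

        return dump_queue_regs(dump, regs, 3, MAX_QUEUES) == 3 * MAX_QUEUES ? 0 : 1;
    }
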
drivers/net/ethernet/chelsio/cxgb/sge.c
1167
struct cmdQ *q)
drivers/net/ethernet/chelsio/cxgb/sge.c
1183
if (++pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1186
ce1 = q->centries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1187
e1 = q->entries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1203
struct cmdQ *q)
drivers/net/ethernet/chelsio/cxgb/sge.c
1211
e = e1 = &q->entries[pidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
1212
ce = &q->centries[pidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
1237
if (++pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1240
e1 = q->entries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1241
ce = q->centries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1245
nfrags, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
1260
if (++pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1263
e1 = q->entries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1264
ce = q->centries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1274
nfrags, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
1290
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
drivers/net/ethernet/chelsio/cxgb/sge.c
1292
unsigned int reclaim = q->processed - q->cleaned;
drivers/net/ethernet/chelsio/cxgb/sge.c
1296
q->processed, q->cleaned);
drivers/net/ethernet/chelsio/cxgb/sge.c
1297
free_cmdQ_buffers(sge, q, reclaim);
drivers/net/ethernet/chelsio/cxgb/sge.c
1298
q->cleaned += reclaim;
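
reclaim_completed_tx() above relies on two monotonically increasing counters: processed counts descriptors the hardware has completed, cleaned counts those already freed, and their unsigned difference is the wrap-safe amount left to reclaim. A minimal sketch of that accounting (the actual buffer-freeing step is elided):

    struct txq_acct {
        unsigned int processed;                  /* completions reported so far */
        unsigned int cleaned;                    /* buffers already freed */
    };

    /* Unsigned subtraction stays correct even across 32-bit wraparound,
     * as long as the counters never drift more than UINT_MAX apart. */
    static unsigned int pending_reclaim(const struct txq_acct *q)
    {
        return q->processed - q->cleaned;
    }

    static void reclaim_completed(struct txq_acct *q)
    {
        unsigned int reclaim = pending_reclaim(q);

        /* free_cmdQ_buffers(sge, q, reclaim) would run here in the driver */
        q->cleaned += reclaim;
    }
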
drivers/net/ethernet/chelsio/cxgb/sge.c
1311
struct cmdQ *q = &sge->cmdQ[0];
drivers/net/ethernet/chelsio/cxgb/sge.c
1315
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb/sge.c
1316
reclaim_completed_tx(sge, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
1318
credits = q->size - q->in_use;
drivers/net/ethernet/chelsio/cxgb/sge.c
1324
q->in_use += count;
drivers/net/ethernet/chelsio/cxgb/sge.c
1325
genbit = q->genbit;
drivers/net/ethernet/chelsio/cxgb/sge.c
1326
pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb/sge.c
1327
q->pidx += count;
drivers/net/ethernet/chelsio/cxgb/sge.c
1328
if (q->pidx >= q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1329
q->pidx -= q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
1330
q->genbit ^= 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
1332
write_tx_descs(adapter, skb, pidx, genbit, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
1333
credits = q->size - q->in_use;
drivers/net/ethernet/chelsio/cxgb/sge.c
1338
clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
drivers/net/ethernet/chelsio/cxgb/sge.c
1339
if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1340
set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
drivers/net/ethernet/chelsio/cxgb/sge.c
1344
spin_unlock(&q->lock);
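
The CMDQ_STAT_RUNNING / CMDQ_STAT_LAST_PKT_DB dance just above coalesces doorbell writes: every sender clears LAST_PKT_DB, but only the one that atomically transitions RUNNING from 0 to 1 sets LAST_PKT_DB back and rings the doorbell. A loose userspace analogue using C11 atomics (the kernel uses clear_bit/test_and_set_bit, and the interrupt-side handshake that clears RUNNING again is omitted here):

    #include <stdatomic.h>

    enum {
        CMDQ_RUNNING     = 1u << 0,
        CMDQ_LAST_PKT_DB = 1u << 1,
    };

    static void ring_doorbell(void)
    {
        /* MMIO doorbell write would go here. */
    }

    static void kick_queue(atomic_uint *status)
    {
        atomic_fetch_and(status, ~(unsigned int)CMDQ_LAST_PKT_DB);
        /* Only the thread that flips RUNNING 0 -> 1 rings the doorbell;
         * concurrent senders piggyback on its pending write. */
        if (!(atomic_fetch_or(status, CMDQ_RUNNING) & CMDQ_RUNNING)) {
            atomic_fetch_or(status, CMDQ_LAST_PKT_DB);
            ring_doorbell();
        }
    }
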
drivers/net/ethernet/chelsio/cxgb/sge.c
1399
static inline int enough_free_Tx_descs(const struct cmdQ *q)
drivers/net/ethernet/chelsio/cxgb/sge.c
1401
unsigned int r = q->processed - q->cleaned;
drivers/net/ethernet/chelsio/cxgb/sge.c
1403
return q->in_use - r < (q->size >> 1);
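
enough_free_Tx_descs() encodes the restart side of a stop/restart hysteresis: the queue is stopped when free credits run low, but only restarted once at least half the ring is effectively free, so it does not flap between the two states under load. A standalone sketch of both thresholds (field names follow the excerpts; stop_thres is left as a tunable):

    struct txq_state {
        unsigned int size;                       /* ring size in descriptors */
        unsigned int in_use;
        unsigned int processed, cleaned;
        unsigned int stop_thres;                 /* low-water mark for stopping */
    };

    /* Stop when, after taking `need` descriptors, free credits would fall
     * below the low-water mark.  Callers check credits >= need first. */
    static int should_stop(const struct txq_state *q, unsigned int need)
    {
        unsigned int credits = q->size - q->in_use;

        return credits - need < q->stop_thres;
    }

    /* Restart only once at least half the ring is free again (counting
     * completions not yet reclaimed), which provides the hysteresis. */
    static int enough_free_descs(const struct txq_state *q)
    {
        unsigned int done = q->processed - q->cleaned;

        return q->in_use - done < (q->size >> 1);
    }
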
drivers/net/ethernet/chelsio/cxgb/sge.c
1472
struct respQ *q = &sge->respQ;
drivers/net/ethernet/chelsio/cxgb/sge.c
1473
struct respQ_e *e = &q->entries[q->cidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
1478
while (done < budget && e->GenerationBit == q->genbit) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1524
if (unlikely(++q->cidx == q->size)) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1525
q->cidx = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
1526
q->genbit ^= 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
1527
e = q->entries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1531
if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1532
writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
drivers/net/ethernet/chelsio/cxgb/sge.c
1533
q->credits = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
1562
struct respQ *q = &sge->respQ;
drivers/net/ethernet/chelsio/cxgb/sge.c
1563
struct respQ_e *e = &q->entries[q->cidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
1579
if (unlikely(++q->cidx == q->size)) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1580
q->cidx = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
1581
q->genbit ^= 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
1582
e = q->entries;
drivers/net/ethernet/chelsio/cxgb/sge.c
1586
if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1587
writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
drivers/net/ethernet/chelsio/cxgb/sge.c
1588
q->credits = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
1591
} while (e->GenerationBit == q->genbit && !e->DataValid);
drivers/net/ethernet/chelsio/cxgb/sge.c
1596
return e->GenerationBit == q->genbit;
drivers/net/ethernet/chelsio/cxgb/sge.c
1701
struct cmdQ *q = &sge->cmdQ[qid];
drivers/net/ethernet/chelsio/cxgb/sge.c
1704
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb/sge.c
1706
reclaim_completed_tx(sge, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
1708
pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb/sge.c
1709
credits = q->size - q->in_use;
drivers/net/ethernet/chelsio/cxgb/sge.c
1722
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb/sge.c
1726
if (unlikely(credits - count < q->stop_thres)) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1743
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb/sge.c
1746
pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb/sge.c
1751
q->in_use += count;
drivers/net/ethernet/chelsio/cxgb/sge.c
1752
genbit = q->genbit;
drivers/net/ethernet/chelsio/cxgb/sge.c
1753
pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb/sge.c
1754
q->pidx += count;
drivers/net/ethernet/chelsio/cxgb/sge.c
1755
if (q->pidx >= q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1756
q->pidx -= q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
1757
q->genbit ^= 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
1759
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb/sge.c
1761
write_tx_descs(adapter, skb, pidx, genbit, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
1773
clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
drivers/net/ethernet/chelsio/cxgb/sge.c
1774
if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1775
set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
drivers/net/ethernet/chelsio/cxgb/sge.c
1781
if (spin_trylock(&q->lock)) {
drivers/net/ethernet/chelsio/cxgb/sge.c
1782
credits = q->size - q->in_use;
drivers/net/ethernet/chelsio/cxgb/sge.c
1928
struct cmdQ *q = &sge->cmdQ[i];
drivers/net/ethernet/chelsio/cxgb/sge.c
1930
if (!spin_trylock(&q->lock))
drivers/net/ethernet/chelsio/cxgb/sge.c
1933
reclaim_completed_tx(sge, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
1934
if (i == 0 && q->in_use) { /* flush pending credits */
drivers/net/ethernet/chelsio/cxgb/sge.c
1937
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb/sge.c
474
struct cmdQ *q = &sge->cmdQ[0];
drivers/net/ethernet/chelsio/cxgb/sge.c
475
clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
drivers/net/ethernet/chelsio/cxgb/sge.c
476
if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
drivers/net/ethernet/chelsio/cxgb/sge.c
477
set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
drivers/net/ethernet/chelsio/cxgb/sge.c
499
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
drivers/net/ethernet/chelsio/cxgb/sge.c
501
unsigned int cidx = q->cidx;
drivers/net/ethernet/chelsio/cxgb/sge.c
503
while (q->credits--) {
drivers/net/ethernet/chelsio/cxgb/sge.c
504
struct freelQ_ce *ce = &q->centries[cidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
510
if (++cidx == q->size)
drivers/net/ethernet/chelsio/cxgb/sge.c
530
struct freelQ *q = &sge->freelQ[i];
drivers/net/ethernet/chelsio/cxgb/sge.c
532
if (q->centries) {
drivers/net/ethernet/chelsio/cxgb/sge.c
533
free_freelQ_buffers(pdev, q);
drivers/net/ethernet/chelsio/cxgb/sge.c
534
kfree(q->centries);
drivers/net/ethernet/chelsio/cxgb/sge.c
536
if (q->entries) {
drivers/net/ethernet/chelsio/cxgb/sge.c
537
size = sizeof(struct freelQ_e) * q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
538
dma_free_coherent(&pdev->dev, size, q->entries,
drivers/net/ethernet/chelsio/cxgb/sge.c
539
q->dma_addr);
drivers/net/ethernet/chelsio/cxgb/sge.c
554
struct freelQ *q = &sge->freelQ[i];
drivers/net/ethernet/chelsio/cxgb/sge.c
556
q->genbit = 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
557
q->size = p->freelQ_size[i];
drivers/net/ethernet/chelsio/cxgb/sge.c
558
q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
drivers/net/ethernet/chelsio/cxgb/sge.c
559
size = sizeof(struct freelQ_e) * q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
560
q->entries = dma_alloc_coherent(&pdev->dev, size,
drivers/net/ethernet/chelsio/cxgb/sge.c
561
&q->dma_addr, GFP_KERNEL);
drivers/net/ethernet/chelsio/cxgb/sge.c
562
if (!q->entries)
drivers/net/ethernet/chelsio/cxgb/sge.c
565
size = sizeof(struct freelQ_ce) * q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
566
q->centries = kzalloc(size, GFP_KERNEL);
drivers/net/ethernet/chelsio/cxgb/sge.c
567
if (!q->centries)
drivers/net/ethernet/chelsio/cxgb/sge.c
612
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
drivers/net/ethernet/chelsio/cxgb/sge.c
616
unsigned int cidx = q->cidx;
drivers/net/ethernet/chelsio/cxgb/sge.c
618
q->in_use -= n;
drivers/net/ethernet/chelsio/cxgb/sge.c
619
ce = &q->centries[cidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
626
if (q->sop)
drivers/net/ethernet/chelsio/cxgb/sge.c
627
q->sop = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
631
q->sop = 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
634
if (++cidx == q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
636
ce = q->centries;
drivers/net/ethernet/chelsio/cxgb/sge.c
639
q->cidx = cidx;
drivers/net/ethernet/chelsio/cxgb/sge.c
653
struct cmdQ *q = &sge->cmdQ[i];
drivers/net/ethernet/chelsio/cxgb/sge.c
655
if (q->centries) {
drivers/net/ethernet/chelsio/cxgb/sge.c
656
if (q->in_use)
drivers/net/ethernet/chelsio/cxgb/sge.c
657
free_cmdQ_buffers(sge, q, q->in_use);
drivers/net/ethernet/chelsio/cxgb/sge.c
658
kfree(q->centries);
drivers/net/ethernet/chelsio/cxgb/sge.c
660
if (q->entries) {
drivers/net/ethernet/chelsio/cxgb/sge.c
661
size = sizeof(struct cmdQ_e) * q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
662
dma_free_coherent(&pdev->dev, size, q->entries,
drivers/net/ethernet/chelsio/cxgb/sge.c
663
q->dma_addr);
drivers/net/ethernet/chelsio/cxgb/sge.c
677
struct cmdQ *q = &sge->cmdQ[i];
drivers/net/ethernet/chelsio/cxgb/sge.c
679
q->genbit = 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
680
q->sop = 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
681
q->size = p->cmdQ_size[i];
drivers/net/ethernet/chelsio/cxgb/sge.c
682
q->in_use = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
683
q->status = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
684
q->processed = q->cleaned = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
685
q->stop_thres = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
686
spin_lock_init(&q->lock);
drivers/net/ethernet/chelsio/cxgb/sge.c
687
size = sizeof(struct cmdQ_e) * q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
688
q->entries = dma_alloc_coherent(&pdev->dev, size,
drivers/net/ethernet/chelsio/cxgb/sge.c
689
&q->dma_addr, GFP_KERNEL);
drivers/net/ethernet/chelsio/cxgb/sge.c
690
if (!q->entries)
drivers/net/ethernet/chelsio/cxgb/sge.c
693
size = sizeof(struct cmdQ_ce) * q->size;
drivers/net/ethernet/chelsio/cxgb/sge.c
694
q->centries = kzalloc(size, GFP_KERNEL);
drivers/net/ethernet/chelsio/cxgb/sge.c
695
if (!q->centries)
drivers/net/ethernet/chelsio/cxgb/sge.c
821
static void refill_free_list(struct sge *sge, struct freelQ *q)
drivers/net/ethernet/chelsio/cxgb/sge.c
824
struct freelQ_ce *ce = &q->centries[q->pidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
825
struct freelQ_e *e = &q->entries[q->pidx];
drivers/net/ethernet/chelsio/cxgb/sge.c
826
unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
drivers/net/ethernet/chelsio/cxgb/sge.c
828
while (q->credits < q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
832
skb = dev_alloc_skb(q->rx_buffer_size);
drivers/net/ethernet/chelsio/cxgb/sge.c
836
skb_reserve(skb, q->dma_offset);
drivers/net/ethernet/chelsio/cxgb/sge.c
846
e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
drivers/net/ethernet/chelsio/cxgb/sge.c
848
e->gen2 = V_CMD_GEN2(q->genbit);
drivers/net/ethernet/chelsio/cxgb/sge.c
852
if (++q->pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb/sge.c
853
q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb/sge.c
854
q->genbit ^= 1;
drivers/net/ethernet/chelsio/cxgb/sge.c
855
ce = q->centries;
drivers/net/ethernet/chelsio/cxgb/sge.c
856
e = q->entries;
drivers/net/ethernet/chelsio/cxgb/sge.c
858
q->credits++;
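
Most of the cmdQ/freelQ excerpts above share one ring idiom: a producer index that wraps with `if (++pidx == q->size) { pidx = 0; genbit ^= 1; }`, flipping a generation bit on every lap so consumers can distinguish freshly written descriptors from stale ones left over from the previous pass. A self-contained model of the producer side, assuming a toy descriptor payload (a real device ring would additionally need write barriers so the generation stamp is observed last):

    #include <stdint.h>
    #include <string.h>

    #define RING_SIZE 64

    struct desc {
        uint32_t payload;
        uint32_t gen;                            /* generation stamp of the writer */
    };

    struct ring {
        struct desc entries[RING_SIZE];
        unsigned int pidx;                       /* producer index */
        unsigned int genbit;                     /* flips on every wrap */
    };

    static void ring_init(struct ring *q)
    {
        /* Descriptors start zeroed and genbit starts at 1, matching the
         * driver's q->genbit = 1: untouched slots then read as stale. */
        memset(q, 0, sizeof(*q));
        q->genbit = 1;
    }

    static void ring_produce(struct ring *q, uint32_t payload)
    {
        struct desc *e = &q->entries[q->pidx];

        e->payload = payload;
        e->gen = q->genbit;                      /* stamp with the current lap */
        if (++q->pidx == RING_SIZE) {
            q->pidx = 0;
            q->genbit ^= 1;                      /* wrap: old entries now look stale */
        }
    }
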
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1170
struct sge_rspq *q = &adap->sge.qs[i].rspq;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1172
spin_lock_irq(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1173
spin_unlock_irq(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1956
const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1962
e->rx_pending = q->fl_size;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1963
e->rx_mini_pending = q->rspq_size;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1964
e->rx_jumbo_pending = q->jumbo_size;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1965
e->tx_pending = q->txq_size[0];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1974
struct qset_params *q;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1990
q = &adapter->params.sge.qset[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1991
for (i = 0; i < pi->nqsets; ++i, ++q) {
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1992
q->rspq_size = e->rx_mini_pending;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1993
q->fl_size = e->rx_pending;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1994
q->jumbo_size = e->rx_jumbo_pending;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1995
q->txq_size[0] = e->tx_pending;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1996
q->txq_size[1] = e->tx_pending;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1997
q->txq_size[2] = e->tx_pending;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2031
struct qset_params *q = adapter->params.sge.qset;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2033
c->rx_coalesce_usecs = q->coalesce_usecs;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2151
struct qset_params *q;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2201
q = &adapter->params.sge.qset[t.qset_idx];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2204
q->rspq_size = t.rspq_size;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2206
q->fl_size = t.fl_size[0];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2208
q->jumbo_size = t.fl_size[1];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2210
q->txq_size[0] = t.txq_size[0];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2212
q->txq_size[1] = t.txq_size[1];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2214
q->txq_size[2] = t.txq_size[2];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2216
q->cong_thres = t.cong_thres;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2221
q->coalesce_usecs = t.intr_lat;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2222
t3_update_qset_coalesce(qs, q);
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2226
q->polling = t.polling;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2234
q = &adapter->params.sge.
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2236
q->polling = t.polling;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2252
struct qset_params *q;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2277
q = &adapter->params.sge.qset[q1 + t.qset_idx];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2278
t.rspq_size = q->rspq_size;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2279
t.txq_size[0] = q->txq_size[0];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2280
t.txq_size[1] = q->txq_size[1];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2281
t.txq_size[2] = q->txq_size[2];
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2282
t.fl_size[0] = q->fl_size;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2283
t.fl_size[1] = q->jumbo_size;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2284
t.polling = q->polling;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2286
t.intr_lat = q->coalesce_usecs;
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
2287
t.cong_thres = q->cong_thres;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1045
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1048
clear_bit(TXQ_LAST_PKT_DB, &q->flags);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1049
if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1050
set_bit(TXQ_LAST_PKT_DB, &q->flags);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1052
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1057
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1089
const struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1096
struct tx_sw_desc *sd = &q->sdesc[pidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
1136
if (++pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1139
d = q->desc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1140
sd = q->sdesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1179
struct sge_txq *q, unsigned int ndesc,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1184
struct tx_desc *d = &q->desc[pidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
1215
q->sdesc[pidx].skb = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1228
V_WR_TID(q->token));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1240
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1242
htonl(V_WR_TID(q->token)));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1246
struct sge_qset *qs, struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1250
q->stops++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1268
struct sge_txq *q;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1282
q = &qs->txq[TXQ_ETH];
drivers/net/ethernet/chelsio/cxgb3/sge.c
1285
reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1287
credits = q->size - q->in_use;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1291
t3_stop_tx_queue(txq, qs, q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1294
dev->name, q->cntxt_id & 7);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1306
q->in_use += ndesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1307
if (unlikely(credits - ndesc < q->stop_thres)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1308
t3_stop_tx_queue(txq, qs, q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1310
if (should_restart_tx(q) &&
drivers/net/ethernet/chelsio/cxgb3/sge.c
1312
q->restarts++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1317
gen = q->gen;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1318
q->unacked += ndesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1319
compl = (q->unacked & 8) << (S_WR_COMPL - 3);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1320
q->unacked &= 7;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1321
pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1322
q->pidx += ndesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1323
if (q->pidx >= q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1324
q->pidx -= q->size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1325
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1363
write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1364
check_ring_tx_db(adap, q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1418
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1422
if (unlikely(!skb_queue_empty(&q->sendq))) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1423
addq_exit:__skb_queue_tail(&q->sendq, skb);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1426
if (unlikely(q->size - q->in_use < ndesc)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1427
struct sge_qset *qs = txq_to_qset(q, qid);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1432
if (should_restart_tx(q) &&
drivers/net/ethernet/chelsio/cxgb3/sge.c
1436
q->stops++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1450
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1452
unsigned int reclaim = q->processed - q->cleaned;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1454
q->in_use -= reclaim;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1455
q->cleaned += reclaim;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1473
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1486
wrp->wr_lo = htonl(V_WR_TID(q->token));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1488
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1489
again:reclaim_completed_tx_imm(q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1491
ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1494
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1500
write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1502
q->in_use++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1503
if (++q->pidx >= q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1504
q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1505
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1507
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1510
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1525
struct sge_txq *q = &qs->txq[TXQ_CTRL];
drivers/net/ethernet/chelsio/cxgb3/sge.c
1527
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1528
again:reclaim_completed_tx_imm(q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1530
while (q->in_use < q->size &&
drivers/net/ethernet/chelsio/cxgb3/sge.c
1531
(skb = __skb_dequeue(&q->sendq)) != NULL) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1533
write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1535
if (++q->pidx >= q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1536
q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1537
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1539
q->in_use++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1542
if (!skb_queue_empty(&q->sendq)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1546
if (should_restart_tx(q) &&
drivers/net/ethernet/chelsio/cxgb3/sge.c
1549
q->stops++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1552
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1555
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1630
struct sge_txq *q, unsigned int pidx,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1637
struct tx_desc *d = &q->desc[pidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
1640
q->sdesc[pidx].skb = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1661
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
drivers/net/ethernet/chelsio/cxgb3/sge.c
169
static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1694
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1700
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1701
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1703
ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1707
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
171
return container_of(q, struct sge_qset, rspq);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1715
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1719
gen = q->gen;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1720
q->in_use += ndesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1721
pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1722
q->pidx += ndesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1723
if (q->pidx >= q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1724
q->pidx -= q->size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1725
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1727
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1729
write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1730
check_ring_tx_db(adap, q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
174
static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1745
struct sge_txq *q = &qs->txq[TXQ_OFLD];
drivers/net/ethernet/chelsio/cxgb3/sge.c
1750
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1751
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1753
while ((skb = skb_peek(&q->sendq)) != NULL) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1757
if (unlikely(q->size - q->in_use < ndesc)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
176
return container_of(q, struct sge_qset, txq[qidx]);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1761
if (should_restart_tx(q) &&
drivers/net/ethernet/chelsio/cxgb3/sge.c
1764
q->stops++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1772
gen = q->gen;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1773
q->in_use += ndesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1774
pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1775
q->pidx += ndesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1777
if (q->pidx >= q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
1778
q->pidx -= q->size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1779
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1781
__skb_unlink(skb, &q->sendq);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1782
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1784
write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1786
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1788
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1791
set_bit(TXQ_RUNNING, &q->flags);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1792
set_bit(TXQ_LAST_PKT_DB, &q->flags);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1797
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1853
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1855
int was_empty = skb_queue_empty(&q->rx_queue);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1857
__skb_queue_tail(&q->rx_queue, skb);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1860
struct sge_qset *qs = rspq_to_qset(q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1876
struct sge_rspq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
1880
q->offload_bundles++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
189
const struct sge_rspq *q, unsigned int credits)
drivers/net/ethernet/chelsio/cxgb3/sge.c
1899
struct sge_rspq *q = &qs->rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
1908
spin_lock_irq(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1910
skb_queue_splice_init(&q->rx_queue, &queue);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1913
spin_unlock_irq(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1916
spin_unlock_irq(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1928
q->offload_bundles++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
193
V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
drivers/net/ethernet/chelsio/cxgb3/sge.c
1936
spin_lock_irq(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1937
skb_queue_splice(&queue, &q->rx_queue);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1938
spin_unlock_irq(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
1940
deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2287
const struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
2289
return (r->intr_gen & F_RSPD_GEN2) == q->gen;
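
is_new_response() is the consumer half of the same idiom: a descriptor is new exactly when its generation bit equals the queue's `gen`, and the consumer flips its own `gen` each time `cidx` wraps, staying in lap parity with the producer. A continuation of the producer sketch earlier in this section, reusing its assumed struct ring and struct desc:

    /* Consumer cursor: cidx/gen shadow the producer's pidx/genbit and
     * must start at the same initial values (cidx = 0, gen = 1). */
    struct consumer {
        unsigned int cidx;
        unsigned int gen;
    };

    static int is_new_entry(const struct desc *e, const struct consumer *c)
    {
        return e->gen == c->gen;                 /* written on the current lap? */
    }

    static int ring_consume(struct ring *q, struct consumer *c, uint32_t *out)
    {
        struct desc *e = &q->entries[c->cidx];

        if (!is_new_entry(e, c))
            return 0;                            /* producer has not caught up */
        *out = e->payload;
        if (++c->cidx == RING_SIZE) {
            c->cidx = 0;
            c->gen ^= 1;                         /* follow the producer's wrap parity */
        }
        return 1;
    }
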
drivers/net/ethernet/chelsio/cxgb3/sge.c
2292
static inline void clear_rspq_bufstate(struct sge_rspq * const q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
2294
q->pg_skb = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2295
q->rx_recycle_buf = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2325
struct sge_rspq *q = &qs->rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2326
struct rsp_desc *r = &q->desc[q->cidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
233
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
2332
q->next_holdoff = q->holdoff_tmr;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2334
while (likely(budget_left && is_new_response(r, q))) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
2355
q->async_notif++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2360
q->next_holdoff = NOMEM_INTR_DELAY;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2361
q->nomem++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2366
q->imm_data++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
237
struct tx_sw_desc *d = &q->sdesc[cidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
2386
skb = get_packet_pg(adap, fl, q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
2390
q->pg_skb = skb;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2397
q->rx_drops++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
240
sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
drivers/net/ethernet/chelsio/cxgb3/sge.c
2404
q->pure_rsps++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2412
if (unlikely(++q->cidx == q->size)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
2413
q->cidx = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2414
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2415
r = q->desc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2419
if (++q->credits >= (q->size / 4)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
2420
refill_rspq(adap, q, q->credits);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2421
q->credits = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2430
rx_eth(adap, q, skb, ethpad, lro);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2432
q->offload_pkts++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2436
ngathered = rx_offload(&adap->tdev, q, skb,
drivers/net/ethernet/chelsio/cxgb3/sge.c
2442
clear_rspq_bufstate(q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2447
deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2521
struct sge_rspq *q = &qs->rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2528
if (unlikely(++q->cidx == q->size)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
2529
q->cidx = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2530
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2531
r = q->desc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2540
q->pure_rsps++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2541
if (++q->credits >= (q->size / 4)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
2542
refill_rspq(adap, q, q->credits);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2543
q->credits = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2545
if (!is_new_response(r, q))
drivers/net/ethernet/chelsio/cxgb3/sge.c
2557
return is_new_response(r, q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2575
static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
2577
struct sge_qset *qs = rspq_to_qset(q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2578
struct rsp_desc *r = &q->desc[q->cidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
2580
if (!is_new_response(r, q))
drivers/net/ethernet/chelsio/cxgb3/sge.c
2584
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
drivers/net/ethernet/chelsio/cxgb3/sge.c
2585
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
drivers/net/ethernet/chelsio/cxgb3/sge.c
2600
struct sge_rspq *q = &qs->rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2602
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2604
q->unhandled_irqs++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2605
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
drivers/net/ethernet/chelsio/cxgb3/sge.c
2606
V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
drivers/net/ethernet/chelsio/cxgb3/sge.c
2607
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2618
struct sge_rspq *q = &qs->rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2620
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2622
if (handle_responses(qs->adap, q) < 0)
drivers/net/ethernet/chelsio/cxgb3/sge.c
2623
q->unhandled_irqs++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2624
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2638
struct sge_rspq *q = &adap->sge.qs[0].rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2640
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2643
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
drivers/net/ethernet/chelsio/cxgb3/sge.c
2644
V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
drivers/net/ethernet/chelsio/cxgb3/sge.c
2659
q->unhandled_irqs++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
266
d = cidx + 1 == q->size ? q->sdesc : d + 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2661
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2667
struct sge_rspq *q = &qs->rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2669
return is_new_response(&q->desc[q->cidx], q) && napi_schedule(&qs->napi);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2683
struct sge_rspq *q = &adap->sge.qs[0].rspq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2685
spin_lock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
2691
q->unhandled_irqs++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
2693
spin_unlock(&q->lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
282
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
287
unsigned int cidx = q->cidx;
drivers/net/ethernet/chelsio/cxgb3/sge.c
290
q->cntxt_id >= FW_TUNNEL_SGEEC_START;
drivers/net/ethernet/chelsio/cxgb3/sge.c
292
d = &q->sdesc[cidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
296
unmap_skb(d->skb, q, cidx, pdev);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3027
struct sge_qset *q = &adapter->sge.qs[id];
drivers/net/ethernet/chelsio/cxgb3/sge.c
3029
init_qset_cntxt(q, id);
drivers/net/ethernet/chelsio/cxgb3/sge.c
303
if (++cidx == q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
3030
timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3031
timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3033
q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3036
&q->fl[0].phys_addr, &q->fl[0].sdesc);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3037
if (!q->fl[0].desc)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3040
q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3043
&q->fl[1].phys_addr, &q->fl[1].sdesc);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3044
if (!q->fl[1].desc)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3047
q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3049
&q->rspq.phys_addr, NULL);
drivers/net/ethernet/chelsio/cxgb3/sge.c
305
d = q->sdesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3050
if (!q->rspq.desc)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3060
q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
drivers/net/ethernet/chelsio/cxgb3/sge.c
3062
&q->txq[i].phys_addr,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3063
&q->txq[i].sdesc);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3064
if (!q->txq[i].desc)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3067
q->txq[i].gen = 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3068
q->txq[i].size = p->txq_size[i];
drivers/net/ethernet/chelsio/cxgb3/sge.c
3069
spin_lock_init(&q->txq[i].lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3070
skb_queue_head_init(&q->txq[i].sendq);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3073
INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3074
INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3076
q->fl[0].gen = q->fl[1].gen = 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3077
q->fl[0].size = p->fl_size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3078
q->fl[1].size = p->jumbo_size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
308
q->cidx = cidx;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3080
q->rspq.gen = 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3081
q->rspq.size = p->rspq_size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3082
spin_lock_init(&q->rspq.lock);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3083
skb_queue_head_init(&q->rspq.rx_queue);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3085
q->txq[TXQ_ETH].stop_thres = nports *
drivers/net/ethernet/chelsio/cxgb3/sge.c
3089
q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3091
q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3094
q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3096
q->fl[1].buf_size = is_offload(adapter) ?
drivers/net/ethernet/chelsio/cxgb3/sge.c
3101
q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3102
q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3103
q->fl[0].order = FL0_PG_ORDER;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3104
q->fl[1].order = FL1_PG_ORDER;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3105
q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3106
q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3111
ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3112
q->rspq.phys_addr, q->rspq.size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3113
q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3118
ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3119
q->fl[i].phys_addr, q->fl[i].size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3120
q->fl[i].buf_size - SGE_PG_RSVD,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3126
ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3127
SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3128
q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3134
ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3136
q->txq[TXQ_OFLD].phys_addr,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3137
q->txq[TXQ_OFLD].size, 0, 1, 0);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3143
ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3145
q->txq[TXQ_CTRL].phys_addr,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3146
q->txq[TXQ_CTRL].size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3147
q->txq[TXQ_CTRL].token, 1, 0);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3154
q->adap = adapter;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3155
q->netdev = dev;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3156
q->tx_q = netdevq;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3157
t3_update_qset_coalesce(q, p);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3159
avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3166
if (avail < q->fl[0].size)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3170
avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3172
if (avail < q->fl[1].size)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3175
refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3177
t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
drivers/net/ethernet/chelsio/cxgb3/sge.c
3178
V_NEWTIMER(q->rspq.holdoff_tmr));
drivers/net/ethernet/chelsio/cxgb3/sge.c
3185
t3_free_qset(adapter, q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3200
struct sge_qset *q = &adap->sge.qs[i];
drivers/net/ethernet/chelsio/cxgb3/sge.c
3202
if (q->tx_reclaim_timer.function)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3203
mod_timer(&q->tx_reclaim_timer,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3206
if (q->rx_reclaim_timer.function)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3207
mod_timer(&q->rx_reclaim_timer,
drivers/net/ethernet/chelsio/cxgb3/sge.c
322
struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
3223
struct sge_qset *q = &adap->sge.qs[i];
drivers/net/ethernet/chelsio/cxgb3/sge.c
3225
if (q->tx_reclaim_timer.function)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3226
timer_delete_sync(&q->tx_reclaim_timer);
drivers/net/ethernet/chelsio/cxgb3/sge.c
3227
if (q->rx_reclaim_timer.function)
drivers/net/ethernet/chelsio/cxgb3/sge.c
3228
timer_delete_sync(&q->rx_reclaim_timer);
drivers/net/ethernet/chelsio/cxgb3/sge.c
325
unsigned int reclaim = q->processed - q->cleaned;
drivers/net/ethernet/chelsio/cxgb3/sge.c
329
free_tx_desc(adapter, q, reclaim);
drivers/net/ethernet/chelsio/cxgb3/sge.c
330
q->cleaned += reclaim;
drivers/net/ethernet/chelsio/cxgb3/sge.c
331
q->in_use -= reclaim;
drivers/net/ethernet/chelsio/cxgb3/sge.c
333
return q->processed - q->cleaned;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3356
struct qset_params *q = p->qset + i;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3358
q->polling = adap->params.rev > 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3359
q->coalesce_usecs = 5;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3360
q->rspq_size = 1024;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3361
q->fl_size = 1024;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3362
q->jumbo_size = 512;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3363
q->txq_size[TXQ_ETH] = 1024;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3364
q->txq_size[TXQ_OFLD] = 1024;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3365
q->txq_size[TXQ_CTRL] = 256;
drivers/net/ethernet/chelsio/cxgb3/sge.c
3366
q->cong_thres = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
342
static inline int should_restart_tx(const struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
344
unsigned int r = q->processed - q->cleaned;
drivers/net/ethernet/chelsio/cxgb3/sge.c
346
return q->in_use - r < (q->size >> 1);
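
The cxgb3 excerpts above (sge.c lines 342-346) show the half-full restart heuristic: a stopped Tx queue is restarted only once reclaiming the descriptors the hardware has already completed would leave the ring under half occupancy. A minimal compilable sketch of that check; the struct is a simplified stand-in for struct sge_txq, with only the fields the excerpts use:

#include <assert.h>
#include <stdbool.h>

struct txq {                    /* simplified stand-in for struct sge_txq */
    unsigned int in_use;        /* descriptors currently outstanding */
    unsigned int processed;     /* descriptors the hardware has completed */
    unsigned int cleaned;       /* completed descriptors already reclaimed */
    unsigned int size;          /* ring size in descriptors */
};

static bool should_restart_tx(const struct txq *q)
{
    unsigned int r = q->processed - q->cleaned;  /* reclaimable right now */
    return q->in_use - r < (q->size >> 1);       /* under half full after reclaim */
}

int main(void)
{
    struct txq q = { .in_use = 600, .processed = 500, .cleaned = 200, .size = 1024 };
    assert(should_restart_tx(&q));               /* 600 - 300 = 300 < 512 */
    return 0;
}
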
drivers/net/ethernet/chelsio/cxgb3/sge.c
349
static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
352
if (q->use_pages && d->pg_chunk.page) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
356
q->alloc_size, DMA_FROM_DEVICE);
drivers/net/ethernet/chelsio/cxgb3/sge.c
362
q->buf_size, DMA_FROM_DEVICE);
drivers/net/ethernet/chelsio/cxgb3/sge.c
376
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
378
unsigned int cidx = q->cidx;
drivers/net/ethernet/chelsio/cxgb3/sge.c
380
while (q->credits--) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
381
struct rx_sw_desc *d = &q->sdesc[cidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
384
clear_rx_desc(pdev, q, d);
drivers/net/ethernet/chelsio/cxgb3/sge.c
385
if (++cidx == q->size)
drivers/net/ethernet/chelsio/cxgb3/sge.c
389
if (q->pg_chunk.page) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
390
__free_pages(q->pg_chunk.page, q->order);
drivers/net/ethernet/chelsio/cxgb3/sge.c
391
q->pg_chunk.page = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
438
static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
442
if (!q->pg_chunk.page) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
445
q->pg_chunk.page = alloc_pages(gfp, order);
drivers/net/ethernet/chelsio/cxgb3/sge.c
446
if (unlikely(!q->pg_chunk.page))
drivers/net/ethernet/chelsio/cxgb3/sge.c
448
q->pg_chunk.va = page_address(q->pg_chunk.page);
drivers/net/ethernet/chelsio/cxgb3/sge.c
449
q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
drivers/net/ethernet/chelsio/cxgb3/sge.c
451
q->pg_chunk.offset = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
452
mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
drivers/net/ethernet/chelsio/cxgb3/sge.c
453
0, q->alloc_size, DMA_FROM_DEVICE);
drivers/net/ethernet/chelsio/cxgb3/sge.c
455
__free_pages(q->pg_chunk.page, order);
drivers/net/ethernet/chelsio/cxgb3/sge.c
456
q->pg_chunk.page = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
459
q->pg_chunk.mapping = mapping;
drivers/net/ethernet/chelsio/cxgb3/sge.c
461
sd->pg_chunk = q->pg_chunk;
drivers/net/ethernet/chelsio/cxgb3/sge.c
465
q->pg_chunk.offset += q->buf_size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
466
if (q->pg_chunk.offset == (PAGE_SIZE << order))
drivers/net/ethernet/chelsio/cxgb3/sge.c
467
q->pg_chunk.page = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
469
q->pg_chunk.va += q->buf_size;
drivers/net/ethernet/chelsio/cxgb3/sge.c
470
get_page(q->pg_chunk.page);
drivers/net/ethernet/chelsio/cxgb3/sge.c
481
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
483
if (q->pend_cred >= q->credits / 4) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
484
q->pend_cred = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
486
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
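
The ring_fl_db() excerpts (cxgb3 sge.c lines 481-486) batch free-list doorbells: the MMIO write is issued only once at least a quarter of the list's credits are pending. A sketch of that batching, with the register write replaced by a hypothetical stub; the write barrier the driver issues before the MMIO write is noted but elided:

#include <assert.h>
#include <stdio.h>

struct fl {                      /* simplified stand-in for struct sge_fl */
    unsigned int credits;        /* buffers currently on the free list */
    unsigned int pend_cred;      /* new buffers not yet announced to HW */
    unsigned int cntxt_id;       /* egress context id for the doorbell */
};

/* Hypothetical stand-in for the MMIO doorbell write (t3_write_reg). */
static void doorbell_write(unsigned int cntxt_id)
{
    printf("doorbell: egress context %u\n", cntxt_id);
}

/* Ring the free-list doorbell only when at least a quarter of the ring's
 * credits are pending, so MMIO writes are batched rather than per-buffer. */
static void ring_fl_db(struct fl *q)
{
    if (q->pend_cred >= q->credits / 4) {
        q->pend_cred = 0;
        /* the driver issues a write barrier before the register write */
        doorbell_write(q->cntxt_id);
    }
}

int main(void)
{
    struct fl q = { .credits = 1024, .pend_cred = 300, .cntxt_id = 7 };
    ring_fl_db(&q);              /* 300 >= 256, so the doorbell fires */
    assert(q.pend_cred == 0);
    return 0;
}
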
drivers/net/ethernet/chelsio/cxgb3/sge.c
501
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
drivers/net/ethernet/chelsio/cxgb3/sge.c
503
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
504
struct rx_desc *d = &q->desc[q->pidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
511
if (q->use_pages) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
512
if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
drivers/net/ethernet/chelsio/cxgb3/sge.c
513
q->order))) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
514
nomem: q->alloc_failed++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
520
add_one_rx_chunk(mapping, d, q->gen);
drivers/net/ethernet/chelsio/cxgb3/sge.c
522
q->buf_size - SGE_PG_RSVD,
drivers/net/ethernet/chelsio/cxgb3/sge.c
527
struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
drivers/net/ethernet/chelsio/cxgb3/sge.c
533
err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
drivers/net/ethernet/chelsio/cxgb3/sge.c
534
q->gen, adap->pdev);
drivers/net/ethernet/chelsio/cxgb3/sge.c
536
clear_rx_desc(adap->pdev, q, sd);
drivers/net/ethernet/chelsio/cxgb3/sge.c
543
if (++q->pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
544
q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
545
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
546
sd = q->sdesc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
547
d = q->desc;
drivers/net/ethernet/chelsio/cxgb3/sge.c
552
q->credits += count;
drivers/net/ethernet/chelsio/cxgb3/sge.c
553
q->pend_cred += count;
drivers/net/ethernet/chelsio/cxgb3/sge.c
554
ring_fl_db(adap, q);
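
Lines 543-554 of cxgb3's refill_fl(), quoted above, show the producer-side ring discipline: the producer index wraps to zero at q->size and the generation bit flips, then the newly posted buffers are accounted as credits before ring_fl_db() is called. A compilable sketch under those same rules; the struct fields are simplified stand-ins and the actual buffer allocation and descriptor writes are omitted:

#include <assert.h>

struct fl {                      /* simplified stand-in for struct sge_fl */
    unsigned int pidx;           /* producer index (next slot to fill) */
    unsigned int gen;            /* generation bit, flipped on each wrap */
    unsigned int size;           /* ring size in descriptors */
    unsigned int credits;        /* buffers currently on the ring */
    unsigned int pend_cred;      /* credits not yet told to the hardware */
};

/* Advance the producer index by one, flipping the generation bit when the
 * index wraps, exactly as the quoted refill_fl() loop does. */
static void fl_advance_one(struct fl *q)
{
    if (++q->pidx == q->size) {
        q->pidx = 0;
        q->gen ^= 1;
    }
}

/* Post 'count' freshly allocated buffers: advance per buffer, then account
 * the new credits (the driver follows this with ring_fl_db()). */
static void fl_post(struct fl *q, unsigned int count)
{
    for (unsigned int i = 0; i < count; i++)
        fl_advance_one(q);       /* descriptor filled here in the driver */
    q->credits += count;
    q->pend_cred += count;
}

int main(void)
{
    struct fl q = { .pidx = 1022, .gen = 0, .size = 1024 };
    fl_post(&q, 4);
    assert(q.pidx == 2 && q.gen == 1 && q.credits == 4);
    return 0;
}
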
drivers/net/ethernet/chelsio/cxgb3/sge.c
574
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
drivers/net/ethernet/chelsio/cxgb3/sge.c
577
struct rx_desc *from = &q->desc[idx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
578
struct rx_desc *to = &q->desc[q->pidx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
580
q->sdesc[q->pidx] = q->sdesc[idx];
drivers/net/ethernet/chelsio/cxgb3/sge.c
584
to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
drivers/net/ethernet/chelsio/cxgb3/sge.c
585
to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
drivers/net/ethernet/chelsio/cxgb3/sge.c
587
if (++q->pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
588
q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
589
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb3/sge.c
592
q->credits++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
593
q->pend_cred++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
594
ring_fl_db(adap, q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
643
static void t3_reset_qset(struct sge_qset *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
645
if (q->adap &&
drivers/net/ethernet/chelsio/cxgb3/sge.c
646
!(q->adap->flags & NAPI_INIT)) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
647
memset(q, 0, sizeof(*q));
drivers/net/ethernet/chelsio/cxgb3/sge.c
651
q->adap = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
652
memset(&q->rspq, 0, sizeof(q->rspq));
drivers/net/ethernet/chelsio/cxgb3/sge.c
653
memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
drivers/net/ethernet/chelsio/cxgb3/sge.c
654
memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
drivers/net/ethernet/chelsio/cxgb3/sge.c
655
q->txq_stopped = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
656
q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
drivers/net/ethernet/chelsio/cxgb3/sge.c
657
q->rx_reclaim_timer.function = NULL;
drivers/net/ethernet/chelsio/cxgb3/sge.c
658
q->nomem = 0;
drivers/net/ethernet/chelsio/cxgb3/sge.c
659
napi_free_frags(&q->napi);
drivers/net/ethernet/chelsio/cxgb3/sge.c
672
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
drivers/net/ethernet/chelsio/cxgb3/sge.c
678
if (q->fl[i].desc) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
680
t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
drivers/net/ethernet/chelsio/cxgb3/sge.c
682
free_rx_bufs(pdev, &q->fl[i]);
drivers/net/ethernet/chelsio/cxgb3/sge.c
683
kfree(q->fl[i].sdesc);
drivers/net/ethernet/chelsio/cxgb3/sge.c
685
q->fl[i].size *
drivers/net/ethernet/chelsio/cxgb3/sge.c
686
sizeof(struct rx_desc), q->fl[i].desc,
drivers/net/ethernet/chelsio/cxgb3/sge.c
687
q->fl[i].phys_addr);
drivers/net/ethernet/chelsio/cxgb3/sge.c
691
if (q->txq[i].desc) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
693
t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
drivers/net/ethernet/chelsio/cxgb3/sge.c
695
if (q->txq[i].sdesc) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
696
free_tx_desc(adapter, &q->txq[i],
drivers/net/ethernet/chelsio/cxgb3/sge.c
697
q->txq[i].in_use);
drivers/net/ethernet/chelsio/cxgb3/sge.c
698
kfree(q->txq[i].sdesc);
drivers/net/ethernet/chelsio/cxgb3/sge.c
701
q->txq[i].size *
drivers/net/ethernet/chelsio/cxgb3/sge.c
703
q->txq[i].desc, q->txq[i].phys_addr);
drivers/net/ethernet/chelsio/cxgb3/sge.c
704
__skb_queue_purge(&q->txq[i].sendq);
drivers/net/ethernet/chelsio/cxgb3/sge.c
707
if (q->rspq.desc) {
drivers/net/ethernet/chelsio/cxgb3/sge.c
709
t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
drivers/net/ethernet/chelsio/cxgb3/sge.c
712
q->rspq.size * sizeof(struct rsp_desc),
drivers/net/ethernet/chelsio/cxgb3/sge.c
713
q->rspq.desc, q->rspq.phys_addr);
drivers/net/ethernet/chelsio/cxgb3/sge.c
716
t3_reset_qset(q);
drivers/net/ethernet/chelsio/cxgb3/sge.c
840
struct sge_rspq *q, unsigned int len,
drivers/net/ethernet/chelsio/cxgb3/sge.c
848
newskb = skb = q->pg_skb;
drivers/net/ethernet/chelsio/cxgb3/sge.c
863
q->rx_recycle_buf++;
drivers/net/ethernet/chelsio/cxgb3/sge.c
867
if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3395
#define QDESC_GET(q, desc, type, label) do { \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3400
cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3407
#define QDESC_GET_TXQ(q, type, label) do { \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3408
struct sge_txq *txq = (struct sge_txq *)q; \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3412
#define QDESC_GET_RXQ(q, type, label) do { \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3413
struct sge_rspq *rxq = (struct sge_rspq *)q; \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3417
#define QDESC_GET_FLQ(q, type, label) do { \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3418
struct sge_fl *flq = (struct sge_fl *)q; \
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3424
QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3436
QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3445
QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3460
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3516
QDESC_GET_TXQ(&s->eohw_txq[i].q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1598
const struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1600
unsigned int idx = q->intr_params >> 1;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1615
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1641
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1752
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1754
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1758
q->adap = adap;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1759
cxgb4_set_rspq_intr_params(q, us, cnt);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1760
q->iqe_len = iqe_size;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1761
q->size = size;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1881
const struct sge_rspq *q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2100
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2106
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2108
void free_txq(struct adapter *adap, struct sge_txq *q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2110
struct sge_txq *q, bool unmap);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2113
void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2115
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2118
void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2121
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2149
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
2150
void cxgb4_quiesce_rx(struct sge_rspq *q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
762
typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
764
typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
865
struct sge_txq q;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
880
struct sge_txq q;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
890
struct sge_txq q;
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
955
struct sge_txq q; /* HW Txq */
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2694
T("TxQ ID:", q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2695
T("TxQ size:", q.size);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2696
T("TxQ inuse:", q.in_use);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2697
T("TxQ CIDX:", q.cidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2698
T("TxQ PIDX:", q.pidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2732
TL("TxQFull:", q.stops);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2733
TL("TxQRestarts:", q.restarts);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2817
T("TxQ ID:", q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2818
T("TxQ size:", q.size);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2819
T("TxQ inuse:", q.in_use);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2820
T("TxQ CIDX:", q.cidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2821
T("TxQ PIDX:", q.pidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2843
TL("TxQFull:", q.stops);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2844
TL("TxQRestarts:", q.restarts);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2925
T("TxQ ID:", q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2926
T("TxQ size:", q.size);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2927
T("TxQ inuse:", q.in_use);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2928
T("TxQ CIDX:", q.cidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
2929
T("TxQ PIDX:", q.pidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3076
T("TxQ ID:", q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3077
T("TxQ size:", q.size);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3078
T("TxQ inuse:", q.in_use);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3079
T("TxQ CIDX:", q.cidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3080
T("TxQ PIDX:", q.pidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3108
T("TxQ ID:", q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3109
T("TxQ size:", q.size);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3110
T("TxQ inuse:", q.in_use);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3111
T("TxQ CIDX:", q.cidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3112
T("TxQ PIDX:", q.pidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3113
TL("TxQFull:", q.stops);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
3114
TL("TxQRestarts:", q.restarts);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
1098
FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
908
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
931
s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
952
struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
954
for (i = 0; i < pi->nqsets; i++, q++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
955
err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
967
struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
969
for (i = 0; i < pi->nqsets; i++, q++)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
970
q->rspq.adaptive_rx = adaptive_rx;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
979
struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
981
return q->rspq.adaptive_rx;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1061
struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1064
for (j = 0; j < pi->nqsets; j++, q++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1075
q->msix = &adap->msix_info[msix];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1078
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1079
msix, &q->fl,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1086
q->rspq.idx = j;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1087
memset(&q->stats, 0, sizeof(q->stats));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1090
q = &s->ethrxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1091
for (j = 0; j < pi->nqsets; j++, t++, q++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1094
q->rspq.cntxt_id,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1240
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1243
struct adapter *adap = q->adap;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1253
if (q->desc && q->pktcnt_idx != new_idx) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1258
FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1264
q->pktcnt_idx = new_idx;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1268
q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2415
static void disable_txq_db(struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2419
spin_lock_irqsave(&q->db_lock, flags);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2420
q->db_disabled = 1;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2421
spin_unlock_irqrestore(&q->db_lock, flags);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2424
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2426
spin_lock_irq(&q->db_lock);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2427
if (q->db_pidx_inc) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2433
QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2434
q->db_pidx_inc = 0;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2436
q->db_disabled = 0;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2437
spin_unlock_irq(&q->db_lock);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2445
disable_txq_db(&adap->sge.ethtxq[i].q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2454
disable_txq_db(&txq->q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2459
disable_txq_db(&adap->sge.ctrlq[i].q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2467
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2476
enable_txq_db(adap, &txq->q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2481
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
251
FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2510
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2515
spin_lock_irq(&q->db_lock);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2516
ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2519
if (q->db_pidx != hw_pidx) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2523
if (q->db_pidx >= hw_pidx)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2524
delta = q->db_pidx - hw_pidx;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2526
delta = q->size - hw_pidx + q->db_pidx;
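
sync_txq_pidx() (cxgb4_main.c lines 2510-2539) recovers after doorbells were dropped: it reads the producer index the hardware last saw and computes how many increments it missed, modulo the ring size. The delta arithmetic from lines 2523-2526, as a small self-contained function; the name missed_pidx is a hypothetical label for the sketch:

#include <assert.h>

/* Number of producer-index increments the hardware missed: the distance
 * from its last-seen pidx to the driver's shadow copy, modulo ring size. */
static unsigned int missed_pidx(unsigned int db_pidx, unsigned int hw_pidx,
                                unsigned int size)
{
    if (db_pidx >= hw_pidx)
        return db_pidx - hw_pidx;
    return size - hw_pidx + db_pidx;     /* db_pidx wrapped, hw_pidx did not */
}

int main(void)
{
    assert(missed_pidx(10, 4, 1024) == 6);
    assert(missed_pidx(2, 1020, 1024) == 6);   /* wrapped case */
    return 0;
}
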
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2534
QID_V(q->cntxt_id) | val);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2537
q->db_disabled = 0;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2538
q->db_pidx_inc = 0;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2539
spin_unlock_irq(&q->db_lock);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2549
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2557
sync_txq_pidx(adap, &txq->q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
2562
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
555
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
570
dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5791
s->ethtxq[i].q.size = 1024;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5794
s->ctrlq[i].q.size = 512;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5797
s->ptptxq.q.size = 8;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
581
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
586
eq = container_of(txq, struct sge_eth_txq, q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
587
t4_sge_eth_txq_egress_update(q->adap, eq, -1);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
591
oq = container_of(txq, struct sge_uld_txq, q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
611
dev = q->adap->port[q->adap->chan_map[port]];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
625
dcb_rpl(q->adap, pcmd);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
629
t4_handle_fw_rpl(q->adap, p->data);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
633
do_l2t_write_rpl(q->adap, p);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
637
do_smt_write_rpl(q->adap, p);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
641
filter_rpl(q->adap, p);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
645
hash_filter_rpl(q->adap, p);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
649
hash_del_filter_rpl(q->adap, p);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
653
do_srq_table_rpl(q->adap, p);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
655
dev_err(q->adap->pdev_dev,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
913
void cxgb4_quiesce_rx(struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
915
if (q->handler)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
916
napi_disable(&q->napi);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
927
struct sge_rspq *q = adap->sge.ingr_map[i];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
929
if (!q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
932
cxgb4_quiesce_rx(q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
954
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
956
if (q->handler)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
957
napi_enable(&q->napi);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
961
SEINTARM_V(q->intr_params) |
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
962
INGRESSQID_V(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
973
struct sge_rspq *q = adap->sge.ingr_map[i];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
975
if (!q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
978
cxgb4_enable_rx(adap, q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
213
eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
112
struct sge_ofld_rxq *q = rxq_info->uldrxq;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
125
for (i = 0; i < nq; i++, q++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
144
q->msix = &adap->msix_info[msi_idx];
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
146
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
149
q->fl.size ? &q->fl : NULL,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
156
memset(&q->stats, 0, sizeof(q->stats));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
158
ids[i] = q->rspq.abs_id;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
162
q = rxq_info->uldrxq;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
163
for ( ; i; i--, q++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
164
if (q->rspq.desc)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
165
free_rspq_fl(adap, &q->rspq,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
166
q->fl.size ? &q->fl : NULL);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
167
if (q->msix)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
168
cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
194
FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
203
struct sge_ofld_rxq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
205
for ( ; n; n--, q++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
206
if (q->rspq.desc)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
207
free_rspq_fl(adap, &q->rspq,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
208
q->fl.size ? &q->fl : NULL);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
225
FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
375
struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
377
if (!q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
380
cxgb4_enable_rx(adap, q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
390
struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
392
if (!q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
395
cxgb4_quiesce_rx(q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
408
if (txq->q.desc) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
411
txq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
412
free_tx_desc(adap, &txq->q, txq->q.in_use, false);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
413
kfree(txq->q.sdesc);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
415
free_txq(adap, &txq->q);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
432
txq->q.size = 1024;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
57
static void uldrx_flush_handler(struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
59
struct adapter *adap = q->adap;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
61
if (adap->uld[q->uld].lro_flush)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
62
adap->uld[q->uld].lro_flush(&q->lro_mgr);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
74
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
77
struct adapter *adap = q->adap;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
78
struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
86
if (q->flush_handler)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
87
ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
88
rsp, gl, &q->lro_mgr,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
89
&q->napi);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
91
ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
drivers/net/ethernet/chelsio/cxgb4/sched.c
181
qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/sched.c
199
qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/sched.c
233
qid = txq->q.cntxt_id;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1011
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1021
if (unlikely(q->bar2_addr == NULL)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
1028
spin_lock_irqsave(&q->db_lock, flags);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1029
if (!q->db_disabled)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1031
QID_V(q->cntxt_id) | val);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1033
q->db_pidx_inc += n;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1034
q->db_pidx = q->pidx;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1035
spin_unlock_irqrestore(&q->db_lock, flags);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1051
if (n == 1 && q->bar2_qid == 0) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
1052
int index = (q->pidx
drivers/net/ethernet/chelsio/cxgb4/sge.c
1053
? (q->pidx - 1)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1054
: (q->size - 1));
drivers/net/ethernet/chelsio/cxgb4/sge.c
1055
u64 *wr = (u64 *)&q->desc[index];
drivers/net/ethernet/chelsio/cxgb4/sge.c
1058
(q->bar2_addr + SGE_UDB_WCDOORBELL),
drivers/net/ethernet/chelsio/cxgb4/sge.c
1061
writel(val | QID_V(q->bar2_qid),
drivers/net/ethernet/chelsio/cxgb4/sge.c
1062
q->bar2_addr + SGE_UDB_KDOORBELL);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1092
const struct sge_txq *q, void *pos)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1094
int left = (void *)q->stat - pos;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1105
skb_copy_bits(skb, left, q->desc, skb->len - left);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1106
pos = (void *)q->desc + (skb->len - left);
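
The excerpts at cxgb4 sge.c lines 1092-1106 copy a packet header into the Tx descriptor ring, whose usable bytes end at the status page (q->stat); anything that would run past it wraps back to the start of the ring (q->desc). A userspace sketch of the same wrap-around copy, using memcpy in place of skb_copy_bits():

#include <assert.h>
#include <string.h>

/* Copy 'len' bytes into a byte ring whose usable region is [base, stat);
 * bytes that would run past 'stat' wrap to 'base'. Returns the new write
 * position, mirroring the quoted header-copy logic. */
static void *ring_copy(void *pos, void *base, void *stat,
                       const void *src, size_t len)
{
    size_t left = (size_t)((char *)stat - (char *)pos);

    if (len <= left) {
        memcpy(pos, src, len);
        return (char *)pos + len;
    }
    memcpy(pos, src, left);                              /* fill to the end */
    memcpy(base, (const char *)src + left, len - left);  /* wrap the tail */
    return (char *)base + (len - left);
}

int main(void)
{
    char ring[8] = {0};
    void *end = ring_copy(ring + 6, ring, ring + 8, "abcd", 4);
    assert(ring[6] == 'a' && ring[7] == 'b' && ring[0] == 'c' && ring[1] == 'd');
    assert(end == ring + 2);
    return 0;
}
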
drivers/net/ethernet/chelsio/cxgb4/sge.c
1117
const struct sge_txq *q, void *pos,
drivers/net/ethernet/chelsio/cxgb4/sge.c
1121
int left = (void *)q->stat - pos;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1128
memcpy(q->desc, skb->data + left, length - left);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1129
pos = (void *)q->desc + (length - left);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1218
static void eth_txq_stop(struct sge_eth_txq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1220
netif_tx_stop_queue(q->txq);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1221
q->q.stops++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1224
static inline void txq_advance(struct sge_txq *q, unsigned int n)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1226
q->in_use += n;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1227
q->pidx += n;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1228
if (q->pidx >= q->size)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1229
q->pidx -= q->size;
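
txq_advance() (cxgb4 sge.c lines 1224-1229, above) claims n descriptors by growing in_use and advancing pidx modulo the ring size; its counterpart txq_avail() (sge.c line 210, quoted further down in this listing) keeps one slot permanently unused so a full ring stays distinguishable from an empty one. Both in one compilable sketch with a simplified stand-in type:

#include <assert.h>

struct txq {                     /* simplified stand-in for struct sge_txq */
    unsigned int in_use;         /* descriptors currently claimed */
    unsigned int pidx;           /* producer index */
    unsigned int size;           /* ring size in descriptors */
};

/* Claim n descriptors: grow in_use and advance pidx modulo the ring size. */
static void txq_advance(struct txq *q, unsigned int n)
{
    q->in_use += n;
    q->pidx += n;
    if (q->pidx >= q->size)
        q->pidx -= q->size;
}

/* One slot is always held back (size - 1 - in_use), so a completely full
 * ring never looks identical to a completely empty one. */
static unsigned int txq_avail(const struct txq *q)
{
    return q->size - 1 - q->in_use;
}

int main(void)
{
    struct txq q = { .in_use = 0, .pidx = 1020, .size = 1024 };
    txq_advance(&q, 6);
    assert(q.pidx == 2 && txq_avail(&q) == 1024 - 1 - 6);
    return 0;
}
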
drivers/net/ethernet/chelsio/cxgb4/sge.c
1408
struct sge_txq *q = &eq->q;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1411
if (!q->in_use || !__netif_tx_trylock(eq->txq))
drivers/net/ethernet/chelsio/cxgb4/sge.c
1415
reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1417
hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
drivers/net/ethernet/chelsio/cxgb4/sge.c
1418
hw_in_use = q->pidx - hw_cidx;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1420
hw_in_use += q->size;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1427
if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
1429
eq->q.restarts++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1502
struct sge_eth_txq *q;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1532
q = &adap->sge.ptptxq;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1534
q = &adap->sge.ethtxq[qidx + pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/sge.c
1537
reclaim_completed_tx(adap, &q->q, -1, true);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1549
credits = txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1552
eth_txq_stop(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1565
last_desc = q->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1566
if (last_desc >= q->q.size)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1567
last_desc -= q->q.size;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1568
sgl_sdesc = &q->q.sdesc[last_desc];
drivers/net/ethernet/chelsio/cxgb4/sge.c
1573
q->mapping_err++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1587
eth_txq_stop(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1592
wr = (void *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
1593
eowr = (void *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
1634
q->tso++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1635
q->tx_cso += ssi->gso_segs;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1648
sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
drivers/net/ethernet/chelsio/cxgb4/sge.c
1651
left = (u8 *)end - (u8 *)q->q.stat;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1652
end = (void *)q->q.desc + left;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1655
q->uso++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1656
q->tx_cso += ssi->gso_segs;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1669
q->tx_cso++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1673
if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
1678
left = (u8 *)end - (u8 *)q->q.stat;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1679
end = (void *)q->q.desc + left;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1680
sgl = (void *)q->q.desc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1684
q->vlan_ins++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1699
ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1701
ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1711
cxgb4_inline_tx_skb(skb, &q->q, sgl);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1714
cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
drivers/net/ethernet/chelsio/cxgb4/sge.c
1720
txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1722
cxgb4_ring_tx_db(adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1850
reclaim_completed_tx(adapter, &txq->q, -1, true);
drivers/net/ethernet/chelsio/cxgb4/sge.c
1858
credits = txq_avail(&txq->q) - ndesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1873
last_desc = txq->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1874
if (last_desc >= txq->q.size)
drivers/net/ethernet/chelsio/cxgb4/sge.c
1875
last_desc -= txq->q.size;
drivers/net/ethernet/chelsio/cxgb4/sge.c
1876
sgl_sdesc = &txq->q.sdesc[last_desc];
drivers/net/ethernet/chelsio/cxgb4/sge.c
1912
wr = (void *)&txq->q.desc[txq->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2012
cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2052
struct sge_txq *tq = &txq->q;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2074
txq_advance(&txq->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2076
cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
208
static inline unsigned int txq_avail(const struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2095
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2097
int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
drivers/net/ethernet/chelsio/cxgb4/sge.c
2098
int reclaim = hw_cidx - q->cidx;
drivers/net/ethernet/chelsio/cxgb4/sge.c
210
return q->size - 1 - q->in_use;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2101
reclaim += q->size;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2103
q->in_use -= reclaim;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2104
q->cidx = hw_cidx;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2268
reclaim_completed_tx_imm(&eohw_txq->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2273
wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2293
left = txq_avail(&eohw_txq->q) - ndesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2331
sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
drivers/net/ethernet/chelsio/cxgb4/sge.c
2343
left = (u8 *)end - (u8 *)eohw_txq->q.stat;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2344
end = (void *)eohw_txq->q.desc + left;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2347
if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
2352
left = (u8 *)end - (u8 *)eohw_txq->q.stat;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2354
end = (void *)eohw_txq->q.desc + left;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2355
sgl = (void *)eohw_txq->q.desc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2358
cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
drivers/net/ethernet/chelsio/cxgb4/sge.c
2375
txq_advance(&eohw_txq->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2377
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2640
static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2642
reclaim_completed_tx_imm(&q->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2643
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
2645
q->q.stops++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2646
q->full = 1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2661
struct sge_eth_txq *q;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2673
q = &adap->sge.ethtxq[pi->first_qset];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2674
__netif_tx_lock_bh(q->txq);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2676
reclaim_completed_tx(adap, &q->q, -1, true);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2677
credits = txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2679
__netif_tx_unlock_bh(q->txq);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2683
wr = (void *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2712
txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2713
cxgb4_ring_tx_db(adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2714
__netif_tx_unlock_bh(q->txq);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2736
static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2748
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2750
if (unlikely(q->full)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
2752
__skb_queue_tail(&q->sendq, skb);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2753
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2757
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2758
cxgb4_inline_tx_skb(skb, &q->q, wr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2760
txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2761
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
drivers/net/ethernet/chelsio/cxgb4/sge.c
2762
ctrlq_check_stop(q, wr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2764
cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2765
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2781
struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2783
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2784
reclaim_completed_tx_imm(&q->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2785
BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
drivers/net/ethernet/chelsio/cxgb4/sge.c
2787
while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
2795
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2796
txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2797
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2799
cxgb4_inline_tx_skb(skb, &q->q, wr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2802
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
2803
unsigned long old = q->q.stops;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2805
ctrlq_check_stop(q, wr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2806
if (q->q.stops != old) { /* suspended anew */
drivers/net/ethernet/chelsio/cxgb4/sge.c
2807
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2812
cxgb4_ring_tx_db(q->adap, &q->q, written);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2815
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2817
q->full = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2820
cxgb4_ring_tx_db(q->adap, &q->q, written);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2821
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2894
static void txq_stop_maperr(struct sge_uld_txq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2896
q->mapping_err++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2897
q->q.stops++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2898
set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
drivers/net/ethernet/chelsio/cxgb4/sge.c
2899
q->adap->sge.txq_maperr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2910
static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2913
q->q.stops++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2914
q->full = 1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2935
static void service_ofldq(struct sge_uld_txq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2936
__must_hold(&q->sendq.lock)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2952
if (q->service_ofldq_running)
drivers/net/ethernet/chelsio/cxgb4/sge.c
2954
q->service_ofldq_running = true;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2956
while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
2964
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2966
cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2970
credits = txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2973
ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2975
pos = (u64 *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
2977
cxgb4_inline_tx_skb(skb, &q->q, pos);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2978
else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
drivers/net/ethernet/chelsio/cxgb4/sge.c
2980
txq_stop_maperr(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2981
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
2991
txq = &q->q;
drivers/net/ethernet/chelsio/cxgb4/sge.c
2992
pos = (void *)inline_tx_skb_header(skb, &q->q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3010
cxgb4_write_sgl(skb, &q->q, (void *)pos,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3014
skb->dev = q->adap->port[0];
drivers/net/ethernet/chelsio/cxgb4/sge.c
3017
last_desc = q->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3018
if (last_desc >= q->q.size)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3019
last_desc -= q->q.size;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3020
q->q.sdesc[last_desc].skb = skb;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3023
txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3026
cxgb4_ring_tx_db(q->adap, &q->q, written);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3035
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3036
__skb_unlink(skb, &q->sendq);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3041
cxgb4_ring_tx_db(q->adap, &q->q, written);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3046
q->service_ofldq_running = false;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3056
static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3059
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3069
__skb_queue_tail(&q->sendq, skb);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3070
if (q->sendq.qlen == 1)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3071
service_ofldq(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3073
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3085
struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3087
spin_lock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3088
q->full = 0; /* the queue actually is completely empty now */
drivers/net/ethernet/chelsio/cxgb4/sge.c
3089
service_ofldq(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3090
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
313
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
316
unsigned int cidx = q->cidx;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3176
const struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3179
int left = (void *)q->stat - pos;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3187
memcpy(q->desc, src + left, length - left);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3188
pos = (void *)q->desc + (length - left);
drivers/net/ethernet/chelsio/cxgb4/sge.c
319
d = &q->sdesc[cidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
3207
static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3224
if (!spin_trylock(&q->sendq.lock))
drivers/net/ethernet/chelsio/cxgb4/sge.c
3227
if (q->full || !skb_queue_empty(&q->sendq) ||
drivers/net/ethernet/chelsio/cxgb4/sge.c
3228
q->service_ofldq_running) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
3229
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3233
credits = txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3234
pos = (u64 *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
3237
inline_tx_header(src, &q->q, pos, len);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3239
ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3240
txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3241
cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3243
spin_unlock(&q->sendq.lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
330
if (++cidx == q->size) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
332
d = q->sdesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
335
q->cidx = cidx;
drivers/net/ethernet/chelsio/cxgb4/sge.c
341
static inline int reclaimable(const struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
343
int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
drivers/net/ethernet/chelsio/cxgb4/sge.c
344
hw_cidx -= q->cidx;
drivers/net/ethernet/chelsio/cxgb4/sge.c
345
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
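
reclaimable() (cxgb4 sge.c lines 341-345, above) reads the hardware's consumer index from the status page at the end of the ring and measures how far it has advanced past the driver's cidx, wrapping negative deltas back into range. A sketch of just the arithmetic; the driver's ntohs() byte swap and READ_ONCE() annotation are deliberately elided:

#include <assert.h>
#include <stdint.h>

/* Completed-descriptor count: distance from the driver's consumer index to
 * the hardware's published one, wrapped modulo the ring size if negative. */
static int reclaimable(uint16_t hw_cidx, unsigned int sw_cidx,
                       unsigned int size)
{
    int delta = (int)hw_cidx - (int)sw_cidx;
    return delta < 0 ? delta + (int)size : delta;
}

int main(void)
{
    assert(reclaimable(10, 1020, 1024) == 14);  /* hardware wrapped past us */
    assert(reclaimable(500, 480, 1024) == 20);
    return 0;
}
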
drivers/net/ethernet/chelsio/cxgb4/sge.c
359
static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
362
int reclaim = reclaimable(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3623
WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3662
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3668
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3669
struct adapter *adapter = q->adap;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3670
struct sge *s = &q->adap->sge;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3671
int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
drivers/net/ethernet/chelsio/cxgb4/sge.c
3677
pi = netdev_priv(q->netdev);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3683
t4_tx_completion_handler(q, rsp, si);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3688
return handle_trace_pkt(q->adap, si);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3692
if (q->adap->params.tp.rx_pkt_encap) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
3700
(q->netdev->features & NETIF_F_RXCSUM);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3713
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
3738
if (!t4_tx_hststamp(adapter, skb, q->netdev))
drivers/net/ethernet/chelsio/cxgb4/sge.c
374
free_tx_desc(adap, q, reclaim, unmap);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3742
skb->protocol = eth_type_trans(skb, q->netdev);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3743
skb_record_rx_queue(skb, q->idx);
drivers/net/ethernet/chelsio/cxgb4/sge.c
375
q->in_use -= reclaim;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3751
cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
drivers/net/ethernet/chelsio/cxgb4/sge.c
3778
if (q->adap->params.tp.rx_pkt_encap)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3796
skb_mark_napi_id(skb, &q->napi);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3816
static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3822
if (q->cidx == 0)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3823
q->cidx = q->size - 1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3825
q->cidx--;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3826
d = &q->sdesc[q->cidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
3829
q->avail++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3842
const struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3844
return (r->type_gen >> RSPD_GEN_S) == q->gen;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3853
static inline void rspq_next(struct sge_rspq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3855
q->cur_desc = (void *)q->cur_desc + q->iqe_len;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3856
if (unlikely(++q->cidx == q->size)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
3857
q->cidx = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3858
q->gen ^= 1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3859
q->cur_desc = q->desc;
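
is_new_response() and rspq_next() (cxgb4 sge.c lines 3842-3859, above) show the response-queue generation protocol: an entry is valid only while its generation bit matches the queue's expected bit, and the driver flips its expectation each time cidx wraps, so stale entries left over from the previous pass fail the check without any extra bookkeeping. A minimal sketch; the RSPD_GEN_S bit position and the one-byte descriptor are assumptions made for the example, not the hardware layout:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define RSPD_GEN_S 7             /* bit position assumed for this sketch */

struct rsp_desc { uint8_t type_gen; };

struct rspq {                    /* simplified stand-in for struct sge_rspq */
    const struct rsp_desc *desc;     /* base of the response ring */
    const struct rsp_desc *cur_desc; /* current read position */
    unsigned int cidx;
    unsigned int size;
    unsigned int gen;            /* generation bit we expect to see */
    size_t iqe_len;              /* entry size in bytes */
};

/* An entry is new only while its generation bit matches the queue's. */
static int is_new_response(const struct rsp_desc *r, const struct rspq *q)
{
    return (unsigned int)(r->type_gen >> RSPD_GEN_S) == q->gen;
}

static void rspq_next(struct rspq *q)
{
    q->cur_desc = (const struct rsp_desc *)
                  ((const char *)q->cur_desc + q->iqe_len);
    if (++q->cidx == q->size) {
        q->cidx = 0;
        q->gen ^= 1;             /* old-pass entries now fail the check */
        q->cur_desc = q->desc;
    }
}

int main(void)
{
    struct rsp_desc ring[2] = { { 1u << RSPD_GEN_S }, { 0 } };
    struct rspq q = { ring, ring, 0, 2, 1, sizeof(struct rsp_desc) };
    assert(is_new_response(q.cur_desc, &q));   /* gen bits match */
    rspq_next(&q);
    rspq_next(&q);                             /* wraps: cidx 0, gen 0 */
    assert(q.cidx == 0 && q.gen == 0 && q.cur_desc == ring);
    return 0;
}
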
drivers/net/ethernet/chelsio/cxgb4/sge.c
3876
static int process_responses(struct sge_rspq *q, int budget)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3881
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3882
struct adapter *adapter = q->adap;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3886
rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
drivers/net/ethernet/chelsio/cxgb4/sge.c
3887
if (!is_new_response(rc, q)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
3888
if (q->flush_handler)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3889
q->flush_handler(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3902
if (likely(q->offset > 0)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
3903
free_rx_bufs(q->adap, &rxq->fl, 1);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3904
q->offset = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
391
void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3915
fp->offset = q->offset;
drivers/net/ethernet/chelsio/cxgb4/sge.c
3920
unmap_rx_buf(q->adap, &rxq->fl);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3929
dma_sync_single_for_cpu(q->adap->pdev_dev,
drivers/net/ethernet/chelsio/cxgb4/sge.c
3938
ret = q->handler(q, q->cur_desc, &si);
drivers/net/ethernet/chelsio/cxgb4/sge.c
394
(void)reclaim_completed_tx(adap, q, -1, unmap);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3940
q->offset += ALIGN(fp->size, s->fl_align);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3944
ret = q->handler(q, q->cur_desc, NULL);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3946
ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3951
q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3955
rspq_next(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3959
if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
drivers/net/ethernet/chelsio/cxgb4/sge.c
3960
__refill_fl(q->adap, &rxq->fl);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3978
struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3982
work_done = process_responses(q, budget);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3987
timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
drivers/net/ethernet/chelsio/cxgb4/sge.c
3989
if (q->adaptive_rx) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
3997
q->next_intr_params =
drivers/net/ethernet/chelsio/cxgb4/sge.c
4000
params = q->next_intr_params;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4002
params = q->next_intr_params;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4003
q->next_intr_params = q->intr_params;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4013
if (unlikely(q->bar2_addr == NULL)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
4014
t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
drivers/net/ethernet/chelsio/cxgb4/sge.c
4015
val | INGRESSQID_V((u32)q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/sge.c
4017
writel(val | INGRESSQID_V(q->bar2_qid),
drivers/net/ethernet/chelsio/cxgb4/sge.c
4018
q->bar2_addr + SGE_UDB_GTS);
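
Note: the pair of hits at sge.c:4013-4018 shows the doorbell fallback: when no BAR2 user-doorbell page was mapped for the queue, the update goes through the shared per-function GTS register instead. A userspace-runnable sketch of that selection, with printf standing in for the MMIO write:

    #include <stdint.h>
    #include <stdio.h>

    /* Stub standing in for an MMIO write so the sketch runs anywhere. */
    static void writel(uint32_t val, const char *where)
    {
        printf("writel 0x%x -> %s\n", val, where);
    }

    struct dbq {
        void     *bar2_addr;  /* per-queue BAR2 doorbell page, may be NULL */
        uint32_t  bar2_qid;
        uint32_t  cntxt_id;
    };

    /* Prefer the queue's own BAR2 doorbell; fall back to the shared
     * per-function GTS register when no BAR2 page was mapped. */
    static void ring_gts(const struct dbq *q, uint32_t val)
    {
        if (q->bar2_addr == NULL)
            writel(val | q->cntxt_id, "SGE_PF_GTS register");
        else
            writel(val | q->bar2_qid, "BAR2 + SGE_UDB_GTS");
    }

    int main(void)
    {
        struct dbq a = { NULL, 0, 7 }, b = { (void *)1, 3, 7 };

        ring_gts(&a, 0x100);  /* register path */
        ring_gts(&b, 0x100);  /* BAR2 path */
        return 0;
    }
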
drivers/net/ethernet/chelsio/cxgb4/sge.c
4056
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4075
q->adap->tids.eotid_base;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4076
entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4107
flits = ethofld_calc_tx_flits(q->adap, skb,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4136
struct sge_rspq *q = cookie;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4138
napi_schedule(&q->napi);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4150
struct sge_rspq *q = &adap->sge.intrq;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4155
rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
drivers/net/ethernet/chelsio/cxgb4/sge.c
4156
if (!is_new_response(rc, q))
drivers/net/ethernet/chelsio/cxgb4/sge.c
4167
rspq_next(q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4170
val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4175
if (unlikely(q->bar2_addr == NULL)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
4177
val | INGRESSQID_V(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/sge.c
4179
writel(val | INGRESSQID_V(q->bar2_qid),
drivers/net/ethernet/chelsio/cxgb4/sge.c
4180
q->bar2_addr + SGE_UDB_GTS);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4287
struct sge_eth_txq *q = &s->ptptxq;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4291
avail = reclaimable(&q->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4294
free_tx_desc(adap, &q->q, avail, false);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4295
q->q.in_use -= avail;
drivers/net/ethernet/chelsio/cxgb4/sge.c
438
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
drivers/net/ethernet/chelsio/cxgb4/sge.c
441
struct rx_sw_desc *d = &q->sdesc[q->cidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
449
if (++q->cidx == q->size)
drivers/net/ethernet/chelsio/cxgb4/sge.c
450
q->cidx = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
451
q->avail--;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4553
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
drivers/net/ethernet/chelsio/cxgb4/sge.c
4555
q->cntxt_id = id;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4556
q->bar2_addr = bar2_address(adap,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4557
q->cntxt_id,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4559
&q->bar2_qid);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4560
q->in_use = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4561
q->cidx = q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4562
q->stops = q->restarts = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4563
q->stat = (void *)&q->desc[q->size];
drivers/net/ethernet/chelsio/cxgb4/sge.c
4564
spin_lock_init(&q->db_lock);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4565
adap->sge.egr_map[id - adap->sge.egr_start] = q;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4588
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4590
txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4592
&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4594
if (!txq->q.desc)
drivers/net/ethernet/chelsio/cxgb4/sge.c
4633
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4648
kfree(txq->q.sdesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4649
txq->q.sdesc = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4652
txq->q.desc, txq->q.phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4653
txq->q.desc = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4657
txq->q.q_type = CXGB4_TXQ_ETH;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4658
init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
drivers/net/ethernet/chelsio/cxgb4/sge.c
466
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
468
struct rx_sw_desc *d = &q->sdesc[q->cidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
4681
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4683
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4684
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4686
if (!txq->q.desc)
drivers/net/ethernet/chelsio/cxgb4/sge.c
4708
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4714
txq->q.desc, txq->q.phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4715
txq->q.desc = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4719
txq->q.q_type = CXGB4_TXQ_CTRL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4720
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
drivers/net/ethernet/chelsio/cxgb4/sge.c
474
if (++q->cidx == q->size)
drivers/net/ethernet/chelsio/cxgb4/sge.c
4740
static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
475
q->cidx = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4751
nentries = q->size + s->stat_len / sizeof(struct tx_desc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4752
q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
drivers/net/ethernet/chelsio/cxgb4/sge.c
4753
sizeof(struct tx_sw_desc), &q->phys_addr,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4754
&q->sdesc, s->stat_len, NUMA_NO_NODE);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4755
if (!q->desc)
drivers/net/ethernet/chelsio/cxgb4/sge.c
476
q->avail--;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4779
c.eqaddr = cpu_to_be64(q->phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4783
kfree(q->sdesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4784
q->sdesc = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4787
q->desc, q->phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4788
q->desc = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
479
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
4792
init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
drivers/net/ethernet/chelsio/cxgb4/sge.c
4806
ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
drivers/net/ethernet/chelsio/cxgb4/sge.c
481
if (q->pend_cred >= 8) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
4810
txq->q.q_type = CXGB4_TXQ_ULD;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4824
ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4828
txq->q.q_type = CXGB4_TXQ_ULD;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4839
void free_txq(struct adapter *adap, struct sge_txq *q)
drivers/net/ethernet/chelsio/cxgb4/sge.c
4844
q->size * sizeof(struct tx_desc) + s->stat_len,
drivers/net/ethernet/chelsio/cxgb4/sge.c
4845
q->desc, q->phys_addr);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4846
q->cntxt_id = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4847
q->sdesc = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
4848
q->desc = NULL;
drivers/net/ethernet/chelsio/cxgb4/sge.c
485
val |= PIDX_V(q->pend_cred / 8);
drivers/net/ethernet/chelsio/cxgb4/sge.c
487
val |= PIDX_T5_V(q->pend_cred / 8);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4880
if (txq->q.desc) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
4882
txq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4883
free_tx_desc(adap, &txq->q, txq->q.in_use, false);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4884
kfree(txq->q.sdesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4885
free_txq(adap, &txq->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4924
if (etq->q.desc) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
4926
etq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4928
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4930
kfree(etq->q.sdesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4931
free_txq(adap, &etq->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4939
if (cq->q.desc) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
4942
cq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4944
free_txq(adap, &cq->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4963
if (etq->q.desc) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
4965
etq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4967
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4969
kfree(etq->q.sdesc);
drivers/net/ethernet/chelsio/cxgb4/sge.c
4970
free_txq(adap, &etq->q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
498
if (unlikely(q->bar2_addr == NULL)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
500
val | QID_V(q->cntxt_id));
drivers/net/ethernet/chelsio/cxgb4/sge.c
5012
if (txq->q.desc)
drivers/net/ethernet/chelsio/cxgb4/sge.c
502
writel(val | QID_V(q->bar2_qid),
drivers/net/ethernet/chelsio/cxgb4/sge.c
5026
if (txq->q.desc)
drivers/net/ethernet/chelsio/cxgb4/sge.c
503
q->bar2_addr + SGE_UDB_KDOORBELL);
drivers/net/ethernet/chelsio/cxgb4/sge.c
5035
if (cq->q.desc)
drivers/net/ethernet/chelsio/cxgb4/sge.c
510
q->pend_cred &= 7;
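
Note: the ring_fl_db() hits (pend_cred >= 8, PIDX of pend_cred / 8, then pend_cred &= 7) show that free-list credits are advertised to hardware only in units of 8 buffers, with the remainder carried forward. A runnable sketch of just that batching, PIDX encoding elided and printf standing in for the doorbell write:

    #include <stdio.h>

    struct fl_sketch {
        unsigned int pend_cred;  /* buffers posted but not yet advertised */
    };

    /* Hardware consumes free-list credits in units of 8 buffers, so the
     * doorbell only advertises whole multiples and keeps the remainder
     * pending for the next call. */
    static void ring_fl_db(struct fl_sketch *q)
    {
        if (q->pend_cred >= 8) {
            printf("doorbell: +%u credit units\n", q->pend_cred / 8);
            q->pend_cred &= 7;   /* carry the sub-unit remainder */
        }
    }

    int main(void)
    {
        struct fl_sketch q = { 0 };

        for (int i = 0; i < 20; i++) {   /* rings twice, at 8 and 16 */
            q.pend_cred++;
            ring_fl_db(&q);
        }
        return 0;
    }
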
drivers/net/ethernet/chelsio/cxgb4/sge.c
535
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
drivers/net/ethernet/chelsio/cxgb4/sge.c
541
unsigned int cred = q->avail;
drivers/net/ethernet/chelsio/cxgb4/sge.c
542
__be64 *d = &q->desc[q->pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
543
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
drivers/net/ethernet/chelsio/cxgb4/sge.c
547
if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
drivers/net/ethernet/chelsio/cxgb4/sge.c
563
q->large_alloc_failed++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
572
q->mapping_err++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
581
q->avail++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
582
if (++q->pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
583
q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
584
sd = q->sdesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
585
d = q->desc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
594
q->alloc_failed++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
602
q->mapping_err++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
610
q->avail++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
611
if (++q->pidx == q->size) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
612
q->pidx = 0;
drivers/net/ethernet/chelsio/cxgb4/sge.c
613
sd = q->sdesc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
614
d = q->desc;
drivers/net/ethernet/chelsio/cxgb4/sge.c
618
out: cred = q->avail - cred;
drivers/net/ethernet/chelsio/cxgb4/sge.c
619
q->pend_cred += cred;
drivers/net/ethernet/chelsio/cxgb4/sge.c
620
ring_fl_db(adap, q);
drivers/net/ethernet/chelsio/cxgb4/sge.c
622
if (unlikely(fl_starving(adap, q))) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
624
q->low++;
drivers/net/ethernet/chelsio/cxgb4/sge.c
625
set_bit(q->cntxt_id - adap->sge.egr_start,
drivers/net/ethernet/chelsio/cxgb4/sge.c
823
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
852
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
drivers/net/ethernet/chelsio/cxgb4/sge.c
865
if (unlikely((u8 *)end > (u8 *)q->stat)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
866
unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
870
part1 = (u8 *)end - (u8 *)q->stat;
drivers/net/ethernet/chelsio/cxgb4/sge.c
871
memcpy(q->desc, (u8 *)buf + part0, part1);
drivers/net/ethernet/chelsio/cxgb4/sge.c
872
end = (void *)q->desc + part1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
892
void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
drivers/net/ethernet/chelsio/cxgb4/sge.c
941
to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
drivers/net/ethernet/chelsio/cxgb4/sge.c
968
if (unlikely((u8 *)end > (u8 *)q->stat)) {
drivers/net/ethernet/chelsio/cxgb4/sge.c
969
u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
drivers/net/ethernet/chelsio/cxgb4/sge.c
973
part1 = (u8 *)end - (u8 *)q->stat;
drivers/net/ethernet/chelsio/cxgb4/sge.c
974
memcpy(q->desc, (u8 *)buf + part0, part1);
drivers/net/ethernet/chelsio/cxgb4/sge.c
975
end = (void *)q->desc + part1;
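
Note: the cxgb4_write_sgl() and cxgb4_write_partial_sgl() hits show the wrap handling for gather lists: data that would run past q->stat (the status page marking the end of the descriptor ring) is staged in a bounce buffer and copied in two parts, the tail up to the ring end and the remainder from the ring base. A standalone sketch of that two-part copy:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy len bytes into a ring starting at 'start', splitting the copy
     * at the ring end exactly as the SGL writers above do. */
    static void write_wrapped(uint8_t *ring, size_t ring_len,
                              size_t start, const uint8_t *src, size_t len)
    {
        size_t part0 = ring_len - start;        /* bytes before the wrap */

        if (len <= part0) {
            memcpy(ring + start, src, len);
            return;
        }
        memcpy(ring + start, src, part0);
        memcpy(ring, src + part0, len - part0); /* remainder at the base */
    }

    int main(void)
    {
        uint8_t ring[8] = { 0 };
        const uint8_t msg[] = "abcdef";

        write_wrapped(ring, sizeof(ring), 5, msg, 6);
        for (size_t i = 0; i < sizeof(ring); i++)
            putchar(ring[i] ? ring[i] : '.');
        putchar('\n');   /* prints "def..abc" */
        return 0;
    }
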
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
260
struct sge_txq q; /* SGE TX Queue */
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
1609
rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
1643
s->ethtxq[qs].q.size = rp->tx_pending;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2076
T("TxQ ID:", q.abs_id);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2077
T("TxQ size:", q.size);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2078
T("TxQ inuse:", q.in_use);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2079
T("TxQ PIdx:", q.pidx);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2080
T("TxQ CIdx:", q.cidx);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2221
T("TxQFull:", q.stops);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2222
T("TxQRestarts:", q.restarts);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
2734
txq->q.size = 1024;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
558
txq = container_of(tq, struct sge_eth_txq, q);
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
570
txq->q.restarts++;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
657
s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
669
EQ_MAP(s, txq->q.abs_id) = &txq->q;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1136
txq->q.stops++;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1205
reclaim_completed_tx(adapter, &txq->q, true);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1214
credits = txq_avail(&txq->q) - ndesc;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1263
wr = (void *)&txq->q.desc[txq->q.pidx];
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1362
T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1364
ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1376
inline_tx_skb(skb, &txq->q, cpl + 1);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1417
struct sge_txq *tq = &txq->q;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1446
txq_advance(&txq->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
1448
ring_tx_db(adapter, &txq->q, ndesc);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2133
if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2134
int avail = reclaimable(&txq->q);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2139
free_tx_desc(adapter, &txq->q, avail, true);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2140
txq->q.in_use -= avail;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2424
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2430
txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2433
&txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2434
if (!txq->q.desc)
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2466
cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2478
kfree(txq->q.sdesc);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2479
txq->q.sdesc = NULL;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2482
txq->q.desc, txq->q.phys_addr);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2483
txq->q.desc = NULL;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2487
txq->q.in_use = 0;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2488
txq->q.cidx = 0;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2489
txq->q.pidx = 0;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2490
txq->q.stat = (void *)&txq->q.desc[txq->q.size];
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2491
txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2492
txq->q.bar2_addr = bar2_address(adapter,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2493
txq->q.cntxt_id,
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2495
&txq->q.bar2_qid);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2496
txq->q.abs_id =
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2502
txq->q.stops = 0;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2503
txq->q.restarts = 0;
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2573
if (txq->q.desc) {
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2574
t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2575
free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2576
kfree(txq->q.sdesc);
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
2577
free_txq(adapter, &txq->q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
413
struct sge_eth_txq *q;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
426
q = &adap->sge.ethtxq[qidx + pi->first_qset];
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
429
eoq = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
431
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
464
struct sge_eth_txq *q;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
474
q = &adap->sge.ethtxq[qidx + pi->first_qset];
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
476
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
478
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
486
q->vlan_ins++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
509
struct sge_eth_txq *q;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
517
q = &adap->sge.ethtxq[qidx + pi->first_qset];
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
521
eoq = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
524
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
525
left = 64 * q->q.size;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
540
memcpy(q->q.desc, sa_entry->key + left,
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
542
pos = (u8 *)q->q.desc + (key_len - left);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
573
struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
575
int qid = q->q.cntxt_id;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
599
netif_tx_stop_queue(q->txq);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
600
q->q.stops++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
601
if (!q->dbqt)
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
681
static unsigned int txq_avail(const struct sge_txq *q)
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
683
return q->size - 1 - q->in_use;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
686
static void eth_txq_stop(struct sge_eth_txq *q)
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
688
netif_tx_stop_queue(q->txq);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
689
q->q.stops++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
692
static void txq_advance(struct sge_txq *q, unsigned int n)
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
694
q->in_use += n;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
695
q->pidx += n;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
696
if (q->pidx >= q->size)
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
697
q->pidx -= q->size;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
712
struct sge_eth_txq *q;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
731
q = &adap->sge.ethtxq[qidx + pi->first_qset];
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
733
cxgb4_reclaim_completed_tx(adap, &q->q, true);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
737
credits = txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
740
eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
743
dev->name, qidx, credits, ndesc, txq_avail(&q->q),
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
748
last_desc = q->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
749
if (last_desc >= q->q.size)
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
750
last_desc -= q->q.size;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
751
sgl_sdesc = &q->q.sdesc[last_desc];
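
Note: the recurring last_desc computation (pidx + ndesc - 1, wrapped once) pins a multi-descriptor work request's software state to its final slot, so TX reclaim finds the DMA-unmap state exactly when the whole WR has completed. A tiny runnable sketch of the index arithmetic:

    #include <stdio.h>

    /* One conditional subtraction suffices because ndesc never exceeds
     * the ring size. */
    static unsigned int last_desc_idx(unsigned int pidx,
                                      unsigned int ndesc,
                                      unsigned int size)
    {
        unsigned int last = pidx + ndesc - 1;

        return last >= size ? last - size : last;
    }

    int main(void)
    {
        printf("%u\n", last_desc_idx(1022, 4, 1024)); /* wraps to 1 */
        return 0;
    }
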
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
756
q->mapping_err++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
760
pos = (u64 *)&q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
767
left = (u8 *)end - (u8 *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
768
end = (void *)q->q.desc + left;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
770
if (pos == (u64 *)q->q.stat) {
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
771
left = (u8 *)end - (u8 *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
772
end = (void *)q->q.desc + left;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
773
pos = (void *)q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
778
cxgb4_inline_tx_skb(skb, &q->q, sgl);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
781
cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
786
txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
788
cxgb4_ring_tx_db(adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
58
static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
61
int left = (void *)q->stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
69
memcpy(q->desc, src + left, length - left);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
70
pos = (void *)q->desc + (length - left);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
81
static inline unsigned int chcr_txq_avail(const struct sge_txq *q)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
83
return q->size - 1 - q->in_use;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
86
static inline void chcr_txq_advance(struct sge_txq *q, unsigned int n)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
88
q->in_use += n;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
89
q->pidx += n;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
90
if (q->pidx >= q->size)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
91
q->pidx -= q->size;
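
Note: chcr_txq_avail() and chcr_txq_advance() above are the ring-occupancy half of the story: capacity is size - 1 - in_use, holding one slot back so a completely full ring never makes the producer index catch the consumer, and the producer advance wraps with a single subtraction. A runnable sketch:

    #include <stdio.h>

    struct txq_sketch {
        unsigned int pidx, size, in_use;
    };

    /* One slot is held back so producer and consumer indices can never
     * alias on a completely full ring. */
    static unsigned int txq_avail(const struct txq_sketch *q)
    {
        return q->size - 1 - q->in_use;
    }

    static void txq_advance(struct txq_sketch *q, unsigned int n)
    {
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
            q->pidx -= q->size;   /* n <= size, one subtraction wraps */
    }

    int main(void)
    {
        struct txq_sketch q = { 6, 8, 0 };

        txq_advance(&q, 4);
        printf("pidx=%u avail=%u\n", q.pidx, txq_avail(&q)); /* pidx=2 avail=3 */
        return 0;
    }
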
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
94
static inline void chcr_eth_txq_stop(struct sge_eth_txq *q)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
96
netif_tx_stop_queue(q->txq);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
97
q->q.stops++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1018
credits = chcr_txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1020
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1025
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1029
pos = &q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1077
chcr_copy_to_txd(buf, &q->q, pos, pktlen);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1079
chcr_txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1080
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1098
struct sge_eth_txq *q, u32 tcp_seq,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1120
credits = chcr_txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1122
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1130
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1134
last_desc = q->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1135
if (last_desc >= q->q.size)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1136
last_desc -= q->q.size;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1137
sgl_sdesc = &q->q.sdesc[last_desc];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1141
q->mapping_err++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1148
pos = &q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1162
ULP_TXPKT_FID_V(q->q.cntxt_id) |
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1203
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1205
left = (void *)end - (void *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1206
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1210
pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1212
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1215
left = (void *)end - (void *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1216
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1232
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1236
left = (void *)end - (void *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1237
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1242
cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1246
chcr_txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1247
cxgb4_ring_tx_db(adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1272
struct sge_eth_txq *q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1308
credits = chcr_txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1310
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1315
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1319
last_desc = q->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1320
if (last_desc >= q->q.size)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1321
last_desc -= q->q.size;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1322
sgl_sdesc = &q->q.sdesc[last_desc];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1326
q->mapping_err++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1330
pos = &q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1344
ULP_TXPKT_FID_V(q->q.cntxt_id) |
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1379
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1381
left = (void *)end - (void *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1382
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1386
pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1388
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1391
left = (void *)end - (void *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1392
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1407
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1411
left = (void *)end - (void *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1412
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1430
pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1432
cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1436
chcr_txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1437
cxgb4_ring_tx_db(adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1459
bool tcp_push, struct sge_eth_txq *q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1486
credits = chcr_txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1488
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1493
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1497
last_desc = q->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1498
if (last_desc >= q->q.size)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1499
last_desc -= q->q.size;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1500
sgl_sdesc = &q->q.sdesc[last_desc];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1505
q->mapping_err++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1509
pos = &q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1522
ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1544
pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1547
left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1551
left = (void *)end - (void *)q->q.stat;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1552
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1556
cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1560
chcr_txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1561
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1567
struct sge_eth_txq *q)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1586
credits = chcr_txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1588
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1593
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1597
last_desc = q->q.pidx + ndesc - 1;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1598
if (last_desc >= q->q.size)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1599
last_desc -= q->q.size;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1600
sgl_sdesc = &q->q.sdesc[last_desc];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1605
q->mapping_err++;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1612
pos = &q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1640
cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1642
chcr_txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1644
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1696
struct sge_eth_txq *q, u32 skb_offset,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1732
if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1774
struct sge_eth_txq *q, u32 tls_end_offset)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1805
tcp_push_no_fin, q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1878
if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1893
struct sge_eth_txq *q)
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1908
if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1931
struct sge_eth_txq *q;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1962
q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1963
cxgb4_reclaim_completed_tx(adap, &q->q, true);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1966
ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
1982
cxgb4_reclaim_completed_tx(adap, &q->q, true);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2008
ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2036
(!th->fin && th->psh), q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2059
(!th->fin && th->psh), q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2075
q, tls_end_offset);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2086
return chcr_ktls_sw_fallback(skb, tx_info, q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
2104
chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
743
struct sge_eth_txq *q, u64 mask,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
754
ULP_TXPKT_FID_V(q->q.cntxt_id) |
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
798
struct sge_eth_txq *q, u32 tid,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
802
int left = (void *)q->q.stat - pos;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
806
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
810
__chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
813
return chcr_copy_to_txd(buf, &q->q, pos,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
818
pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
823
pos = q->q.desc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
839
struct sge_eth_txq *q, u64 tcp_seq,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
854
credits = chcr_txq_avail(&q->q) - ndesc;
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
856
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
861
chcr_eth_txq_stop(q);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
865
pos = &q->q.desc[q->q.pidx];
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
873
pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
884
pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
895
pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
904
chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
923
chcr_txq_advance(&q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
924
cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
989
struct sge_eth_txq *q, uint32_t tx_chan)
drivers/net/ethernet/emulex/benet/be.h
150
static inline void *queue_head_node(struct be_queue_info *q)
drivers/net/ethernet/emulex/benet/be.h
152
return q->dma_mem.va + q->head * q->entry_size;
drivers/net/ethernet/emulex/benet/be.h
155
static inline void *queue_tail_node(struct be_queue_info *q)
drivers/net/ethernet/emulex/benet/be.h
157
return q->dma_mem.va + q->tail * q->entry_size;
drivers/net/ethernet/emulex/benet/be.h
160
static inline void *queue_index_node(struct be_queue_info *q, u16 index)
drivers/net/ethernet/emulex/benet/be.h
162
return q->dma_mem.va + index * q->entry_size;
drivers/net/ethernet/emulex/benet/be.h
165
static inline void queue_head_inc(struct be_queue_info *q)
drivers/net/ethernet/emulex/benet/be.h
167
index_inc(&q->head, q->len);
drivers/net/ethernet/emulex/benet/be.h
175
static inline void queue_tail_inc(struct be_queue_info *q)
drivers/net/ethernet/emulex/benet/be.h
177
index_inc(&q->tail, q->len);
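
Note: the be.h accessors show benet's queue layout: one DMA-coherent slab plus head/tail indices, with entry pointers computed as base + index * entry_size and indices advanced modulo the queue length. A runnable sketch with simplified stand-in names:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bq {
        void    *va;          /* queue memory */
        uint32_t head, tail;
        uint32_t len;         /* number of entries */
        uint32_t entry_size;  /* bytes per entry */
    };

    static void *q_head_node(const struct bq *q)
    {
        return (uint8_t *)q->va + (size_t)q->head * q->entry_size;
    }

    /* Modulo increment; len need not be a power of two here. */
    static void index_inc(uint32_t *idx, uint32_t len)
    {
        *idx = (*idx + 1) % len;
    }

    int main(void)
    {
        uint8_t mem[4 * 16] = { 0 };
        struct bq q = { mem, 3, 0, 4, 16 };

        printf("head node offset: %td\n", (uint8_t *)q_head_node(&q) - mem); /* 48 */
        index_inc(&q.head, q.len);
        printf("head after inc: %u\n", q.head);  /* wraps to 0 */
        return 0;
    }
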
drivers/net/ethernet/emulex/benet/be.h
181
struct be_queue_info q;
drivers/net/ethernet/emulex/benet/be.h
203
struct be_queue_info q;
drivers/net/ethernet/emulex/benet/be.h
237
struct be_queue_info q;
drivers/net/ethernet/emulex/benet/be.h
291
struct be_queue_info q;
drivers/net/ethernet/emulex/benet/be_cmds.c
1023
__ilog2_u32(eqo->q.len / 256));
drivers/net/ethernet/emulex/benet/be_cmds.c
1032
eqo->q.id = le16_to_cpu(resp->eq_id);
drivers/net/ethernet/emulex/benet/be_cmds.c
1035
eqo->q.created = true;
drivers/net/ethernet/emulex/benet/be_cmds.c
121
struct be_queue_info *mccq = &adapter->mcc_obj.q;
drivers/net/ethernet/emulex/benet/be_cmds.c
1364
struct be_queue_info *txq = &txo->q;
drivers/net/ethernet/emulex/benet/be_cmds.c
1454
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
drivers/net/ethernet/emulex/benet/be_cmds.c
1495
req->id = cpu_to_le16(q->id);
drivers/net/ethernet/emulex/benet/be_cmds.c
1498
q->created = false;
drivers/net/ethernet/emulex/benet/be_cmds.c
1505
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
drivers/net/ethernet/emulex/benet/be_cmds.c
1522
req->id = cpu_to_le16(q->id);
drivers/net/ethernet/emulex/benet/be_cmds.c
1525
q->created = false;
drivers/net/ethernet/emulex/benet/be_cmds.c
562
atomic_dec(&mcc_obj->q.used);
drivers/net/ethernet/emulex/benet/be_cmds.c
590
if (atomic_read(&mcc_obj->q.used) == 0)
drivers/net/ethernet/emulex/benet/be_cmds.c
608
u32 index = mcc_obj->q.head;
drivers/net/ethernet/emulex/benet/be_cmds.c
611
index_dec(&index, mcc_obj->q.len);
drivers/net/ethernet/emulex/benet/be_cmds.c
612
wrb = queue_index_node(&mcc_obj->q, index);
drivers/net/ethernet/emulex/benet/be_cmds.c
844
struct be_queue_info *mccq = &adapter->mcc_obj.q;
drivers/net/ethernet/emulex/benet/be_cmds.c
862
return adapter->mcc_obj.q.created;
drivers/net/ethernet/emulex/benet/be_cmds.c
999
struct be_dma_mem *q_mem = &eqo->q.dma_mem;
drivers/net/ethernet/emulex/benet/be_cmds.h
2403
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
drivers/net/ethernet/emulex/benet/be_cmds.h
2405
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
drivers/net/ethernet/emulex/benet/be_ethtool.c
692
ring->rx_max_pending = adapter->rx_obj[0].q.len;
drivers/net/ethernet/emulex/benet/be_ethtool.c
693
ring->rx_pending = adapter->rx_obj[0].q.len;
drivers/net/ethernet/emulex/benet/be_ethtool.c
694
ring->tx_max_pending = adapter->tx_obj[0].q.len;
drivers/net/ethernet/emulex/benet/be_ethtool.c
695
ring->tx_pending = adapter->tx_obj[0].q.len;
drivers/net/ethernet/emulex/benet/be_main.c
1226
struct be_queue_info *txq = &txo->q;
drivers/net/ethernet/emulex/benet/be_main.c
1434
i, txo->q.head, txo->q.tail,
drivers/net/ethernet/emulex/benet/be_main.c
1435
atomic_read(&txo->q.used), txo->q.id);
drivers/net/ethernet/emulex/benet/be_main.c
1437
entry = txo->q.dma_mem.va;
drivers/net/ethernet/emulex/benet/be_main.c
144
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
drivers/net/ethernet/emulex/benet/be_main.c
146
struct be_dma_mem *mem = &q->dma_mem;
drivers/net/ethernet/emulex/benet/be_main.c
155
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
drivers/net/ethernet/emulex/benet/be_main.c
158
struct be_dma_mem *mem = &q->dma_mem;
drivers/net/ethernet/emulex/benet/be_main.c
160
memset(q, 0, sizeof(*q));
drivers/net/ethernet/emulex/benet/be_main.c
161
q->len = len;
drivers/net/ethernet/emulex/benet/be_main.c
162
q->entry_size = entry_size;
drivers/net/ethernet/emulex/benet/be_main.c
2243
set_eqd[num].eq_id = eqo->q.id;
drivers/net/ethernet/emulex/benet/be_main.c
228
val |= txo->q.id & DB_TXULP_RING_ID_MASK;
drivers/net/ethernet/emulex/benet/be_main.c
2284
struct be_queue_info *rxq = &rxo->q;
drivers/net/ethernet/emulex/benet/be_main.c
2597
struct be_queue_info *rxq = &rxo->q;
drivers/net/ethernet/emulex/benet/be_main.c
2756
struct be_queue_info *txq = &txo->q;
drivers/net/ethernet/emulex/benet/be_main.c
2794
eqe = queue_tail_node(&eqo->q);
drivers/net/ethernet/emulex/benet/be_main.c
2801
queue_tail_inc(&eqo->q);
drivers/net/ethernet/emulex/benet/be_main.c
2812
be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
drivers/net/ethernet/emulex/benet/be_main.c
2818
struct be_queue_info *rxq = &rxo->q;
drivers/net/ethernet/emulex/benet/be_main.c
2888
txq = &txo->q;
drivers/net/ethernet/emulex/benet/be_main.c
2913
txq = &txo->q;
drivers/net/ethernet/emulex/benet/be_main.c
2944
if (eqo->q.created) {
drivers/net/ethernet/emulex/benet/be_main.c
2946
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
drivers/net/ethernet/emulex/benet/be_main.c
2950
be_queue_free(adapter, &eqo->q);
drivers/net/ethernet/emulex/benet/be_main.c
2976
eq = &eqo->q;
drivers/net/ethernet/emulex/benet/be_main.c
2997
struct be_queue_info *q;
drivers/net/ethernet/emulex/benet/be_main.c
2999
q = &adapter->mcc_obj.q;
drivers/net/ethernet/emulex/benet/be_main.c
3000
if (q->created)
drivers/net/ethernet/emulex/benet/be_main.c
3001
be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
drivers/net/ethernet/emulex/benet/be_main.c
3002
be_queue_free(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3004
q = &adapter->mcc_obj.cq;
drivers/net/ethernet/emulex/benet/be_main.c
3005
if (q->created)
drivers/net/ethernet/emulex/benet/be_main.c
3006
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
drivers/net/ethernet/emulex/benet/be_main.c
3007
be_queue_free(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3013
struct be_queue_info *q, *cq;
drivers/net/ethernet/emulex/benet/be_main.c
3021
if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
drivers/net/ethernet/emulex/benet/be_main.c
3024
q = &adapter->mcc_obj.q;
drivers/net/ethernet/emulex/benet/be_main.c
3025
if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
drivers/net/ethernet/emulex/benet/be_main.c
3028
if (be_cmd_mccq_create(adapter, q, cq))
drivers/net/ethernet/emulex/benet/be_main.c
3034
be_queue_free(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3045
struct be_queue_info *q;
drivers/net/ethernet/emulex/benet/be_main.c
3050
q = &txo->q;
drivers/net/ethernet/emulex/benet/be_main.c
3051
if (q->created)
drivers/net/ethernet/emulex/benet/be_main.c
3052
be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
drivers/net/ethernet/emulex/benet/be_main.c
3053
be_queue_free(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3055
q = &txo->cq;
drivers/net/ethernet/emulex/benet/be_main.c
3056
if (q->created)
drivers/net/ethernet/emulex/benet/be_main.c
3057
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
drivers/net/ethernet/emulex/benet/be_main.c
3058
be_queue_free(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3085
status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
drivers/net/ethernet/emulex/benet/be_main.c
3089
status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
drivers/net/ethernet/emulex/benet/be_main.c
3109
struct be_queue_info *q;
drivers/net/ethernet/emulex/benet/be_main.c
3114
q = &rxo->cq;
drivers/net/ethernet/emulex/benet/be_main.c
3115
if (q->created)
drivers/net/ethernet/emulex/benet/be_main.c
3116
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
drivers/net/ethernet/emulex/benet/be_main.c
3117
be_queue_free(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3152
eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
drivers/net/ethernet/emulex/benet/be_main.c
3183
be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
drivers/net/ethernet/emulex/benet/be_main.c
3199
be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
drivers/net/ethernet/emulex/benet/be_main.c
3258
if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
drivers/net/ethernet/emulex/benet/be_main.c
3282
atomic_sub(num_wrbs, &txo->q.used);
drivers/net/ethernet/emulex/benet/be_main.c
3332
be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
drivers/net/ethernet/emulex/benet/be_main.c
3336
be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
drivers/net/ethernet/emulex/benet/be_main.c
3570
struct be_queue_info *q;
drivers/net/ethernet/emulex/benet/be_main.c
3575
q = &rxo->q;
drivers/net/ethernet/emulex/benet/be_main.c
3576
if (q->created) {
drivers/net/ethernet/emulex/benet/be_main.c
3585
if (atomic_read(&q->used) == 0)
drivers/net/ethernet/emulex/benet/be_main.c
3590
be_cmd_rxq_destroy(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3594
be_queue_free(adapter, q);
drivers/net/ethernet/emulex/benet/be_main.c
3693
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
drivers/net/ethernet/emulex/benet/be_main.c
3701
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
drivers/net/ethernet/emulex/benet/be_main.c
3709
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
drivers/net/ethernet/emulex/benet/be_main.c
3827
be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
drivers/net/ethernet/emulex/benet/be_main.c
4932
be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
drivers/net/ethernet/emulex/benet/be_main.c
646
erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
drivers/net/ethernet/emulex/benet/be_main.c
817
return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
drivers/net/ethernet/emulex/benet/be_main.c
822
return atomic_read(&txo->q.used) < txo->q.len / 2;
drivers/net/ethernet/emulex/benet/be_main.c
827
return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
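
Note: the three be_main.c predicates above encode stop/wake hysteresis: the queue stops while a worst-case fragmented skb might not fit, but only restarts once half the ring has drained, so it does not flap around the stop threshold. A runnable sketch; MAX_TX_FRAGS is an illustrative stand-in for BE_MAX_TX_FRAG_COUNT:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_TX_FRAGS 32   /* illustrative worst-case fragments per skb */

    struct txo_sketch { unsigned int used, len; };

    /* Stop while a maximally fragmented packet might not fit... */
    static bool txq_should_stop(const struct txo_sketch *t)
    {
        return t->used + MAX_TX_FRAGS >= t->len;
    }

    /* ...and only wake once half the ring has drained (hysteresis). */
    static bool txq_should_wake(const struct txo_sketch *t)
    {
        return t->used < t->len / 2;
    }

    int main(void)
    {
        struct txo_sketch t = { 2020, 2048 };

        printf("stop=%d wake=%d\n", txq_should_stop(&t), txq_should_wake(&t));
        return 0;
    }
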
drivers/net/ethernet/emulex/benet/be_main.c
919
u32 head = txo->q.head;
drivers/net/ethernet/emulex/benet/be_main.c
921
queue_head_inc(&txo->q);
drivers/net/ethernet/emulex/benet/be_main.c
932
struct be_queue_info *txq = &txo->q;
drivers/net/ethernet/emulex/benet/be_main.c
951
struct be_queue_info *txq = &txo->q;
drivers/net/ethernet/emulex/benet/be_main.c
968
struct be_queue_info *txq = &txo->q;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4000
struct dpni_queue q = { { 0 } };
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4006
DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4014
q.destination.id = fq->channel->dpcon_id;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4015
q.destination.type = DPNI_DEST_DPCON;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4016
q.destination.priority = 1;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4017
q.user_context = (u64)(uintptr_t)fq;
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
4019
DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
drivers/net/ethernet/freescale/enetc/enetc.c
3183
int err, tc, q;
drivers/net/ethernet/freescale/enetc/enetc.c
3203
for (q = offset; q < offset + count; q++) {
drivers/net/ethernet/freescale/enetc/enetc.c
3204
tx_ring = priv->tx_ring[q];
drivers/net/ethernet/freescale/fec_main.c
1011
for (q = 0; q < fep->num_tx_queues; q++) {
drivers/net/ethernet/freescale/fec_main.c
1013
txq = fep->tx_queue[q];
drivers/net/ethernet/freescale/fec_main.c
3884
unsigned int q;
drivers/net/ethernet/freescale/fec_main.c
3886
for (q = 0; q < fep->num_rx_queues; q++) {
drivers/net/ethernet/freescale/fec_main.c
3887
rxq = fep->rx_queue[q];
drivers/net/ethernet/freescale/fec_main.c
3896
for (q = 0; q < fep->num_tx_queues; q++) {
drivers/net/ethernet/freescale/fec_main.c
3897
txq = fep->tx_queue[q];
drivers/net/ethernet/freescale/fec_main.c
980
unsigned int q;
drivers/net/ethernet/freescale/fec_main.c
982
for (q = 0; q < fep->num_rx_queues; q++) {
drivers/net/ethernet/freescale/fec_main.c
984
rxq = fep->rx_queue[q];
drivers/net/ethernet/fungible/funeth/funeth_rx.c
103
if (cache_get(q, rb))
drivers/net/ethernet/fungible/funeth/funeth_rx.c
110
rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
112
if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
113
FUN_QSTAT_INC(q, rx_map_err);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
118
FUN_QSTAT_INC(q, rx_page_alloc);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
127
static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
130
dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
141
static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
151
&q->xdp_rxq);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
155
xdp_prog = READ_ONCE(q->xdp_prog);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
171
FUN_QSTAT_INC(q, xdp_tx);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
172
q->xdp_flush |= FUN_XDP_FLUSH_TX;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
177
if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
drivers/net/ethernet/fungible/funeth/funeth_rx.c
179
FUN_QSTAT_INC(q, xdp_redir);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
180
q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
183
bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
186
trace_xdp_exception(q->netdev, xdp_prog, act);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
188
q->cur_buf->pg_refs++; /* return frags' page reference */
drivers/net/ethernet/fungible/funeth/funeth_rx.c
189
FUN_QSTAT_INC(q, xdp_err);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
192
q->cur_buf->pg_refs++;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
193
FUN_QSTAT_INC(q, xdp_drops);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
256
get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
258
if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
268
buf->node == numa_mem_id()) || !q->spare_buf.page) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
269
dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
273
cache_offer(q, buf);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
274
*buf = q->spare_buf;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
275
q->spare_buf.page = NULL;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
276
q->rqes[q->rq_cons & q->rq_mask] =
drivers/net/ethernet/fungible/funeth/funeth_rx.c
279
q->buf_offset = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
280
q->rq_cons++;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
281
return &q->bufs[q->rq_cons & q->rq_mask];
drivers/net/ethernet/fungible/funeth/funeth_rx.c
296
static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
299
struct funeth_rxbuf *buf = q->cur_buf;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
304
buf = get_buf(q, buf, tot_len);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
312
if (!q->spare_buf.page &&
drivers/net/ethernet/fungible/funeth/funeth_rx.c
313
funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
drivers/net/ethernet/fungible/funeth/funeth_rx.c
318
PAGE_SIZE - q->buf_offset);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
319
dma_sync_single_for_cpu(q->dma_dev,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
320
buf->dma_addr + q->buf_offset,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
326
skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
333
q->buf_offset = PAGE_SIZE;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
335
q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
336
q->cur_buf = buf;
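
Note: the get_buf()/fun_gather_pkt() hits suggest funeth packs several received packets into one page, bumping an aligned buf_offset and moving to the next free-list buffer only when the next packet no longer fits. The sketch below is a loose, runnable approximation of that packing under those assumptions; PAGE_SZ and PKT_ALIGN are stand-ins for the real constants:

    #include <stdio.h>

    #define PAGE_SZ   4096u
    #define PKT_ALIGN 64u   /* stands in for FUN_EPRQ_PKT_ALIGN */

    /* Return the offset at which a packet of 'len' bytes lands, bumping
     * the aligned offset; "switching buffers" is modeled as a reset. */
    static unsigned int place_pkt(unsigned int *buf_offset, unsigned int len)
    {
        unsigned int off;

        if (*buf_offset + len > PAGE_SZ)
            *buf_offset = 0;            /* pretend we took the next buffer */
        off = *buf_offset;
        *buf_offset = (*buf_offset + len + PKT_ALIGN - 1) & ~(PKT_ALIGN - 1);
        return off;
    }

    int main(void)
    {
        unsigned int off = 0;

        for (int i = 0; i < 3; i++)     /* prints 0, 1536, 0 */
            printf("pkt %d at offset %u\n", i, place_pkt(&off, 1500));
        return 0;
    }
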
drivers/net/ethernet/fungible/funeth/funeth_rx.c
348
static void advance_cq(struct funeth_rxq *q)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
350
if (unlikely(q->cq_head == q->cq_mask)) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
351
q->cq_head = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
352
q->phase ^= 1;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
353
q->next_cqe_info = cqe_to_info(q->cqes);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
355
q->cq_head++;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
356
q->next_cqe_info += FUNETH_CQE_SIZE;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
358
prefetch(q->next_cqe_info);
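
Note: advance_cq() is the same ownership idea as the cxgb4 generation bit, here called a phase bit and driven off a power-of-two mask, with the walking CQE pointer bumped by the entry size and prefetched. A runnable sketch of the advance:

    #include <stdint.h>
    #include <stdio.h>

    struct cq_sketch {
        uint8_t *cqes;           /* CQE array base */
        uint8_t *next_cqe;       /* pointer walked through it */
        unsigned int head, mask; /* mask = entries - 1 */
        unsigned int phase;      /* phase bit expected next */
        unsigned int cqe_size;
    };

    /* Flip the expected phase on wrap instead of reading a producer
     * index; the pointer walk avoids re-deriving base + head * size. */
    static void advance_cq(struct cq_sketch *q)
    {
        if (q->head == q->mask) {
            q->head = 0;
            q->phase ^= 1;
            q->next_cqe = q->cqes;
        } else {
            q->head++;
            q->next_cqe += q->cqe_size;
        }
    }

    int main(void)
    {
        uint8_t ring[4 * 8];
        struct cq_sketch q = { ring, ring, 0, 3, 0, 8 };

        for (int i = 0; i < 5; i++)
            advance_cq(&q);
        printf("head=%u phase=%u\n", q.head, q.phase); /* head=1 phase=1 */
        return 0;
    }
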
drivers/net/ethernet/fungible/funeth/funeth_rx.c
365
static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
367
const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
369
struct net_device *ndev = q->netdev;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
379
u64_stats_update_begin(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
380
q->stats.rx_pkts++;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
381
q->stats.rx_bytes += pkt_len;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
382
u64_stats_update_end(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
384
advance_cq(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
392
ref_ok = fun_gather_pkt(q, tot_len, frags);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
395
va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
415
skb = napi_get_frags(q->napi);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
432
skb_record_rx_queue(skb, q->qidx);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
434
if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
drivers/net/ethernet/fungible/funeth/funeth_rx.c
437
if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
438
FUN_QSTAT_INC(q, rx_cso);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
442
if (unlikely(rx_hwtstamp_enabled(q->netdev)))
drivers/net/ethernet/fungible/funeth/funeth_rx.c
445
trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
447
gro_res = skb->data_len ? napi_gro_frags(q->napi) :
drivers/net/ethernet/fungible/funeth/funeth_rx.c
448
napi_gro_receive(q->napi, skb);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
450
FUN_QSTAT_INC(q, gro_merged);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
452
FUN_QSTAT_INC(q, gro_pkts);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
456
FUN_QSTAT_INC(q, rx_mem_drops);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
461
q->cur_buf->pg_refs++;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
479
static int fun_process_cqes(struct funeth_rxq *q, int budget)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
481
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
488
while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
492
fun_handle_cqe_pkt(q, xdp_q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
496
if (unlikely(q->xdp_flush)) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
497
if (q->xdp_flush & FUN_XDP_FLUSH_TX)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
499
if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
50
static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
501
q->xdp_flush = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
513
struct funeth_rxq *q = irq->rxq;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
514
int work_done = budget - fun_process_cqes(q, budget);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
515
u32 cq_db_val = q->cq_head;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
518
FUN_QSTAT_INC(q, rx_budget);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
52
struct funeth_rx_cache *c = &q->cache;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
520
cq_db_val |= q->irq_db_val;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
523
if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
524
u64_stats_update_begin(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
525
q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
526
u64_stats_update_end(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
527
q->rq_cons_db = q->rq_cons;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
528
writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
531
writel(cq_db_val, q->cq_db);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
536
static void fun_rxq_free_bufs(struct funeth_rxq *q)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
538
struct funeth_rxbuf *b = q->bufs;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
541
for (i = 0; i <= q->rq_mask; i++, b++)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
542
funeth_free_page(q, b);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
544
funeth_free_page(q, &q->spare_buf);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
545
q->cur_buf = NULL;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
549
static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
551
struct funeth_rxbuf *b = q->bufs;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
554
for (i = 0; i <= q->rq_mask; i++, b++) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
555
if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
556
fun_rxq_free_bufs(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
559
q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
561
q->cur_buf = q->bufs;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
575
static void fun_rxq_free_cache(struct funeth_rxq *q)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
577
struct funeth_rxbuf *b = q->cache.bufs;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
58
dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
580
for (i = 0; i <= q->cache.mask; i++, b++)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
581
funeth_free_page(q, b);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
583
kvfree(q->cache.bufs);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
584
q->cache.bufs = NULL;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
587
int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
589
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
595
if (headroom != q->headroom) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
600
0, q->hw_cqid, headroom);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
605
q->headroom = headroom;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
608
WRITE_ONCE(q->xdp_prog, prog);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
620
struct funeth_rxq *q;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
625
q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
626
if (!q)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
629
q->qidx = qidx;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
630
q->netdev = dev;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
631
q->cq_mask = ncqe - 1;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
632
q->rq_mask = nrqe - 1;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
633
q->numa_node = numa_node;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
634
q->rq_db_thres = nrqe / 4;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
635
u64_stats_init(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
636
q->dma_dev = &fp->pdev->dev;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
638
q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
drivers/net/ethernet/fungible/funeth/funeth_rx.c
639
sizeof(*q->bufs), false, numa_node,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
640
&q->rq_dma_addr, (void **)&q->bufs, NULL);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
641
if (!q->rqes)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
644
q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
645
false, numa_node, &q->cq_dma_addr, NULL,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
647
if (!q->cqes)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
650
err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
654
err = fun_rxq_alloc_bufs(q, numa_node);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
658
q->stats.rx_bufs = q->rq_mask;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
659
q->init_state = FUN_QSTATE_INIT_SW;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
660
return q;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
663
fun_rxq_free_cache(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
665
dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
666
q->cq_dma_addr);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
668
fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
669
q->rq_dma_addr, q->bufs);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
67
static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
671
kfree(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
677
static void fun_rxq_free_sw(struct funeth_rxq *q)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
679
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
681
fun_rxq_free_cache(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
682
fun_rxq_free_bufs(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
683
fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
684
q->rqes, q->rq_dma_addr, q->bufs);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
685
dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
686
q->cqes, q->cq_dma_addr);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
689
fp->rx_packets += q->stats.rx_pkts;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
69
struct funeth_rx_cache *c = &q->cache;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
690
fp->rx_bytes += q->stats.rx_bytes;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
691
fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
693
kfree(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
697
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
699
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
700
unsigned int ncqe = q->cq_mask + 1;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
701
unsigned int nrqe = q->rq_mask + 1;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
704
err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
709
err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
714
q->phase = 1;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
715
q->irq_cnt = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
716
q->cq_head = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
717
q->rq_cons = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
718
q->rq_cons_db = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
719
q->buf_offset = 0;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
720
q->napi = &irq->napi;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
721
q->irq_db_val = fp->cq_irq_db;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
722
q->next_cqe_info = cqe_to_info(q->cqes);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
724
q->xdp_prog = fp->xdp_prog;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
725
q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
729
FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
731
&q->hw_sqid, &q->rq_db);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
737
q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
738
q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
740
&q->hw_cqid, &q->cq_db);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
744
irq->rxq = q;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
745
writel(q->rq_mask, q->rq_db);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
746
q->init_state = FUN_QSTATE_INIT_FULL;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
748
netif_info(fp, ifup, q->netdev,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
750
q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
751
q->numa_node, q->headroom);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
755
fun_destroy_sq(fp->fdev, q->hw_sqid);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
757
xdp_rxq_info_unreg(&q->xdp_rxq);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
759
netdev_err(q->netdev,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
761
q->qidx, err);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
765
static void fun_rxq_free_dev(struct funeth_rxq *q)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
767
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
77
dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
770
if (q->init_state < FUN_QSTATE_INIT_FULL)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
773
irq = container_of(q->napi, struct fun_irq, napi);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
774
netif_info(fp, ifdown, q->netdev,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
776
q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
779
xdp_rxq_info_unreg(&q->xdp_rxq);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
780
fun_destroy_sq(fp->fdev, q->hw_sqid);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
781
fun_destroy_cq(fp->fdev, q->hw_cqid);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
782
q->init_state = FUN_QSTATE_INIT_SW;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
792
struct funeth_rxq *q = *qp;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
795
if (!q) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
796
q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
797
if (IS_ERR(q))
drivers/net/ethernet/fungible/funeth/funeth_rx.c
798
return PTR_ERR(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
801
if (q->init_state >= state)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
804
err = fun_rxq_create_dev(q, irq);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
807
fun_rxq_free_sw(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
812
*qp = q;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
817
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
820
fun_rxq_free_dev(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
823
fun_rxq_free_sw(q);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
824
q = NULL;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
827
return q;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
88
dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
98
static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
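
The funeth_rx.c entries above show a completion queue driven by a generation ("phase") bit rather than a hardware-written head pointer: advance_cq() flips q->phase each time cq_head wraps, and fun_process_cqes() stops at the first CQE whose phase bit no longer matches. A minimal userspace sketch of the idiom follows; the struct and function names are stand-ins, not the driver's.

/*
 * Minimal sketch of phase-bit completion-queue polling, loosely
 * modeled on the funeth RX path. All names here (struct cq,
 * cqe.phase, cq_poll) are illustrative.
 */
#include <stdio.h>

#define CQ_DEPTH 8 /* must be a power of two */

struct cqe { unsigned int phase; unsigned int data; };

struct cq {
	struct cqe ring[CQ_DEPTH];
	unsigned int head;   /* next entry to consume */
	unsigned int phase;  /* expected generation bit, starts at 1 */
};

/* Consume entries until one is seen with the stale phase bit. */
static int cq_poll(struct cq *q, int budget)
{
	int done = 0;

	while (budget-- && q->ring[q->head].phase == q->phase) {
		printf("consumed cqe %u: %u\n", q->head, q->ring[q->head].data);
		done++;
		if (++q->head == CQ_DEPTH) {   /* wrap: flip expected phase */
			q->head = 0;
			q->phase ^= 1;
		}
	}
	return done;
}

int main(void)
{
	struct cq q = { .phase = 1 };

	/* Producer (normally the device) writes data, then the phase bit. */
	for (unsigned int i = 0; i < 3; i++)
		q.ring[i] = (struct cqe){ .phase = 1, .data = 100 + i };

	cq_poll(&q, 16); /* consumes 3 entries, stops at stale entry 3 */
	return 0;
}
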
drivers/net/ethernet/fungible/funeth/funeth_tx.c
107
static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
132
FUN_QSTAT_INC(q, tx_tls_fallback);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
135
FUN_QSTAT_INC(q, tx_tls_drops);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
149
static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
153
unsigned int idx = q->prod_cnt & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
165
if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
167
FUN_QSTAT_INC(q, tx_map_err);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
171
req = fun_tx_desc_addr(q, idx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
218
FUN_QSTAT_INC(q, tx_encap_tso);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
236
FUN_QSTAT_INC(q, tx_uso);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
253
FUN_QSTAT_INC(q, tx_tso);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
256
u64_stats_update_begin(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
257
q->stats.tx_cso += shinfo->gso_segs;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
258
u64_stats_update_end(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
269
FUN_QSTAT_INC(q, tx_cso);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
277
gle = fun_write_gl(q, req, addrs, lens, ngle);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
290
u64_stats_update_begin(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
291
q->stats.tx_tls_bytes += tls_len;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
292
q->stats.tx_tls_pkts += 1 + extra_pkts;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
293
u64_stats_update_end(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
296
u64_stats_update_begin(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
297
q->stats.tx_bytes += skb->len + extra_bytes;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
298
q->stats.tx_pkts += 1 + extra_pkts;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
299
u64_stats_update_end(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
301
q->info[idx].skb = skb;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
303
trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
311
static unsigned int fun_txq_avail(const struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
313
return q->mask - q->prod_cnt + q->cons_cnt;
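
fun_txq_avail() above computes free descriptors as q->mask - q->prod_cnt + q->cons_cnt. The counters are free-running and only masked when a slot index is needed, so unsigned wraparound keeps the arithmetic correct, and one slot of the power-of-two ring is given up so that full and empty remain distinguishable. A small self-checking sketch, with invented names:

/* Sketch of the free-running-counter ring accounting behind
 * fun_txq_avail(); struct and field names are illustrative. */
#include <assert.h>

struct ring {
	unsigned int mask;     /* ndesc - 1, ndesc a power of two */
	unsigned int prod_cnt; /* free-running, never reset */
	unsigned int cons_cnt; /* free-running, never reset */
};

/* Occupied = prod - cons (mod 2^32); available = capacity - occupied.
 * Capacity is mask (= ndesc - 1): one slot stays free so a full ring
 * is not mistaken for an empty one. */
static unsigned int ring_avail(const struct ring *r)
{
	return r->mask - r->prod_cnt + r->cons_cnt;
}

int main(void)
{
	struct ring r = { .mask = 7 };

	assert(ring_avail(&r) == 7);
	r.prod_cnt += 5;               /* post 5 descriptors */
	assert(ring_avail(&r) == 2);
	r.cons_cnt += 5;               /* reclaim them */
	assert(ring_avail(&r) == 7);
	/* Still correct across 32-bit counter wraparound: */
	r.prod_cnt = 0xFFFFFFFFu;
	r.cons_cnt = 0xFFFFFFFDu;      /* 2 in flight spanning the wrap */
	assert(ring_avail(&r) == 5);
	return 0;
}
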
drivers/net/ethernet/fungible/funeth/funeth_tx.c
317
static void fun_tx_check_stop(struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
319
if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
drivers/net/ethernet/fungible/funeth/funeth_tx.c
322
netif_tx_stop_queue(q->ndq);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
329
if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
drivers/net/ethernet/fungible/funeth/funeth_tx.c
330
FUN_QSTAT_INC(q, tx_nstops);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
332
netif_tx_start_queue(q->ndq);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
338
static bool fun_txq_may_restart(struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
340
return fun_txq_avail(q) >= q->mask / 4;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
347
struct funeth_txq *q = fp->txqs[qid];
drivers/net/ethernet/fungible/funeth/funeth_tx.c
352
skb = fun_tls_tx(skb, q, &tls_len);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
357
ndesc = write_pkt_desc(skb, q, tls_len);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
363
q->prod_cnt += ndesc;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
364
fun_tx_check_stop(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
368
if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
drivers/net/ethernet/fungible/funeth/funeth_tx.c
369
fun_txq_wr_db(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
371
FUN_QSTAT_INC(q, tx_more);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
380
fun_txq_wr_db(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
385
static u16 txq_hw_head(const struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
387
return (u16)be64_to_cpu(*q->hw_wb);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
393
static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
395
const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
401
dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
drivers/net/ethernet/fungible/funeth/funeth_tx.c
404
for (gle++; --ngle && txq_to_end(q, gle); gle++)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
405
dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
drivers/net/ethernet/fungible/funeth/funeth_tx.c
409
for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
410
dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
drivers/net/ethernet/fungible/funeth/funeth_tx.c
423
static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
431
for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
432
head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
441
unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
442
struct sk_buff *skb = q->info[reclaim_idx].skb;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
444
trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
449
reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
454
q->cons_cnt += ndesc;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
455
netdev_tx_completed_queue(q->ndq, npkts, nbytes);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
458
if (unlikely(netif_tx_queue_stopped(q->ndq) &&
drivers/net/ethernet/fungible/funeth/funeth_tx.c
459
fun_txq_may_restart(q))) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
460
netif_tx_wake_queue(q->ndq);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
461
FUN_QSTAT_INC(q, tx_nrestarts);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
471
struct funeth_txq *q = irq->txq;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
474
if (fun_txq_reclaim(q, budget))
drivers/net/ethernet/fungible/funeth/funeth_tx.c
478
db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
479
writel(db_val, q->db);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
484
static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
488
for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
489
head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
498
unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
500
xdp_return_frame(q->info[reclaim_idx].xdpf);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
502
trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
504
reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
510
q->cons_cnt += ndesc;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
514
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
522
if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
523
fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
534
if (unlikely(fun_txq_avail(q) < ndesc)) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
535
FUN_QSTAT_INC(q, tx_xdp_full);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
539
if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
541
FUN_QSTAT_INC(q, tx_map_err);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
545
idx = q->prod_cnt & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
546
req = fun_tx_desc_addr(q, idx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
556
fun_write_gl(q, req, dma, lens, nfrags);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
558
q->info[idx].xdpf = xdpf;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
56
static void *txq_end(const struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
560
u64_stats_update_begin(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
561
q->stats.tx_bytes += tot_len;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
562
q->stats.tx_pkts++;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
563
u64_stats_update_end(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
565
trace_funeth_tx(q, tot_len, idx, nfrags);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
566
q->prod_cnt += ndesc;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
575
struct funeth_txq *q, **xdpqs;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
58
return (void *)q->hw_wb;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
589
for (q = xdpqs[q_idx], i = 0; i < n; i++)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
590
if (!fun_xdp_tx(q, frames[i]))
drivers/net/ethernet/fungible/funeth/funeth_tx.c
594
fun_txq_wr_db(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
601
static void fun_txq_purge(struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
603
while (q->cons_cnt != q->prod_cnt) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
604
unsigned int idx = q->cons_cnt & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
606
q->cons_cnt += fun_unmap_pkt(q, idx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
607
dev_kfree_skb_any(q->info[idx].skb);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
609
netdev_tx_reset_queue(q->ndq);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
612
static void fun_xdpq_purge(struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
614
while (q->cons_cnt != q->prod_cnt) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
615
unsigned int idx = q->cons_cnt & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
617
q->cons_cnt += fun_unmap_pkt(q, idx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
618
xdp_return_frame(q->info[idx].xdpf);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
629
struct funeth_txq *q;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
637
q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
638
if (!q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
64
static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
641
q->dma_dev = &fp->pdev->dev;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
642
q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
643
sizeof(*q->info), true, numa_node,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
644
&q->dma_addr, (void **)&q->info,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
645
&q->hw_wb);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
646
if (!q->desc)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
649
q->netdev = dev;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
650
q->mask = ndesc - 1;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
651
q->qidx = qidx;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
652
q->numa_node = numa_node;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
653
u64_stats_init(&q->syncp);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
654
q->init_state = FUN_QSTATE_INIT_SW;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
655
return q;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
658
kfree(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
66
return txq_end(q) - p;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
665
static void fun_txq_free_sw(struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
667
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
669
fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
670
q->desc, q->dma_addr, q->info);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
672
fp->tx_packets += q->stats.tx_pkts;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
673
fp->tx_bytes += q->stats.tx_bytes;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
674
fp->tx_dropped += q->stats.tx_map_err;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
676
kfree(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
680
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
682
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
683
unsigned int irq_idx, ndesc = q->mask + 1;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
686
q->irq = irq;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
687
*q->hw_wb = 0;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
688
q->prod_cnt = 0;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
689
q->cons_cnt = 0;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
696
q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
698
&q->hw_qid, &q->db);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
702
err = fun_create_and_bind_tx(fp, q->hw_qid);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
705
q->ethid = err;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
708
irq->txq = q;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
709
q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
710
q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
712
writel(q->irq_db_val, q->db);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
715
q->init_state = FUN_QSTATE_INIT_FULL;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
716
netif_info(fp, ifup, q->netdev,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
718
irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
719
q->ethid, q->numa_node);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
723
fun_destroy_sq(fp->fdev, q->hw_qid);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
725
netdev_err(q->netdev,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
727
irq ? "Tx" : "XDP", q->qidx, err);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
731
static void fun_txq_free_dev(struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
733
struct funeth_priv *fp = netdev_priv(q->netdev);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
735
if (q->init_state < FUN_QSTATE_INIT_FULL)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
738
netif_info(fp, ifdown, q->netdev,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
740
q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
741
q->irq ? q->irq->irq_idx : 0, q->ethid);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
743
fun_destroy_sq(fp->fdev, q->hw_qid);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
744
fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
746
if (q->irq) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
747
q->irq->txq = NULL;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
748
fun_txq_purge(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
750
fun_xdpq_purge(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
753
q->init_state = FUN_QSTATE_INIT_SW;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
763
struct funeth_txq *q = *qp;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
766
if (!q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
767
q = fun_txq_create_sw(dev, qidx, ndesc, irq);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
768
if (!q)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
771
if (q->init_state >= state)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
774
err = fun_txq_create_dev(q, irq);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
777
fun_txq_free_sw(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
78
static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
drivers/net/ethernet/fungible/funeth/funeth_tx.c
782
*qp = q;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
789
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
792
fun_txq_free_dev(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
795
fun_txq_free_sw(q);
drivers/net/ethernet/fungible/funeth/funeth_tx.c
796
q = NULL;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
799
return q;
drivers/net/ethernet/fungible/funeth/funeth_tx.c
90
i < ngle && txq_to_end(q, gle); i++, gle++)
drivers/net/ethernet/fungible/funeth/funeth_tx.c
93
if (txq_to_end(q, gle) == 0) {
drivers/net/ethernet/fungible/funeth/funeth_tx.c
94
gle = (struct fun_dataop_gl *)q->desc;
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
201
#define FUN_QSTAT_INC(q, counter) \
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
203
u64_stats_update_begin(&(q)->syncp); \
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
204
(q)->stats.counter++; \
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
205
u64_stats_update_end(&(q)->syncp); \
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
208
#define FUN_QSTAT_READ(q, seq, stats_copy) \
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
210
seq = u64_stats_fetch_begin(&(q)->syncp); \
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
211
stats_copy = (q)->stats; \
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
212
} while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
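
FUN_QSTAT_INC and FUN_QSTAT_READ above wrap the per-queue counters in a u64_stats seqcount so 64-bit statistics read tear-free on 32-bit machines: the writer bumps a sequence around each update, and readers retry until they observe an even, unchanged sequence. The sketch below reproduces that retry shape in plain C11; it is illustrative only — the real kernel helpers handle the memory-ordering subtleties and compile to almost nothing on 64-bit builds.

#include <stdatomic.h>
#include <stdio.h>

struct qstats {
	atomic_uint seq;
	unsigned long long rx_pkts, rx_bytes;
};

static void stat_add(struct qstats *s, unsigned long long bytes)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* odd: update in progress */
	s->rx_pkts++;
	s->rx_bytes += bytes;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* even: stable */
}

static void stat_read(struct qstats *s, unsigned long long *pkts,
		      unsigned long long *bytes)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->rx_pkts;
		*bytes = s->rx_bytes;
	} while (start & 1 ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct qstats s = { 0 };
	unsigned long long p, b;

	stat_add(&s, 1500);
	stat_read(&s, &p, &b);
	printf("%llu pkts, %llu bytes\n", p, b);
	return 0;
}
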
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
229
static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
232
return q->desc + idx * FUNETH_SQE_SIZE;
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
235
static inline void fun_txq_wr_db(const struct funeth_txq *q)
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
237
unsigned int tail = q->prod_cnt & q->mask;
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
239
writel(tail, q->db);
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
250
bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
257
int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq);
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
258
struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state);
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
262
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq);
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
263
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state);
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
264
int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog);
drivers/net/ethernet/hisilicon/hns/hnae.c
195
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
drivers/net/ethernet/hisilicon/hns/hnae.c
202
ring->q = q;
drivers/net/ethernet/hisilicon/hns/hnae.c
204
ring->coal_param = q->handle->coal_param;
drivers/net/ethernet/hisilicon/hns/hnae.c
238
static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
drivers/net/ethernet/hisilicon/hns/hnae.c
243
q->dev = dev;
drivers/net/ethernet/hisilicon/hns/hnae.c
244
q->handle = h;
drivers/net/ethernet/hisilicon/hns/hnae.c
246
ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
drivers/net/ethernet/hisilicon/hns/hnae.c
250
ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
drivers/net/ethernet/hisilicon/hns/hnae.c
255
dev->ops->init_queue(q);
drivers/net/ethernet/hisilicon/hns/hnae.c
260
hnae_fini_ring(&q->tx_ring);
drivers/net/ethernet/hisilicon/hns/hnae.c
265
static void hnae_fini_queue(struct hnae_queue *q)
drivers/net/ethernet/hisilicon/hns/hnae.c
267
if (q->dev->ops->fini_queue)
drivers/net/ethernet/hisilicon/hns/hnae.c
268
q->dev->ops->fini_queue(q);
drivers/net/ethernet/hisilicon/hns/hnae.c
270
hnae_fini_ring(&q->tx_ring);
drivers/net/ethernet/hisilicon/hns/hnae.c
271
hnae_fini_ring(&q->rx_ring);
drivers/net/ethernet/hisilicon/hns/hnae.h
270
struct hnae_queue *q;
drivers/net/ethernet/hisilicon/hns/hnae.h
466
void (*init_queue)(struct hnae_queue *q);
drivers/net/ethernet/hisilicon/hns/hnae.h
467
void (*fini_queue)(struct hnae_queue *q);
drivers/net/ethernet/hisilicon/hns/hnae.h
564
#define ring_to_dev(ring) ((ring)->q->dev->dev)
drivers/net/ethernet/hisilicon/hns/hnae.h
579
#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
drivers/net/ethernet/hisilicon/hns/hnae.h
580
(q)->tx_ring.io_base + RCB_REG_TAIL)
drivers/net/ethernet/hisilicon/hns/hnae.h
589
struct hnae_buf_ops *bops = ring->q->handle->bops;
drivers/net/ethernet/hisilicon/hns/hnae.h
622
ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
drivers/net/ethernet/hisilicon/hns/hnae.h
628
struct hnae_buf_ops *bops = ring->q->handle->bops;
drivers/net/ethernet/hisilicon/hns/hnae.h
642
struct hnae_buf_ops *bops = ring->q->handle->bops;
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
111
ae_handle->qs[i] = &ring_pair_cb->q;
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
112
ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
113
ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
192
static void hns_ae_init_queue(struct hnae_queue *q)
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
195
container_of(q, struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
200
static void hns_ae_fini_queue(struct hnae_queue *q)
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
202
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
205
hns_rcb_reset_ring_hw(q);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
296
struct hnae_queue *q;
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
315
q = handle->qs[i];
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
316
q->rx_ring.buf_size = rx_buf_size;
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
317
hns_rcb_set_rx_ring_bs(q, rx_buf_size);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
397
hns_rcb_int_ctrl_hw(ring->q, flag, mask);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
409
hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
520
container_of(handle->qs[0], struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
532
container_of(handle->qs[0], struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
550
container_of(handle->qs[0], struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
561
container_of(handle->qs[0], struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
62
static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
64
return container_of(q, struct ring_pair_cb, q);
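
Nearly every hns entry in this stretch recovers a driver-private ring_pair_cb from a generic hnae_queue via container_of(): the generic struct is embedded as a member of the wrapper, so no back-pointer is needed. A standalone sketch of the idiom with stand-in structures:

/* Sketch of the embedded-struct + container_of() idiom the hns
 * entries above rely on; the structures here are stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct queue { int io_base; };            /* generic layer */

struct ring_pair {                        /* driver-private wrapper */
	int irq;
	struct queue q;                   /* embedded generic queue */
};

/* Generic code sees only 'struct queue *'; the driver recovers
 * its wrapper by subtracting the member offset. */
static void handle_queue(struct queue *q)
{
	struct ring_pair *rp = container_of(q, struct ring_pair, q);

	printf("queue io_base=%d belongs to ring pair irq=%d\n",
	       q->io_base, rp->irq);
}

int main(void)
{
	struct ring_pair rp = { .irq = 42, .q = { .io_base = 7 } };

	handle_queue(&rp.q);
	return 0;
}
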
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
102
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
1036
= container_of(queue, struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
104
dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
107
could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
111
dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
113
dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
116
could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
121
dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
128
dev_err(q->dev->dev, "port%d reset ring fail\n",
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
129
hns_ae_get_vf_cb(q->handle)->port_index);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
138
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
143
dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
144
dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
149
dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
150
dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
155
void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
158
dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
159
dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
163
dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
164
dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
168
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
173
dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
176
dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
179
void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
182
dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
185
dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
193
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
195
dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
213
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
217
dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
225
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
229
dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
240
struct hnae_queue *q = &ring_pair->q;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
242
(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
246
dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
248
dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
251
hns_rcb_set_rx_ring_bs(q, ring->buf_size);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
253
dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
255
dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
258
dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
260
dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
263
hns_rcb_set_tx_ring_bs(q, ring->buf_size);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
265
dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
267
dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
432
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
440
ring_pair_cb = container_of(q, struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
443
ring = &q->rx_ring;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
444
ring->io_base = ring_pair_cb->q.io_base;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
448
ring = &q->tx_ring;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
449
ring->io_base = ring_pair_cb->q.io_base +
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
476
ring_pair_cb->q.handle = NULL;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
478
hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
479
hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
521
ring_pair_cb->q.io_base =
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
535
ring_pair_cb->q.phy_base =
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
812
container_of(queue, struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
846
container_of(queue, struct ring_pair_cb, q);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
88
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
98
tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
125
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
126
void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
127
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
128
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
129
void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
132
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
160
void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
161
void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
87
struct hnae_queue q;
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
570
#define hns_xgmac_cpy_q(p, q) \
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
572
*(p) = (u32)(q);\
drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
573
*((p) + 1) = (u32)((q) >> 32);\
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1031
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1036
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1088
ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
drivers/net/ethernet/hisilicon/hns/hns_enet.c
1101
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
drivers/net/ethernet/hisilicon/hns/hns_enet.c
67
HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
drivers/net/ethernet/hisilicon/hns/hns_enet.c
741
bool coal_enable = ring->q->handle->coal_adapt_en;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
752
bool coal_enable = ring->q->handle->coal_adapt_en;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
802
struct hnae_handle *handle = ring->q->handle;
drivers/net/ethernet/hisilicon/hns/hns_enet.c
904
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
drivers/net/ethernet/hisilicon/hns/hns_enet.c
908
if (ring->q->handle->coal_adapt_en)
drivers/net/ethernet/hisilicon/hns/hns_enet.c
913
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
drivers/net/ethernet/hisilicon/hns/hns_enet.c
931
if (ring->q->handle->coal_adapt_en)
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
111
tqp = container_of(queue, struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
18
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
23
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
46
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
52
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
68
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
26
struct hnae3_queue q;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2822
struct netdev_queue *q;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2824
q = netdev_get_tx_queue(ndev, i);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2825
timedout_ms = netif_xmit_timeout_ms(q);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2828
struct dql *dql = &q->dql;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2835
q->state, timedout_ms);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4944
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4952
ring = &priv->ring[q->tqp_index];
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4954
ring->queue_index = q->tqp_index;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4958
ring = &priv->ring[q->tqp_index + queue_num];
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4960
ring->queue_index = q->tqp_index;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4966
ring->tqp = q;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
4971
ring->buf_size = q->buf_size;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5130
struct hnae3_queue *q = ring->tqp;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5133
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5134
hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5137
hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5139
hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5142
hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5144
hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5147
hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5162
struct hnae3_queue *q;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5164
q = priv->ring[tc_info->tqp_offset[i] + j].tqp;
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
5165
hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
10818
tqp = container_of(queue, struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1644
tqp->q.ae_algo = &ae_algo;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1645
tqp->q.buf_size = hdev->rx_buf_len;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1646
tqp->q.tx_desc_num = hdev->num_tx_desc;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1647
tqp->q.rx_desc_num = hdev->num_rx_desc;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1653
tqp->q.io_base = hdev->hw.hw.io_base +
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1657
tqp->q.io_base = hdev->hw.hw.io_base +
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1668
tqp->q.mem_base = hdev->hw.hw.mem_base +
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1710
hdev->htqp[i].q.handle = &vport->nic;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1711
hdev->htqp[i].q.tqp_index = alloced;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1712
hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1713
hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1714
kinfo->tqp[alloced] = &hdev->htqp[i].q;
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1766
struct hclge_comm_tqp *q =
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1767
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1772
ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
1124
container_of(queue, struct hclge_comm_tqp, q);
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
960
struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
963
hclge_get_queue_id(q),
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
316
tqp->q.ae_algo = &ae_algovf;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
317
tqp->q.buf_size = hdev->rx_buf_len;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
318
tqp->q.tx_desc_num = hdev->num_tx_desc;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
319
tqp->q.rx_desc_num = hdev->num_rx_desc;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
325
tqp->q.io_base = hdev->hw.hw.io_base +
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
329
tqp->q.io_base = hdev->hw.hw.io_base +
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
340
tqp->q.mem_base = hdev->hw.hw.mem_base +
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
377
hdev->htqp[i].q.handle = &hdev->nic;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
378
hdev->htqp[i].q.tqp_index = i;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
379
kinfo->tqp[i] = &hdev->htqp[i].q;
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
150
tqp = &hdev->htqp[j].q;
drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
70
#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi))
drivers/net/ethernet/intel/fm10k/fm10k_common.c
329
struct fm10k_hw_stats_q *q,
drivers/net/ethernet/intel/fm10k/fm10k_common.c
341
&q->tx_packets);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
346
&q->tx_bytes);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
358
if (q->tx_stats_idx == id_tx) {
drivers/net/ethernet/intel/fm10k/fm10k_common.c
359
q->tx_packets.count += tx_packets;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
360
q->tx_bytes.count += tx_bytes;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
364
fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
365
fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
367
q->tx_stats_idx = id_tx;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
380
struct fm10k_hw_stats_q *q,
drivers/net/ethernet/intel/fm10k/fm10k_common.c
392
&q->rx_drops);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
395
&q->rx_packets);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
400
&q->rx_bytes);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
412
if (q->rx_stats_idx == id_rx) {
drivers/net/ethernet/intel/fm10k/fm10k_common.c
413
q->rx_drops.count += rx_drops;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
414
q->rx_packets.count += rx_packets;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
415
q->rx_bytes.count += rx_bytes;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
419
fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
420
fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
421
fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
423
q->rx_stats_idx = id_rx;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
436
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
drivers/net/ethernet/intel/fm10k/fm10k_common.c
441
for (i = 0; i < count; i++, idx++, q++) {
drivers/net/ethernet/intel/fm10k/fm10k_common.c
442
fm10k_update_hw_stats_tx_q(hw, q, idx);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
443
fm10k_update_hw_stats_rx_q(hw, q, idx);
drivers/net/ethernet/intel/fm10k/fm10k_common.c
455
void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count)
drivers/net/ethernet/intel/fm10k/fm10k_common.c
459
for (i = 0; i < count; i++, q++) {
drivers/net/ethernet/intel/fm10k/fm10k_common.c
460
q->rx_stats_idx = 0;
drivers/net/ethernet/intel/fm10k/fm10k_common.c
461
q->tx_stats_idx = 0;
drivers/net/ethernet/intel/fm10k/fm10k_common.h
43
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
drivers/net/ethernet/intel/fm10k/fm10k_common.h
46
void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count);
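
The fm10k helpers above fold free-running 32b/48b hardware counters into wider software totals; the tx_stats_idx/rx_stats_idx comparisons decide whether to accumulate a delta or rebase after the queue was rebound to a different stats index. The sketch below shows just the wrap-safe delta step for a 32-bit counter; the struct and function names are invented for the example.

/* Sketch of accumulating a free-running 32-bit hardware counter into
 * a wider software total, in the spirit of fm10k's 32b update helper.
 * Names are illustrative, not the driver's. */
#include <assert.h>
#include <stdint.h>

struct stat64 {
	uint32_t base;   /* last raw hardware reading */
	uint64_t count;  /* accumulated software total */
};

/* Unsigned subtraction makes the delta correct across one wrap. */
static void stat_update(struct stat64 *s, uint32_t raw)
{
	s->count += raw - s->base;
	s->base = raw;
}

int main(void)
{
	struct stat64 s = { .base = 0xFFFFFFF0u, .count = 1000 };

	stat_update(&s, 0x00000010u);  /* counter wrapped: delta is 0x20 */
	assert(s.count == 1000 + 0x20);
	assert(s.base == 0x10);
	return 0;
}
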
drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
93
rxd->w.sglort, rxd->q.timestamp);
drivers/net/ethernet/intel/fm10k/fm10k_main.c
137
rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
drivers/net/ethernet/intel/fm10k/fm10k_main.c
458
FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1332
int q;
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1340
for (q = 255;;) {
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1342
if (q < FM10K_MAX_QUEUES_PF) {
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1344
fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1352
q &= ~(32 - 1);
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1354
if (!q)
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1357
if (q-- % 32)
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1360
maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
1362
fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
611
struct fm10k_hw_stats_q *q = &interface->stats.q[i];
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
613
tx_bytes_nic += q->tx_bytes.count;
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
614
tx_pkts_nic += q->tx_packets.count;
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
615
rx_bytes_nic += q->rx_bytes.count;
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
616
rx_pkts_nic += q->rx_packets.count;
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
617
rx_drops_nic += q->rx_drops.count;
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
1134
struct fm10k_hw_stats_q *q,
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
1142
fm10k_update_hw_stats_q(hw, q, idx, qpp);
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
1367
fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
1392
fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
drivers/net/ethernet/intel/fm10k/fm10k_type.h
439
struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF];
drivers/net/ethernet/intel/fm10k/fm10k_type.h
702
} q; /* Read, Writeback, 64b quad-words */
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
454
fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
468
fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
drivers/net/ethernet/intel/i40e/i40e_main.c
3882
int i, q;
drivers/net/ethernet/intel/i40e/i40e_main.c
3919
for (q = 0; q < q_vector->num_ringpairs; q++) {
drivers/net/ethernet/intel/i40e/i40e_main.c
3952
if (q == (q_vector->num_ringpairs - 1))
drivers/net/ethernet/intel/i40e/i40e_main.c
886
u16 q;
drivers/net/ethernet/intel/i40e/i40e_main.c
911
for (q = 0; q < vsi->num_queue_pairs; q++) {
drivers/net/ethernet/intel/i40e/i40e_main.c
913
p = READ_ONCE(vsi->tx_rings[q]);
drivers/net/ethernet/intel/i40e/i40e_main.c
931
p = READ_ONCE(vsi->rx_rings[q]);
drivers/net/ethernet/intel/i40e/i40e_main.c
951
p = READ_ONCE(vsi->xdp_rings[q]);
drivers/net/ethernet/intel/i40e/i40e_trace.h
62
TP_PROTO(struct napi_struct *napi, struct i40e_q_vector *q, int budget,
drivers/net/ethernet/intel/i40e/i40e_trace.h
66
TP_ARGS(napi, q, budget, budget_per_ring, rx_cleaned, tx_cleaned,
drivers/net/ethernet/intel/i40e/i40e_trace.h
78
__string(qname, q->name)
drivers/net/ethernet/intel/i40e/i40e_trace.h
90
__entry->irq_num = q->irq_num;
drivers/net/ethernet/intel/i40e/i40e_trace.h
94
__assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
drivers/net/ethernet/intel/i40e/i40e_xsk.c
63
unsigned long q;
drivers/net/ethernet/intel/i40e/i40e_xsk.c
65
for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
drivers/net/ethernet/intel/i40e/i40e_xsk.c
66
rx_ring = vsi->rx_rings[q];
drivers/net/ethernet/intel/ice/ice_lib.c
1920
int i, q;
drivers/net/ethernet/intel/ice/ice_lib.c
1939
for (q = 0; q < q_vector->num_ring_tx; q++) {
drivers/net/ethernet/intel/ice/ice_lib.c
1945
for (q = 0; q < q_vector->num_ring_rx; q++) {
drivers/net/ethernet/intel/ice/ice_lib.c
2607
int i, q;
drivers/net/ethernet/intel/ice/ice_lib.c
2613
for (q = 0; q < q_vector->num_ring_tx; q++) {
drivers/net/ethernet/intel/ice/ice_lib.c
2624
for (q = 0; q < q_vector->num_ring_rx; q++) {
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1064
u16 queue = tc_fltr->action.fwd.q.queue;
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1085
int q;
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1108
tc_fltr->action.fwd.q.queue);
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1114
q = tc_fltr->action.fwd.q.queue;
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1115
dest_vsi = ice_locate_vsi_using_queue(vsi, q);
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1205
rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1210
tc_fltr->action.fwd.q.queue,
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1211
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1273
lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
drivers/net/ethernet/intel/ice/ice_tc_lib.c
1274
tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
drivers/net/ethernet/intel/ice/ice_tc_lib.c
2049
fltr->action.fwd.q.queue = queue;
drivers/net/ethernet/intel/ice/ice_tc_lib.c
2051
fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];
drivers/net/ethernet/intel/ice/ice_tc_lib.c
2056
ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue);
drivers/net/ethernet/intel/ice/ice_tc_lib.h
65
} q;
drivers/net/ethernet/intel/ice/ice_xsk.c
83
int q, _qid = qid;
drivers/net/ethernet/intel/ice/ice_xsk.c
87
for (q = 0; q < q_vector->num_ring_tx; q++) {
drivers/net/ethernet/intel/ice/ice_xsk.c
94
for (q = 0; q < q_vector->num_ring_rx; q++) {
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1018
static void idpf_add_queue_stats(u64 **data, const void *q,
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1030
stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1034
stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
1044
idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
drivers/net/ethernet/intel/idpf/idpf_lib.c
1471
const struct idpf_buf_queue *q =
drivers/net/ethernet/intel/idpf/idpf_lib.c
1474
writel(q->next_to_alloc, q->tail);
drivers/net/ethernet/intel/idpf/idpf_lib.c
1478
const struct idpf_rx_queue *q =
drivers/net/ethernet/intel/idpf/idpf_lib.c
1481
writel(q->next_to_alloc, q->tail);
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
599
struct idpf_tx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
601
q = q_vec->tx[i];
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
602
clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1021
err = idpf_tx_desc_alloc(vport, q->txq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1024
err = idpf_compl_desc_alloc(vport, q->complq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1042
const struct idpf_queue_ptr *q = &qs->qs[i];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1044
switch (q->type) {
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1046
idpf_xdp_rxq_info_deinit(q->rxq, rsrc->rxq_model);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1047
idpf_rx_desc_rel(q->rxq, rsrc->dev, rsrc->rxq_model);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1050
idpf_rx_desc_rel_bufq(q->bufq, rsrc->dev);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1053
idpf_tx_desc_rel(q->txq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1055
if (idpf_queue_has(XDP, q->txq)) {
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1056
q->txq->pending = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1057
q->txq->xdp_tx = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1059
q->txq->txq_grp->num_completions_pending = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1062
writel(q->txq->next_to_use, q->txq->tail);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1065
idpf_compl_desc_rel(q->complq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1066
q->complq->num_completions = 0;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1212
const struct idpf_queue_ptr *q = &qs->qs[i];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1214
if (q->type != VIRTCHNL2_QUEUE_TYPE_TX)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1217
if (!idpf_queue_has(XSK, q->txq))
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1222
q->txq->q_vector = q_vector;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1223
q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1692
struct idpf_rx_queue *q)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1698
q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1700
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1739
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1741
q->dev = &adapter->pdev->dev;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1742
q->desc_count = rsrc->txq_desc_count;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1743
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1744
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1745
q->netdev = vport->netdev;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1746
q->txq_grp = tx_qgrp;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1747
q->rel_q_id = j;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1750
q->clean_budget = vport->compln_clean_budget;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1751
idpf_queue_assign(CRC_EN, q,
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1758
idpf_queue_set(FLOW_SCH_EN, q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1760
q->refillq = kzalloc_obj(*q->refillq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1761
if (!q->refillq)
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1764
idpf_queue_set(GEN_CHK, q->refillq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1765
idpf_queue_set(RFL_GEN_CHK, q->refillq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1854
struct idpf_buf_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1856
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1857
q->desc_count = rsrc->bufq_desc_count[j];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1858
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1860
idpf_queue_assign(HSPLIT_EN, q, hs);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1861
idpf_queue_assign(RSC_EN, q, rsc);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1889
struct idpf_rx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1892
q = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1893
q->rx_ptype_lkup = adapter->singleq_pt_lkup;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1896
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1903
idpf_queue_assign(HSPLIT_EN, q, hs);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1904
idpf_queue_assign(RSC_EN, q, rsc);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1905
q->rx_ptype_lkup = adapter->splitq_pt_lkup;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1908
q->desc_count = rsrc->rxq_desc_count;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1909
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1910
q->idx = (i * num_rxq) + j;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1911
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1912
q->rx_max_pkt_size = vport->netdev->mtu +
drivers/net/ethernet/intel/idpf/idpf_txrx.c
1914
idpf_rxq_set_descids(rsrc, q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
2391
desc->q.qw1.cmd_dtype =
drivers/net/ethernet/intel/idpf/idpf_txrx.c
2393
desc->q.qw1.cmd_dtype |=
drivers/net/ethernet/intel/idpf/idpf_txrx.c
2395
desc->q.qw1.buf_size = cpu_to_le16(size);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
2396
desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
2688
tx_desc->q.buf_addr = cpu_to_le64(dma);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
2757
tx_desc->q.buf_addr = cpu_to_le64(dma);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4395
struct idpf_rx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4398
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4400
q = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4401
q->q_vector = &rsrc->q_vectors[qv_idx];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4402
q_index = q->q_vector->num_rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4403
q->q_vector->rx[q_index] = q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4404
q->q_vector->num_rxq++;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4407
q->napi = &q->q_vector->napi;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4437
struct idpf_tx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4439
q = tx_qgrp->txqs[j];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4440
q->q_vector = &rsrc->q_vectors[qv_idx];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4441
q->q_vector->tx[q->q_vector->num_txq++] = q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4445
struct idpf_compl_queue *q = tx_qgrp->complq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4447
q->q_vector = &rsrc->q_vectors[qv_idx];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
4448
q->q_vector->complq[q->q_vector->num_complq++] = q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
804
struct idpf_rx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
806
q = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
807
err = idpf_rx_bufs_init_singleq(q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
818
struct idpf_buf_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
820
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
821
q->truesize = truesize;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
825
err = idpf_rx_bufs_init(q, type);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
829
truesize = q->truesize >> 1;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
921
struct idpf_rx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
924
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
926
q = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
928
err = idpf_rx_desc_alloc(vport, q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
941
struct idpf_buf_queue *q;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
943
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
drivers/net/ethernet/intel/idpf/idpf_txrx.c
945
err = idpf_bufq_desc_alloc(vport, q);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
972
const struct idpf_queue_ptr *q = &qs->qs[i];
drivers/net/ethernet/intel/idpf/idpf_txrx.c
975
switch (q->type) {
drivers/net/ethernet/intel/idpf/idpf_txrx.c
977
err = idpf_rx_desc_alloc(vport, q->rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
981
err = idpf_xdp_rxq_info_init(q->rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
986
err = idpf_rx_bufs_init_singleq(q->rxq);
drivers/net/ethernet/intel/idpf/idpf_txrx.c
990
bufq = q->bufq;
drivers/net/ethernet/intel/idpf/idpf_txrx.h
100
if (unlikely(++(idx) == (q)->desc_count)) \
drivers/net/ethernet/intel/idpf/idpf_txrx.h
148
struct idpf_flex_tx_desc q; /* queue based scheduling */
drivers/net/ethernet/intel/idpf/idpf_txrx.h
311
#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
drivers/net/ethernet/intel/idpf/idpf_txrx.h
312
#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
drivers/net/ethernet/intel/idpf/idpf_txrx.h
313
#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
drivers/net/ethernet/intel/idpf/idpf_txrx.h
314
#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
drivers/net/ethernet/intel/idpf/idpf_txrx.h
316
#define idpf_queue_has_clear(f, q) \
drivers/net/ethernet/intel/idpf/idpf_txrx.h
317
__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
drivers/net/ethernet/intel/idpf/idpf_txrx.h
318
#define idpf_queue_assign(f, q, v) \
drivers/net/ethernet/intel/idpf/idpf_txrx.h
319
__assign_bit(__IDPF_Q_##f, (q)->flags, v)
drivers/net/ethernet/intel/idpf/idpf_txrx.h
98
#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \
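
Note: the idpf_txrx.h entries above define the queue-flag helpers via token pasting (__IDPF_Q_##f), so a caller writes idpf_queue_set(GEN_CHK, q) instead of spelling out the enum constant. A self-contained userspace analogue, using plain bit operations in place of the kernel's __set_bit/test_bit family (the enum values here are invented for illustration):

#include <stdio.h>

/* Stand-in for the __IDPF_Q_* enum; values invented for illustration. */
enum { DEMO_Q_GEN_CHK, DEMO_Q_FLOW_SCH_EN, DEMO_Q_XDP };

struct demo_queue {
	unsigned long flags;
};

/* Token-pasting helpers in the style of idpf_queue_set()/idpf_queue_has():
 * the macro argument names the flag, ## glues it to the enum prefix. */
#define demo_queue_set(f, q)	((q)->flags |= 1UL << DEMO_Q_##f)
#define demo_queue_clear(f, q)	((q)->flags &= ~(1UL << DEMO_Q_##f))
#define demo_queue_has(f, q)	(!!((q)->flags & (1UL << DEMO_Q_##f)))
#define demo_queue_assign(f, q, v) \
	((v) ? demo_queue_set(f, q) : demo_queue_clear(f, q))

int main(void)
{
	struct demo_queue q = { 0 };

	demo_queue_set(GEN_CHK, &q);
	demo_queue_assign(XDP, &q, 1);
	printf("gen_chk=%d xdp=%d flow=%d\n",
	       demo_queue_has(GEN_CHK, &q),
	       demo_queue_has(XDP, &q),
	       demo_queue_has(FLOW_SCH_EN, &q));
	return 0;
}
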
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1433
struct idpf_rx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1435
q = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1436
q->tail = idpf_get_reg_addr(adapter,
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1447
struct idpf_buf_queue *q;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1449
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1450
q->tail = idpf_get_reg_addr(adapter,
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1743
const struct idpf_tx_queue *q,
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1748
qi->queue_id = cpu_to_le32(q->q_id);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1751
qi->ring_len = cpu_to_le16(q->desc_count);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1752
qi->dma_ring_addr = cpu_to_le64(q->dma);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1753
qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1760
if (idpf_queue_has(XDP, q))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1761
val = q->complq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1763
val = q->txq_grp->complq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1767
if (idpf_queue_has(FLOW_SCH_EN, q))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1782
const struct idpf_compl_queue *q,
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1787
qi->queue_id = cpu_to_le32(q->q_id);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1790
qi->ring_len = cpu_to_le16(q->desc_count);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1791
qi->dma_ring_addr = cpu_to_le64(q->dma);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1793
if (idpf_queue_has(FLOW_SCH_EN, q))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1914
struct idpf_rx_queue *q,
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1919
qi->queue_id = cpu_to_le32(q->q_id);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1922
qi->ring_len = cpu_to_le16(q->desc_count);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1923
qi->dma_ring_addr = cpu_to_le64(q->dma);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1924
qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1925
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1927
if (idpf_queue_has(RSC_EN, q))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1931
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1932
qi->desc_ids = cpu_to_le64(q->rxdids);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1937
sets = q->bufq_sets;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1943
q->rx_buf_size = sets[0].bufq.rx_buf_size;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1944
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1952
q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1954
if (idpf_queue_has(HSPLIT_EN, q)) {
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1956
qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1969
const struct idpf_buf_queue *q,
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1972
qi->queue_id = cpu_to_le32(q->q_id);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1975
qi->ring_len = cpu_to_le16(q->desc_count);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1976
qi->dma_ring_addr = cpu_to_le64(q->dma);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1977
qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1978
qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1981
if (idpf_queue_has(RSC_EN, q))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1984
if (idpf_queue_has(HSPLIT_EN, q)) {
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
1986
qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2169
const struct idpf_queue_ptr *q = &qs->qs[i];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2172
qc[i].type = cpu_to_le32(q->type);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2175
switch (q->type) {
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2177
qid = q->rxq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2180
qid = q->txq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2183
qid = q->bufq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2186
qid = q->complq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2338
const struct idpf_queue_ptr *q = &qs->qs[i];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2342
vqv[i].queue_type = cpu_to_le32(q->type);
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2344
switch (q->type) {
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2346
qid = q->rxq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2348
if (idpf_queue_has(NOIRQ, q->rxq))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2351
vec = q->rxq->q_vector;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2362
qid = q->txq->q_id;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2364
if (idpf_queue_has(NOIRQ, q->txq))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2366
else if (idpf_queue_has(XDP, q->txq))
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2367
vec = q->txq->complq->q_vector;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2369
vec = q->txq->txq_grp->complq->q_vector;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
2371
vec = q->txq->q_vector;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3937
struct idpf_rx_queue *q;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3940
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3942
q = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3943
q->q_id = qids[k];
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3960
struct idpf_buf_queue *q;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3962
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
3963
q->q_id = qids[k];
drivers/net/ethernet/intel/idpf/xdp.c
29
struct idpf_rx_queue *q;
drivers/net/ethernet/intel/idpf/xdp.c
33
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
drivers/net/ethernet/intel/idpf/xdp.c
35
q = rx_qgrp->singleq.rxqs[j];
drivers/net/ethernet/intel/idpf/xdp.c
37
err = fn(q, arg);
drivers/net/ethernet/intel/idpf/xdp.h
66
*(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
drivers/net/ethernet/intel/idpf/xdp.h
68
xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
drivers/net/ethernet/intel/idpf/xsk.c
100
void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
drivers/net/ethernet/intel/idpf/xsk.c
108
idpf_xsk_setup_rxq(vport, q);
drivers/net/ethernet/intel/idpf/xsk.c
111
idpf_xsk_setup_bufq(vport, q);
drivers/net/ethernet/intel/idpf/xsk.c
114
idpf_xsk_setup_txq(vport, q);
drivers/net/ethernet/intel/idpf/xsk.c
117
idpf_xsk_setup_complq(vport, q);
drivers/net/ethernet/intel/idpf/xsk.c
124
void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
drivers/net/ethernet/intel/idpf/xsk.c
133
rxq = q;
drivers/net/ethernet/intel/idpf/xsk.c
140
bufq = q;
drivers/net/ethernet/intel/idpf/xsk.c
147
txq = q;
drivers/net/ethernet/intel/idpf/xsk.c
155
complq = q;
drivers/net/ethernet/intel/idpf/xsk.c
436
struct idpf_buf_queue *q;
drivers/net/ethernet/intel/idpf/xsk.c
447
struct idpf_buf_queue *bufq = set->bufqs[i].q;
drivers/net/ethernet/intel/idpf/xsk.c
502
bufq = set.bufqs[bufq_id].q;
drivers/net/ethernet/intel/idpf/xsk.c
505
set.bufqs[bufq_id].q = bufq;
drivers/net/ethernet/intel/idpf/xsk.h
18
void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
drivers/net/ethernet/intel/idpf/xsk.h
20
void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
drivers/net/ethernet/intel/igb/e1000_nvm.c
690
u8 q, hval, rem, result;
drivers/net/ethernet/intel/igb/e1000_nvm.c
767
q = eeprom_verl / NVM_HEX_CONV;
drivers/net/ethernet/intel/igb/e1000_nvm.c
768
hval = q * NVM_HEX_TENS;
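
Note: the e1000_nvm.c lines read like a hex-coded-decimal conversion: divide the byte by 16 to get the tens digit, multiply by 10, then add the low digit back (igb version strings store decimal values in hex digits, e.g. 0x23 meaning "23"). The constant values below are assumptions matching the roles the identifiers play; they are not shown in this listing.

#include <stdint.h>
#include <stdio.h>

/* Assumed values, inferred from how the identifiers are used above. */
#define NVM_HEX_CONV 16
#define NVM_HEX_TENS 10

/* Convert a hex-coded-decimal byte: 0x23 -> 23, 0x07 -> 7. */
static uint8_t hex_coded_decimal(uint8_t eeprom_verl)
{
	uint8_t q = eeprom_verl / NVM_HEX_CONV;	/* tens digit */
	uint8_t hval = q * NVM_HEX_TENS;
	uint8_t rem = eeprom_verl % NVM_HEX_CONV;	/* ones digit */

	return hval + rem;
}

int main(void)
{
	printf("%u %u\n", hex_coded_decimal(0x23), hex_coded_decimal(0x07));
	return 0;	/* prints: 23 7 */
}
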
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1326
u16 vf, q;
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1334
for (q = 0; q < IXGBE_MAX_TX_QUEUES; q++) {
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1335
if (ixgbe_check_illegal_queue(adapter, q) &&
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
1336
!ixgbe_get_vf_idx(adapter, q, &vf))
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
3882
u32 i, j, reg, q, div, vf;
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
3911
q = j + (i * IXGBE_QUEUES_PER_REG);
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
3913
vf = q / div;
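
Note: the ixgbe_x550.c fragment flattens a (register, bit) pair into a global queue index (q = j + i * IXGBE_QUEUES_PER_REG), then maps the queue to its owning VF by dividing by the per-VF queue count; div presumably holds queues-per-VF under the active virtualization config. A minimal sketch with invented sizes:

#include <stdio.h>

#define QUEUES_PER_REG 32	/* bits per status register (assumed) */
#define QUEUES_PER_VF   4	/* queues per virtual function (invented) */

int main(void)
{
	/* Pretend bit j of register i reported an event. */
	unsigned int i = 2, j = 5;
	unsigned int q = j + i * QUEUES_PER_REG;	/* global queue index: 69 */
	unsigned int vf = q / QUEUES_PER_VF;		/* owning VF: 17 */

	printf("queue %u belongs to VF %u\n", q, vf);
	return 0;
}
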
drivers/net/ethernet/intel/libeth/xdp.c
321
bq->q[bq->count++] = netmem;
drivers/net/ethernet/marvell/mv643xx_eth.c
134
#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4))
drivers/net/ethernet/marvell/mv643xx_eth.c
136
#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2))
drivers/net/ethernet/marvell/mv643xx_eth.c
137
#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4))
drivers/net/ethernet/marvell/mv643xx_eth.c
138
#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4))
drivers/net/ethernet/marvell/mv643xx_eth.c
139
#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4))
drivers/net/ethernet/marvell/mvneta.c
136
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
188
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
drivers/net/ethernet/marvell/mvneta.c
197
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
drivers/net/ethernet/marvell/mvneta.c
198
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
drivers/net/ethernet/marvell/mvneta.c
241
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
242
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
245
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
248
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
261
#define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
265
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
299
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
drivers/net/ethernet/marvell/mvneta.c
300
(((index) < (q)->last_desc) ? ((index) + 1) : 0)
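
Note: MVNETA_QUEUE_NEXT_DESC above advances a descriptor index with an explicit compare-and-wrap rather than a modulo, which also works when the ring size is not a power of two. A small sketch of the same pattern (demo names are invented):

#include <stdio.h>

struct demo_ring {
	int last_desc;	/* ring size - 1 */
};

/* Same shape as MVNETA_QUEUE_NEXT_DESC(q, index): wrap to 0 after
 * the last descriptor instead of taking a modulo. */
#define DEMO_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

int main(void)
{
	struct demo_ring r = { .last_desc = 2 };	/* 3 descriptors */
	int idx = 0;

	for (int i = 0; i < 7; i++) {
		printf("%d ", idx);
		idx = DEMO_NEXT_DESC(&r, idx);
	}
	printf("\n");	/* 0 1 2 0 1 2 0 */
	return 0;
}
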
drivers/net/ethernet/marvell/mvneta.c
45
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
53
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
55
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
56
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
59
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
61
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
drivers/net/ethernet/marvell/mvneta.c
84
#define MVNETA_DEF_RXQ(q) ((q) << 1)
drivers/net/ethernet/marvell/mvneta.c
85
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
drivers/net/ethernet/marvell/mvneta.c
87
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
drivers/net/ethernet/marvell/mvneta.c
88
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
drivers/net/ethernet/marvell/mvneta.c
89
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
drivers/net/ethernet/marvell/mvneta.c
91
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
drivers/net/ethernet/marvell/mvneta.c
92
MVNETA_DEF_RXQ_ARP(q) | \
drivers/net/ethernet/marvell/mvneta.c
93
MVNETA_DEF_RXQ_TCP(q) | \
drivers/net/ethernet/marvell/mvneta.c
94
MVNETA_DEF_RXQ_UDP(q) | \
drivers/net/ethernet/marvell/mvneta.c
95
MVNETA_DEF_RXQ_BPDU(q) | \
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
398
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
402
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
404
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
600
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
601
(((index) < (q)->last_desc) ? ((index) + 1) : 0)
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
799
#define MSS_RXQ_TRESH_REG(q, fq) (MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
828
#define MSS_RXQ_ASS_Q_BASE(q, fq) ((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
830
#define MSS_RXQ_ASS_PQ_BASE(q, fq) ((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
832
#define MSS_RXQ_ASS_REG(q, fq) (MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
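
Note: the MSS_RXQ_ASS_* macros above pack several queues' assignment fields into each 32-bit register: ((q) + (fq)) % MSS_RXQ_ASS_PER_REG selects the lane within a register and ((q) + (fq)) / MSS_RXQ_ASS_PER_REG selects the register. A sketch of the same pack arithmetic; the field width and count per register below are invented, not mvpp2's actual values:

#include <stdint.h>
#include <stdio.h>

#define FIELDS_PER_REG 4	/* invented; plays the MSS_RXQ_ASS_PER_REG role */
#define FIELD_BITS     8	/* invented lane width */
#define FIELD_MASK     0xffu

static uint32_t regs[8];

/* Write one queue's field: index / FIELDS_PER_REG picks the register,
 * index % FIELDS_PER_REG picks the lane inside it. */
static void field_write(unsigned int q, uint8_t val)
{
	unsigned int shift = (q % FIELDS_PER_REG) * FIELD_BITS;
	uint32_t *reg = &regs[q / FIELDS_PER_REG];

	*reg &= ~(FIELD_MASK << shift);	/* clear the lane */
	*reg |= (uint32_t)val << shift;	/* install the new value */
}

int main(void)
{
	field_write(5, 0xab);	/* register 1, lane 1 */
	printf("reg1 = %#x\n", (unsigned int)regs[1]);	/* 0xab00 */
	return 0;
}
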
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1989
int i, q;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2000
for (q = 0; q < port->ntxqs; q++)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2003
ethtool_sprintf(&data, str, q);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2006
for (q = 0; q < port->nrxqs; q++)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2009
ethtool_sprintf(&data, str, q);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2060
int i, q;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2072
for (q = 0; q < port->ntxqs; q++)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2075
MVPP22_CTRS_TX_CTR(port->id, q),
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2081
for (q = 0; q < port->nrxqs; q++)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2084
port->first_rxq + q,
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
798
int val, cm3_state, host_id, q;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
813
for (q = 0; q < port->nrxqs; q++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
817
mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
819
val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
821
val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
822
val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
823
val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
836
host_id = q;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
841
val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
844
mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
859
int val, cm3_state, q;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
874
for (q = 0; q < port->nrxqs; q++) {
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
878
mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
880
val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
882
val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
884
val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
887
mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
156
int q;
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
160
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
161
cn93_reset_iq(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
162
cn93_reset_oq(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
183
int q;
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
185
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
191
octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
193
regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
195
CN93_SDP_EPVF_RING(pf_srn + q), regval);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
809
u8 q;
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
811
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
812
octep_enable_iq_cn93_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
813
octep_enable_oq_cn93_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
843
int q = 0;
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
845
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
846
octep_disable_iq_cn93_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
847
octep_disable_oq_cn93_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
854
u8 srn, num_rings, q;
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
859
for (q = srn; q < srn + num_rings; q++)
drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
860
cn93_dump_regs(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
180
int q;
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
184
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
185
cnxk_reset_iq(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
186
cnxk_reset_oq(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
207
int q;
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
209
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
215
octep_write_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q), regval);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
217
regval = octep_read_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q));
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
219
CNXK_SDP_EPVF_RING(pf_srn + q), regval);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
848
u8 q;
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
850
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
851
octep_enable_iq_cnxk_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
852
octep_enable_oq_cnxk_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
882
int q = 0;
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
884
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
885
octep_disable_iq_cnxk_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
886
octep_disable_oq_cnxk_pf(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
893
u8 srn, num_rings, q;
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
898
for (q = srn; q < srn + num_rings; q++)
drivers/net/ethernet/marvell/octeon_ep/octep_cnxk_pf.c
899
cnxk_dump_regs(oct, q);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
133
octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz)
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
139
qbuf = (q->hw_q + *pi);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
143
*pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
146
cp_sz = min((q->sz - *pi), w_sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
149
*pi = octep_ctrl_mbox_circq_inc(*pi, cp_sz, q->sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
153
qbuf = (q->hw_q + *pi);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
155
*pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
163
struct octep_ctrl_mbox_q *q;
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
174
q = &mbox->h2fq;
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
175
pi = readl(q->hw_prod);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
176
ci = readl(q->hw_cons);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
178
if (octep_ctrl_mbox_circq_space(pi, ci, q->sz) < (msg->hdr.s.sz + mbox_hdr_sz)) {
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
183
octep_write_mbox_data(q, &pi, ci, (void *)&msg->hdr, mbox_hdr_sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
188
octep_write_mbox_data(q, &pi, ci, sg->msg, w_sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
191
writel(pi, q->hw_prod);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
198
octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz)
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
204
qbuf = (q->hw_q + *ci);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
208
*ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
211
cp_sz = min((q->sz - *ci), r_sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
214
*ci = octep_ctrl_mbox_circq_inc(*ci, cp_sz, q->sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
218
qbuf = (q->hw_q + *ci);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
220
*ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
229
struct octep_ctrl_mbox_q *q;
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
236
q = &mbox->f2hq;
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
237
pi = readl(q->hw_prod);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
238
ci = readl(q->hw_cons);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
240
q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
246
octep_read_mbox_data(q, pi, &ci, (void *)&msg->hdr, mbox_hdr_sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
251
octep_read_mbox_data(q, pi, &ci, sg->msg, r_sz);
drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
254
writel(ci, q->hw_cons);
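
Note: the octep mailbox fragments above advance producer/consumer indices through helpers (octep_ctrl_mbox_circq_inc/_space/_depth, whose bodies are not in this listing) and split a copy in two when it would run past the end of the ring, as the min((q->sz - *pi), w_sz) line shows. A generic sketch of that wrap-around write, under the assumption the indices are plain byte offsets modulo the queue size:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QSZ 16	/* invented queue size */

static uint8_t hw_q[QSZ];

static uint32_t circq_inc(uint32_t idx, uint32_t n, uint32_t sz)
{
	return (idx + n) % sz;
}

/* Copy w_sz bytes at the producer index, splitting the copy when it
 * would run past the end of the ring (the min() case above). */
static void write_data(uint32_t *pi, const void *buf, uint32_t w_sz)
{
	uint32_t first = QSZ - *pi;

	if (w_sz <= first) {
		memcpy(hw_q + *pi, buf, w_sz);
	} else {
		memcpy(hw_q + *pi, buf, first);
		memcpy(hw_q, (const uint8_t *)buf + first, w_sz - first);
	}
	*pi = circq_inc(*pi, w_sz, QSZ);
}

int main(void)
{
	uint32_t pi = 12;

	write_data(&pi, "ABCDEFGH", 8);	/* wraps: 4 bytes at 12, 4 at 0 */
	printf("pi=%u tail=%.4s head=%.4s\n", (unsigned int)pi,
	       (const char *)(hw_q + 12), (const char *)hw_q);
	return 0;
}
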
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
133
int q, i;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
153
for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
154
tx_packets += oct->stats_iq[q].instr_completed;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
155
tx_bytes += oct->stats_iq[q].bytes_sent;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
156
tx_busy_errors += oct->stats_iq[q].tx_busy;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
158
rx_packets += oct->stats_oq[q].packets;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
159
rx_bytes += oct->stats_oq[q].bytes;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
160
rx_alloc_errors += oct->stats_oq[q].alloc_failures;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
198
for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
199
data[i++] = oct->stats_iq[q].instr_posted;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
200
data[i++] = oct->stats_iq[q].instr_completed;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
201
data[i++] = oct->stats_iq[q].bytes_sent;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
202
data[i++] = oct->stats_iq[q].tx_busy;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
206
for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
207
data[i++] = oct->stats_oq[q].packets;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
208
data[i++] = oct->stats_oq[q].bytes;
drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
209
data[i++] = oct->stats_oq[q].alloc_failures;
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1011
int q;
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1017
for (q = 0; q < OCTEP_MAX_QUEUES; q++) {
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1018
tx_packets += oct->stats_iq[q].instr_completed;
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1019
tx_bytes += oct->stats_iq[q].bytes_sent;
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1020
rx_packets += oct->stats_oq[q].packets;
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
1021
rx_bytes += oct->stats_oq[q].bytes;
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
105
void (*enable_iq)(struct octep_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
106
void (*disable_iq)(struct octep_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
107
void (*enable_oq)(struct octep_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
108
void (*disable_oq)(struct octep_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
79
void (*setup_iq_regs)(struct octep_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep/octep_main.h
80
int (*setup_oq_regs)(struct octep_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
125
int q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
129
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
130
cn93_vf_reset_iq(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
131
cn93_vf_reset_oq(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
312
int num_rings, q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
316
for (q = 0; q < num_rings; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
317
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
319
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
321
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
323
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
333
int num_rings, q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
341
for (q = 0; q < num_rings; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
342
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
344
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
346
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
348
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
407
u8 q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
409
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
410
octep_vf_enable_iq_cn93(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
411
octep_vf_enable_oq_cn93(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
438
int q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
440
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
441
octep_vf_disable_iq_cn93(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
442
octep_vf_disable_oq_cn93(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
449
u8 num_rings, q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
452
for (q = 0; q < num_rings; q++)
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
453
cn93_vf_dump_q_regs(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
127
int q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
131
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
132
cnxk_vf_reset_iq(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
133
cnxk_vf_reset_oq(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
355
int num_rings, q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
359
for (q = 0; q < num_rings; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
360
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
362
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
364
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
366
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
376
int num_rings, q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
384
for (q = 0; q < num_rings; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
385
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
387
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
389
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
391
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
450
u8 q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
452
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
453
octep_vf_enable_iq_cnxk(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
454
octep_vf_enable_oq_cnxk(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
481
int q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
483
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
484
octep_vf_disable_iq_cnxk(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
485
octep_vf_disable_oq_cnxk(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
492
u8 num_rings, q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
495
for (q = 0; q < num_rings; q++)
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
496
cnxk_vf_dump_q_regs(oct, q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
108
int q, i;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
117
for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
118
tx_busy_errors += oct->stats_iq[q].tx_busy;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
119
rx_alloc_errors += oct->stats_oq[q].alloc_failures;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
134
for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
135
data[i++] = oct->stats_iq[q].instr_posted;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
136
data[i++] = oct->stats_iq[q].instr_completed;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
137
data[i++] = oct->stats_iq[q].bytes_sent;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
138
data[i++] = oct->stats_iq[q].tx_busy;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
142
for (q = 0; q < oct->num_oqs; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
143
data[i++] = oct->stats_oq[q].packets;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
144
data[i++] = oct->stats_oq[q].bytes;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
145
data[i++] = oct->stats_oq[q].alloc_failures;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
799
int q;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
805
for (q = 0; q < OCTEP_VF_MAX_QUEUES; q++) {
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
806
tx_packets += oct->stats_iq[q].instr_completed;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
807
tx_bytes += oct->stats_iq[q].bytes_sent;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
808
rx_packets += oct->stats_oq[q].packets;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
809
rx_bytes += oct->stats_oq[q].bytes;
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
57
void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
58
int (*setup_oq_regs)(struct octep_vf_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
71
void (*enable_iq)(struct octep_vf_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
72
void (*disable_iq)(struct octep_vf_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
73
void (*enable_oq)(struct octep_vf_device *oct, int q);
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
74
void (*disable_oq)(struct octep_vf_device *oct, int q);
drivers/net/ethernet/marvell/octeontx2/af/common.h
47
static inline int qmem_alloc(struct device *dev, struct qmem **q,
drivers/net/ethernet/marvell/octeontx2/af/common.h
56
*q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
drivers/net/ethernet/marvell/octeontx2/af/common.h
57
if (!*q)
drivers/net/ethernet/marvell/octeontx2/af/common.h
59
qmem = *q;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
83
u64 q : 20;
drivers/net/ethernet/marvell/prestera/prestera_pci.c
145
#define PRESTERA_CMDQ_REG_OFFSET(q, f) \
drivers/net/ethernet/marvell/prestera/prestera_pci.c
147
(q) * sizeof(struct prestera_fw_cmdq_regs) + \
drivers/net/ethernet/marvell/prestera/prestera_pci.c
150
#define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_ctl)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
151
#define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_len)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
152
#define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_ctl)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
153
#define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_len)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
154
#define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
155
#define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
172
#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
drivers/net/ethernet/marvell/prestera/prestera_pci.c
174
(q) * sizeof(struct prestera_fw_evtq_regs) + \
drivers/net/ethernet/marvell/prestera/prestera_pci.c
177
#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
178
#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
179
#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs)
drivers/net/ethernet/marvell/prestera/prestera_pci.c
180
#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len)
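
Note: PRESTERA_CMDQ_REG_OFFSET above scales the queue index by the size of a per-queue register struct; judging by its f parameter (only the middle line of the macro appears in this listing), the field part presumably resolves to the field's offset within that struct. A sketch of the pattern using offsetof, with an invented register layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented per-queue register block; stands in for
 * struct prestera_fw_cmdq_regs. */
struct demo_cmdq_regs {
	uint32_t req_ctl;
	uint32_t req_len;
	uint32_t offs;
	uint32_t len;
};

#define DEMO_CMDQ_BASE 0x1000	/* invented base offset */

/* Queue index scales by the struct size; the field name resolves to
 * its offset inside the block. */
#define DEMO_CMDQ_REG_OFFSET(q, f) \
	(DEMO_CMDQ_BASE + \
	 (q) * sizeof(struct demo_cmdq_regs) + \
	 offsetof(struct demo_cmdq_regs, f))

int main(void)
{
	printf("q2 req_len at %#zx\n",
	       (size_t)DEMO_CMDQ_REG_OFFSET(2, req_len));	/* 0x1024 */
	return 0;
}
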
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
280
int q;
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
290
for (q = 0; q < qnum && pkts_done < budget; q++) {
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
291
struct prestera_rx_ring *ring = &sdma->rx_ring[q];
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
301
rxq_done_map &= ~BIT(q);
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
303
rxq_done_map |= BIT(q);
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
336
int q, b;
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
342
for (q = 0; q < qnum; q++) {
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
343
struct prestera_rx_ring *ring = &sdma->rx_ring[q];
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
372
int q;
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
378
for (q = 0; q < qnum; q++) {
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
380
struct prestera_rx_ring *ring = &sdma->rx_ring[q];
drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
415
prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
drivers/net/ethernet/marvell/skge.c
2484
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
drivers/net/ethernet/marvell/skge.c
2492
skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
drivers/net/ethernet/marvell/skge.c
2493
skge_write32(hw, RB_ADDR(q, RB_START), start);
drivers/net/ethernet/marvell/skge.c
2494
skge_write32(hw, RB_ADDR(q, RB_WP), start);
drivers/net/ethernet/marvell/skge.c
2495
skge_write32(hw, RB_ADDR(q, RB_RP), start);
drivers/net/ethernet/marvell/skge.c
2496
skge_write32(hw, RB_ADDR(q, RB_END), end);
drivers/net/ethernet/marvell/skge.c
2498
if (q == Q_R1 || q == Q_R2) {
drivers/net/ethernet/marvell/skge.c
2500
skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
drivers/net/ethernet/marvell/skge.c
2502
skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
drivers/net/ethernet/marvell/skge.c
2508
skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
drivers/net/ethernet/marvell/skge.c
2511
skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
drivers/net/ethernet/marvell/skge.c
2515
static void skge_qset(struct skge_port *skge, u16 q,
drivers/net/ethernet/marvell/skge.c
2526
skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
drivers/net/ethernet/marvell/skge.c
2527
skge_write32(hw, Q_ADDR(q, Q_F), watermark);
drivers/net/ethernet/marvell/skge.c
2528
skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
drivers/net/ethernet/marvell/skge.c
2529
skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
drivers/net/ethernet/marvell/sky2.c
1037
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
drivers/net/ethernet/marvell/sky2.c
1046
sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
drivers/net/ethernet/marvell/sky2.c
1047
sky2_write32(hw, RB_ADDR(q, RB_START), start);
drivers/net/ethernet/marvell/sky2.c
1048
sky2_write32(hw, RB_ADDR(q, RB_END), end);
drivers/net/ethernet/marvell/sky2.c
1049
sky2_write32(hw, RB_ADDR(q, RB_WP), start);
drivers/net/ethernet/marvell/sky2.c
1050
sky2_write32(hw, RB_ADDR(q, RB_RP), start);
drivers/net/ethernet/marvell/sky2.c
1052
if (q == Q_R1 || q == Q_R2) {
drivers/net/ethernet/marvell/sky2.c
1059
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
drivers/net/ethernet/marvell/sky2.c
1060
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
drivers/net/ethernet/marvell/sky2.c
1063
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
drivers/net/ethernet/marvell/sky2.c
1064
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
drivers/net/ethernet/marvell/sky2.c
1069
sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
drivers/net/ethernet/marvell/sky2.c
1072
sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
drivers/net/ethernet/marvell/sky2.c
1073
sky2_read8(hw, RB_ADDR(q, RB_CTRL));
drivers/net/ethernet/marvell/sky2.c
1077
static void sky2_qset(struct sky2_hw *hw, u16 q)
drivers/net/ethernet/marvell/sky2.c
1079
sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
drivers/net/ethernet/marvell/sky2.c
1080
sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
drivers/net/ethernet/marvell/sky2.c
1081
sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
drivers/net/ethernet/marvell/sky2.c
1082
sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
drivers/net/ethernet/marvell/sky2.c
1126
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
drivers/net/ethernet/marvell/sky2.c
1130
sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
drivers/net/ethernet/marvell/sky2.c
2915
static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
drivers/net/ethernet/marvell/sky2.c
2918
u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
drivers/net/ethernet/marvell/sky2.c
2921
dev->name, (unsigned) q, (unsigned) idx,
drivers/net/ethernet/marvell/sky2.c
2922
(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
drivers/net/ethernet/marvell/sky2.c
2924
sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
drivers/net/ethernet/marvell/sky2.h
782
#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
drivers/net/ethernet/mediatek/mtk_wed_wo.c
102
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
103
int index = (q->tail + 1) % q->n_desc;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
108
if (!q->queued)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
112
q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
113
else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
drivers/net/ethernet/mediatek/mtk_wed_wo.c
116
q->tail = index;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
117
q->queued--;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
119
desc = &q->desc[index];
drivers/net/ethernet/mediatek/mtk_wed_wo.c
120
entry = &q->entry[index];
drivers/net/ethernet/mediatek/mtk_wed_wo.c
134
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
drivers/net/ethernet/mediatek/mtk_wed_wo.c
140
while (q->queued < q->n_desc) {
drivers/net/ethernet/mediatek/mtk_wed_wo.c
145
buf = page_frag_alloc(&q->cache, q->buf_size,
drivers/net/ethernet/mediatek/mtk_wed_wo.c
150
addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
156
q->head = (q->head + 1) % q->n_desc;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
157
entry = &q->entry[q->head];
drivers/net/ethernet/mediatek/mtk_wed_wo.c
159
entry->len = q->buf_size;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
160
q->entry[q->head].buf = buf;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
163
struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
drivers/net/ethernet/mediatek/mtk_wed_wo.c
171
q->queued++;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
186
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
194
data = mtk_wed_wo_dequeue(wo, q, &len, false);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
198
skb = build_skb(data, q->buf_size);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
217
if (mtk_wed_wo_queue_refill(wo, q, true)) {
drivers/net/ethernet/mediatek/mtk_wed_wo.c
218
u32 index = (q->head - 1) % q->n_desc;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
220
mtk_wed_wo_queue_kick(wo, q, index);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
257
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
drivers/net/ethernet/mediatek/mtk_wed_wo.c
261
q->regs = *regs;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
262
q->n_desc = n_desc;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
263
q->buf_size = buf_size;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
265
q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
drivers/net/ethernet/mediatek/mtk_wed_wo.c
266
&q->desc_dma, GFP_KERNEL);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
267
if (!q->desc)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
270
q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
drivers/net/ethernet/mediatek/mtk_wed_wo.c
272
if (!q->entry)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
279
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
281
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
282
dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
drivers/net/ethernet/mediatek/mtk_wed_wo.c
283
q->desc_dma);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
287
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
291
for (i = 0; i < q->n_desc; i++) {
drivers/net/ethernet/mediatek/mtk_wed_wo.c
292
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
drivers/net/ethernet/mediatek/mtk_wed_wo.c
303
page_frag_cache_drain(&q->cache);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
307
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
310
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
318
page_frag_cache_drain(&q->cache);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
322
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
drivers/net/ethernet/mediatek/mtk_wed_wo.c
324
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
325
mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
326
mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
329
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
drivers/net/ethernet/mediatek/mtk_wed_wo.c
337
q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
338
index = (q->head + 1) % q->n_desc;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
339
if (q->tail == index) {
drivers/net/ethernet/mediatek/mtk_wed_wo.c
344
entry = &q->entry[index];
drivers/net/ethernet/mediatek/mtk_wed_wo.c
350
desc = &q->desc[index];
drivers/net/ethernet/mediatek/mtk_wed_wo.c
351
q->head = index;
drivers/net/ethernet/mediatek/mtk_wed_wo.c
364
mtk_wed_wo_queue_kick(wo, q, q->head);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
91
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
drivers/net/ethernet/mediatek/mtk_wed_wo.c
95
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
drivers/net/ethernet/mediatek/mtk_wed_wo.c
99
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
drivers/net/ethernet/mediatek/mtk_wed_wo.h
280
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
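The mtk_wed_wo.c lines above trace a classic single-producer ring: dequeue advances q->tail modulo q->n_desc and only consumes a slot once hardware has set the DMA-done bit, while refill advances q->head the same way. A minimal sketch of the consume side, with hypothetical names (ring_ex, ring_consume_one_ex) rather than the driver's:

struct ring_ex {
	unsigned int tail, queued, n_desc;
	u32 *ctrl;			/* per-descriptor control words */
};

static int ring_consume_one_ex(struct ring_ex *q, u32 done_bit)
{
	unsigned int index = (q->tail + 1) % q->n_desc;

	if (!q->queued)
		return -1;		/* nothing outstanding */
	if (!(q->ctrl[index] & done_bit))
		return -1;		/* hardware still owns this slot */

	q->tail = index;		/* consume: advance tail modulo ring size */
	q->queued--;
	return index;
}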
drivers/net/ethernet/mellanox/mlxsw/pci.c
1006
pp_params.napi = &q->u.cq.napi;
drivers/net/ethernet/mellanox/mlxsw/pci.c
101
struct mlxsw_pci_queue *q;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1014
q->u.cq.page_pool = page_pool;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1018
static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
1024
page_pool_destroy(q->u.cq.page_pool);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1028
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1030
enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1034
q->consumer_counter = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1036
for (i = 0; i < q->count; i++) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
1037
char *elem = mlxsw_pci_queue_elem_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1039
mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1042
if (q->u.cq.v == MLXSW_PCI_CQE_V1)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1045
else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1051
mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
drivers/net/ethernet/mellanox/mlxsw/pci.c
1053
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1057
err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1060
mlxsw_pci_cq_napi_setup(q, cq_type);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1062
err = mlxsw_pci_cq_page_pool_init(q, cq_type);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1066
napi_enable(&q->u.cq.napi);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1067
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1068
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1072
mlxsw_pci_cq_napi_teardown(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1077
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1079
enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1081
napi_disable(&q->u.cq.napi);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1082
mlxsw_pci_cq_page_pool_fini(q, cq_type);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1083
mlxsw_pci_cq_napi_teardown(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1084
mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1087
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1089
return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
drivers/net/ethernet/mellanox/mlxsw/pci.c
1093
static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1095
return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
drivers/net/ethernet/mellanox/mlxsw/pci.c
1099
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1105
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1108
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
drivers/net/ethernet/mellanox/mlxsw/pci.c
1110
q->consumer_counter++;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1118
struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1119
struct mlxsw_pci *mlxsw_pci = q->pci;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1120
int credits = q->count >> 1;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1127
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
1138
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1139
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1143
q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1144
napi_schedule(&q->u.cq.napi);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1149
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1158
WARN_ON_ONCE(q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1159
q->num = MLXSW_PCI_EQ_COMP_NUM;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1161
q->consumer_counter = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1163
for (i = 0; i < q->count; i++) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
1164
char *elem = mlxsw_pci_queue_elem_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1171
mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
drivers/net/ethernet/mellanox/mlxsw/pci.c
1173
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1177
err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1180
tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1181
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1182
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1187
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1189
mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1196
struct mlxsw_pci_queue *q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1198
struct mlxsw_pci_queue *q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1200
struct mlxsw_pci_queue *q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1201
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1202
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1242
struct mlxsw_pci_queue *q, u8 q_num)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1244
struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1248
q->num = q_num;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1250
q_ops->pre_init(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1252
spin_lock_init(&q->lock);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1253
q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
drivers/net/ethernet/mellanox/mlxsw/pci.c
1255
q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
drivers/net/ethernet/mellanox/mlxsw/pci.c
1257
q->type = q_ops->type;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1258
q->pci = mlxsw_pci;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1267
q->elem_info = kzalloc_objs(*q->elem_info, q->count);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1268
if (!q->elem_info) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
1276
for (i = 0; i < q->count; i++) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
1279
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1281
__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1285
err = q_ops->init(mlxsw_pci, mbox, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1291
kfree(q->elem_info);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1300
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1302
struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1304
q_ops->fini(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1305
kfree(q->elem_info);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1319
queue_group->q = kzalloc_objs(*queue_group->q, num_qs);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1320
if (!queue_group->q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
1325
&queue_group->q[i], i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1335
mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1336
kfree(queue_group->q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1348
mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1349
kfree(queue_group->q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
1737
struct mlxsw_pci_queue *q;
drivers/net/ethernet/mellanox/mlxsw/pci.c
1739
q = mlxsw_pci_eq_get(mlxsw_pci);
drivers/net/ethernet/mellanox/mlxsw/pci.c
174
static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
1740
tasklet_schedule(&q->u.eq.tasklet);
drivers/net/ethernet/mellanox/mlxsw/pci.c
177
return q->mem_item.buf + (elem_size * elem_index);
drivers/net/ethernet/mellanox/mlxsw/pci.c
181
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
drivers/net/ethernet/mellanox/mlxsw/pci.c
183
return &q->elem_info[elem_index];
drivers/net/ethernet/mellanox/mlxsw/pci.c
187
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
189
int index = q->producer_counter & (q->count - 1);
drivers/net/ethernet/mellanox/mlxsw/pci.c
191
if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
drivers/net/ethernet/mellanox/mlxsw/pci.c
193
return mlxsw_pci_queue_elem_info_get(q, index);
drivers/net/ethernet/mellanox/mlxsw/pci.c
197
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
199
int index = q->consumer_counter & (q->count - 1);
drivers/net/ethernet/mellanox/mlxsw/pci.c
201
return mlxsw_pci_queue_elem_info_get(q, index);
drivers/net/ethernet/mellanox/mlxsw/pci.c
204
static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
drivers/net/ethernet/mellanox/mlxsw/pci.c
206
return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
drivers/net/ethernet/mellanox/mlxsw/pci.c
209
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
drivers/net/ethernet/mellanox/mlxsw/pci.c
211
return owner_bit != !!(q->consumer_counter & q->count);
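The mlxsw helpers above encode ring position and ownership in free-running counters: the slot index is counter & (count - 1) with count a power of two, and the expected owner value is counter & count, which flips on every wrap, so an element whose owner bit still matches the previous pass is hardware-owned. A sketch assuming 16-bit counters, as in the producer/consumer fullness check listed above:

static bool hw_owned_ex(u16 consumer_counter, u16 count, bool owner_bit)
{
	bool sw_pass = !!(consumer_counter & count);	/* toggles per wrap */

	return owner_bit != sw_pass;
}

static u16 slot_index_ex(u16 counter, u16 count)
{
	return counter & (count - 1);			/* count is 2^n */
}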
drivers/net/ethernet/mellanox/mlxsw/pci.c
2152
struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
drivers/net/ethernet/mellanox/mlxsw/pci.c
2154
return !mlxsw_pci_queue_elem_info_producer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
2161
struct mlxsw_pci_queue *q;
drivers/net/ethernet/mellanox/mlxsw/pci.c
2177
q = mlxsw_pci_sdq_pick(mlxsw_pci, &txhdr_info->tx_info);
drivers/net/ethernet/mellanox/mlxsw/pci.c
2178
spin_lock_bh(&q->lock);
drivers/net/ethernet/mellanox/mlxsw/pci.c
2179
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
2219
q->producer_counter++;
drivers/net/ethernet/mellanox/mlxsw/pci.c
2220
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
2228
spin_unlock_bh(&q->lock);
drivers/net/ethernet/mellanox/mlxsw/pci.c
225
return &mlxsw_pci->queues[q_type].q[q_num];
drivers/net/ethernet/mellanox/mlxsw/pci.c
248
struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
253
mlxsw_pci_doorbell_type_offset[q->type],
drivers/net/ethernet/mellanox/mlxsw/pci.c
254
q->num), val);
drivers/net/ethernet/mellanox/mlxsw/pci.c
258
struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
263
mlxsw_pci_doorbell_arm_type_offset[q->type],
drivers/net/ethernet/mellanox/mlxsw/pci.c
264
q->num), val);
drivers/net/ethernet/mellanox/mlxsw/pci.c
268
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
271
__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
drivers/net/ethernet/mellanox/mlxsw/pci.c
275
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
278
__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
279
q->consumer_counter + q->count);
drivers/net/ethernet/mellanox/mlxsw/pci.c
284
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
287
__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
drivers/net/ethernet/mellanox/mlxsw/pci.c
290
static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
293
return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
drivers/net/ethernet/mellanox/mlxsw/pci.c
297
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
306
q->producer_counter = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
307
q->consumer_counter = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
308
tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
drivers/net/ethernet/mellanox/mlxsw/pci.c
310
lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
drivers/net/ethernet/mellanox/mlxsw/pci.c
314
cq_num = q->num;
drivers/net/ethernet/mellanox/mlxsw/pci.c
320
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
325
err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
330
cq->u.cq.dq = q;
drivers/net/ethernet/mellanox/mlxsw/pci.c
331
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
336
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
338
mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
393
static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
397
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
446
static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
450
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
458
mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
drivers/net/ethernet/mellanox/mlxsw/pci.c
463
static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
467
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
480
mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
489
if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries))
drivers/net/ethernet/mellanox/mlxsw/pci.c
500
mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
505
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
510
err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
512
dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n");
drivers/net/ethernet/mellanox/mlxsw/pci.c
527
mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[],
drivers/net/ethernet/mellanox/mlxsw/pci.c
530
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
538
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
547
q->producer_counter = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
548
q->consumer_counter = 0;
drivers/net/ethernet/mellanox/mlxsw/pci.c
553
cq_num = sdq_count + q->num;
drivers/net/ethernet/mellanox/mlxsw/pci.c
557
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
562
err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
567
cq->u.cq.dq = q;
drivers/net/ethernet/mellanox/mlxsw/pci.c
568
q->u.rdq.cq = cq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
570
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
572
for (i = 0; i < q->count; i++) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
573
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
577
err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
drivers/net/ethernet/mellanox/mlxsw/pci.c
582
q->producer_counter++;
drivers/net/ethernet/mellanox/mlxsw/pci.c
583
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
590
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
592
mlxsw_pci_rdq_page_free(q, elem_info, j);
drivers/net/ethernet/mellanox/mlxsw/pci.c
595
q->u.rdq.cq = NULL;
drivers/net/ethernet/mellanox/mlxsw/pci.c
597
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
603
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
608
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
drivers/net/ethernet/mellanox/mlxsw/pci.c
609
for (i = 0; i < q->count; i++) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
610
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
drivers/net/ethernet/mellanox/mlxsw/pci.c
612
mlxsw_pci_rdq_page_free(q, elem_info, j);
drivers/net/ethernet/mellanox/mlxsw/pci.c
617
struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
619
q->u.cq.v = mlxsw_pci->max_cqe_ver;
drivers/net/ethernet/mellanox/mlxsw/pci.c
621
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
drivers/net/ethernet/mellanox/mlxsw/pci.c
622
q->num < mlxsw_pci->num_sdqs &&
drivers/net/ethernet/mellanox/mlxsw/pci.c
624
q->u.cq.v = MLXSW_PCI_CQE_V1;
drivers/net/ethernet/mellanox/mlxsw/pci.c
654
struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
666
spin_lock(&q->lock);
drivers/net/ethernet/mellanox/mlxsw/pci.c
667
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
686
if (q->consumer_counter++ != consumer_counter_limit)
drivers/net/ethernet/mellanox/mlxsw/pci.c
688
spin_unlock(&q->lock);
drivers/net/ethernet/mellanox/mlxsw/pci.c
742
struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
755
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
757
if (q->consumer_counter++ != consumer_counter_limit)
drivers/net/ethernet/mellanox/mlxsw/pci.c
764
err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
drivers/net/ethernet/mellanox/mlxsw/pci.c
769
err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
drivers/net/ethernet/mellanox/mlxsw/pci.c
773
skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
drivers/net/ethernet/mellanox/mlxsw/pci.c
776
mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
drivers/net/ethernet/mellanox/mlxsw/pci.c
817
q->producer_counter++;
drivers/net/ethernet/mellanox/mlxsw/pci.c
821
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
827
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
829
owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
drivers/net/ethernet/mellanox/mlxsw/pci.c
830
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
drivers/net/ethernet/mellanox/mlxsw/pci.c
832
q->consumer_counter++;
drivers/net/ethernet/mellanox/mlxsw/pci.c
837
static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
842
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
843
owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem);
drivers/net/ethernet/mellanox/mlxsw/pci.c
844
return !mlxsw_pci_elem_hw_owned(q, owner_bit);
drivers/net/ethernet/mellanox/mlxsw/pci.c
849
struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
drivers/net/ethernet/mellanox/mlxsw/pci.c
851
struct mlxsw_pci_queue *rdq = q->u.cq.dq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
852
struct mlxsw_pci *mlxsw_pci = q->pci;
drivers/net/ethernet/mellanox/mlxsw/pci.c
860
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
862
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
863
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
876
wqe_counter, q->u.cq.v, cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
882
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
892
if (mlxsw_pci_cq_cqe_to_handle(q))
drivers/net/ethernet/mellanox/mlxsw/pci.c
904
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
911
struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
drivers/net/ethernet/mellanox/mlxsw/pci.c
913
struct mlxsw_pci_queue *sdq = q->u.cq.dq;
drivers/net/ethernet/mellanox/mlxsw/pci.c
914
struct mlxsw_pci *mlxsw_pci = q->pci;
drivers/net/ethernet/mellanox/mlxsw/pci.c
918
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
drivers/net/ethernet/mellanox/mlxsw/pci.c
920
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
921
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
drivers/net/ethernet/mellanox/mlxsw/pci.c
934
memcpy(ncqe, cqe, q->elem_size);
drivers/net/ethernet/mellanox/mlxsw/pci.c
935
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
938
wqe_counter, q->u.cq.v, ncqe, budget);
drivers/net/ethernet/mellanox/mlxsw/pci.c
952
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
drivers/net/ethernet/mellanox/mlxsw/pci.c
959
const struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
964
if (q->num < mlxsw_pci->num_sdqs)
drivers/net/ethernet/mellanox/mlxsw/pci.c
970
static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
973
struct mlxsw_pci *mlxsw_pci = q->pci;
drivers/net/ethernet/mellanox/mlxsw/pci.c
977
netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi,
drivers/net/ethernet/mellanox/mlxsw/pci.c
981
netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi,
drivers/net/ethernet/mellanox/mlxsw/pci.c
987
static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
drivers/net/ethernet/mellanox/mlxsw/pci.c
989
netif_napi_del(&q->u.cq.napi);
drivers/net/ethernet/mellanox/mlxsw/pci.c
992
static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
drivers/net/ethernet/mellanox/mlxsw/pci.c
996
struct mlxsw_pci *mlxsw_pci = q->pci;
drivers/net/ethernet/microsoft/mana/mana_en.c
591
int q;
drivers/net/ethernet/microsoft/mana/mana_en.c
603
for (q = 0; q < num_queues; q++) {
drivers/net/ethernet/microsoft/mana/mana_en.c
604
rx_stats = &apc->rxqs[q]->stats;
drivers/net/ethernet/microsoft/mana/mana_en.c
616
for (q = 0; q < num_queues; q++) {
drivers/net/ethernet/microsoft/mana/mana_en.c
617
tx_stats = &apc->tx_qp[q].txq.stats;
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
214
int q, i = 0;
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
225
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
226
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
228
for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
229
data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
231
for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
232
data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
234
for (q = 0; q < num_queues; q++) {
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
235
rx_stats = &apc->rxqs[q]->stats;
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
253
for (q = 0; q < num_queues; q++) {
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
254
tx_stats = &apc->tx_qp[q].txq.stats;
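The mana ethtool lines above use an offset table to dump stats: each descriptor stores a byte offset into a stats struct, and the loop reads a u64 at base + offset. A self-contained sketch of the same trick; the struct and array names here are illustrative, not the driver's:

struct stat_desc_ex {
	const char *name;
	size_t offset;
};

struct eth_stats_ex {
	u64 rx_packets;
	u64 tx_packets;
};

#define STAT_EX(field) { #field, offsetof(struct eth_stats_ex, field) }

static const struct stat_desc_ex stats_ex[] = {
	STAT_EX(rx_packets),
	STAT_EX(tx_packets),
};

static void fill_stats_ex(const struct eth_stats_ex *s, u64 *data)
{
	int q;

	for (q = 0; q < ARRAY_SIZE(stats_ex); q++)
		data[q] = *(u64 *)((const char *)s + stats_ex[q].offset);
}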
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
685
u8 vnic, u8 q)
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
690
FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCIE_Q, q) |
drivers/net/ethernet/netronome/nfp/nfp_net.h
856
static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
drivers/net/ethernet/netronome/nfp/nfp_net.h
858
writel(val, q + NFP_QCP_QUEUE_ADD_RPTR);
drivers/net/ethernet/netronome/nfp/nfp_net.h
867
static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
drivers/net/ethernet/netronome/nfp/nfp_net.h
869
writel(val, q + NFP_QCP_QUEUE_ADD_WPTR);
drivers/net/ethernet/netronome/nfp/nfp_net.h
872
static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
drivers/net/ethernet/netronome/nfp/nfp_net.h
882
val = readl(q + off);
drivers/net/ethernet/netronome/nfp/nfp_net.h
896
static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
drivers/net/ethernet/netronome/nfp/nfp_net.h
898
return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
drivers/net/ethernet/netronome/nfp/nfp_net.h
907
static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
drivers/net/ethernet/netronome/nfp/nfp_net.h
909
return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
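The nfp_net.h accessors above wrap the NFP queue controller's pointer window: a write to the ADD_RPTR/ADD_WPTR offset bumps the corresponding pointer by the written amount, and _nfp_qcp_read() returns the current pointer value. A hypothetical caller, posting n freshly written TX descriptors and polling hardware progress:

static u32 qcp_post_and_poll_ex(u8 __iomem *q, u32 n)
{
	nfp_qcp_wr_ptr_add(q, n);	/* advertise n new entries to HW */
	return nfp_qcp_rd_ptr_read(q);	/* how far HW has consumed */
}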
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
122
struct ionic_queue *q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
125
qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
139
debugfs_create_u32("index", 0400, q_dentry, &q->index);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
140
debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
141
debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
142
debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
143
debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
144
debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
145
debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
147
debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
148
debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
153
desc_blob->data = q->base;
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
154
desc_blob->size = (unsigned long)q->num_descs * q->desc_size;
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
161
desc_blob->data = q->sg_base;
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
162
desc_blob->size = (unsigned long)q->num_descs * q->sg_desc_size;
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
77
struct ionic_queue *q = seq->private;
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
79
seq_printf(seq, "%d\n", q->tail_idx);
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
87
struct ionic_queue *q = seq->private;
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
89
seq_printf(seq, "%d\n", q->head_idx);
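The ionic_debugfs.c lines above follow the usual debugfs layout: one directory per queue, plain u32/u64 attributes exported straight from struct fields, and seq_file hooks for values formatted at read time. A sketch of the seq_file half, using DEFINE_SHOW_ATTRIBUTE to generate the fops (the _ex names are illustrative):

static int q_tail_ex_show(struct seq_file *seq, void *v)
{
	struct ionic_queue *q = seq->private;

	seq_printf(seq, "%d\n", q->tail_idx);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(q_tail_ex);

/* wired up as: debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_ex_fops); */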
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1023
struct ionic_queue *q, unsigned int index, const char *name,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1036
q->lif = lif;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1037
q->index = index;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1038
q->num_descs = num_descs;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1039
q->desc_size = desc_size;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1040
q->sg_desc_size = sg_desc_size;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1041
q->tail_idx = 0;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1042
q->head_idx = 0;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1043
q->pid = pid;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1045
snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1050
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1052
struct ionic_lif *lif = q->lif;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1053
struct device *dev = q->dev;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1055
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1058
q->lif->index, q->name, q->hw_type, q->hw_index,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1059
q->head_idx, ring_doorbell);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1062
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1063
q->dbval | q->head_idx);
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1065
q->dbell_jiffies = jiffies;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1069
bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1073
mask = q->num_descs - 1;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1074
tail = q->tail_idx;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
1075
head = q->head_idx;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
64
then = qcq->q.dbell_jiffies;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
67
if (dif > qcq->q.dbell_deadline)
drivers/net/ethernet/pensando/ionic/ionic_dev.c
868
struct ionic_queue *q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_dev.c
874
.q_init.type = q->type,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
875
.q_init.ver = qcq->q.lif->qtype_info[q->type].version,
drivers/net/ethernet/pensando/ionic/ionic_dev.c
876
.q_init.index = cpu_to_le32(q->index),
drivers/net/ethernet/pensando/ionic/ionic_dev.c
879
.q_init.pid = cpu_to_le16(q->pid),
drivers/net/ethernet/pensando/ionic/ionic_dev.c
881
.q_init.ring_size = ilog2(q->num_descs),
drivers/net/ethernet/pensando/ionic/ionic_dev.c
882
.q_init.ring_base = cpu_to_le64(q->base_pa),
drivers/net/ethernet/pensando/ionic/ionic_dev.h
310
static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_dev.h
312
unsigned int avail = q->tail_idx;
drivers/net/ethernet/pensando/ionic/ionic_dev.h
314
if (q->head_idx >= avail)
drivers/net/ethernet/pensando/ionic/ionic_dev.h
315
avail += q->num_descs - q->head_idx - 1;
drivers/net/ethernet/pensando/ionic/ionic_dev.h
317
avail -= q->head_idx + 1;
drivers/net/ethernet/pensando/ionic/ionic_dev.h
322
static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
drivers/net/ethernet/pensando/ionic/ionic_dev.h
324
return ionic_q_space_avail(q) >= want;
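ionic_q_space_avail() above computes (tail_idx - head_idx - 1) mod num_descs without a modulo, keeping one slot unused so a full ring stays distinguishable from an empty one. A standalone copy with two worked values, assuming num_descs = 16:

static unsigned int space_avail_ex(unsigned int head, unsigned int tail,
				   unsigned int num_descs)
{
	unsigned int avail = tail;

	if (head >= avail)
		avail += num_descs - head - 1;
	else
		avail -= head + 1;

	return avail;	/* (head 5, tail 2) -> 12; (head 2, tail 5) -> 2 */
}

ionic_q_has_space(q, want) then reduces to comparing that result against want.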
drivers/net/ethernet/pensando/ionic/ionic_dev.h
371
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
drivers/net/ethernet/pensando/ionic/ionic_dev.h
382
struct ionic_queue *q, unsigned int index, const char *name,
drivers/net/ethernet/pensando/ionic/ionic_dev.h
385
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell);
drivers/net/ethernet/pensando/ionic/ionic_dev.h
386
bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
drivers/net/ethernet/pensando/ionic/ionic_dev.h
393
bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
drivers/net/ethernet/pensando/ionic/ionic_dev.h
394
bool ionic_txq_poke_doorbell(struct ionic_queue *q);
drivers/net/ethernet/pensando/ionic/ionic_dev.h
395
bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1005
txq->q.features = features;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1065
rxq->q.features = features;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1078
ionic_rx_fill(&rxq->q, NULL);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1186
qtype = lif->hwstamp_rxq->q.type;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1189
qid = lif->hwstamp_rxq->q.index;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1260
ionic_adminq_poke_doorbell(&lif->adminqcq->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1262
ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
1264
ionic_txq_poke_doorbell(&lif->hwstamp_txq->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2006
ionic_tx_empty(&lif->txqcqs[i]->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2013
ionic_rx_empty(&lif->rxqcqs[i]->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2021
ionic_tx_empty(&lif->hwstamp_txq->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2026
ionic_rx_empty(&lif->hwstamp_rxq->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2129
lif->rxqcqs[i]->q.features = lif->rxq_features;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2199
ionic_rx_fill(&lif->rxqcqs[i]->q,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2200
READ_ONCE(lif->rxqcqs[i]->q.xdp_prog));
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2213
ionic_rx_fill(&lif->hwstamp_rxq->q, NULL);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
232
struct ionic_queue *q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
241
"%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2683
static void ionic_unregister_rxq_info(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2687
if (!q->xdp_rxq_info)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2690
xi = q->xdp_rxq_info;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2691
q->xdp_rxq_info = NULL;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2697
static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2706
err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2708
netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg failed, err %d\n",
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2709
q->index, err);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2713
err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_POOL, q->page_pool);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2715
netdev_err(q->lif->netdev, "q%d xdp_rxq_info_reg_mem_model failed, err %d\n",
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2716
q->index, err);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2721
q->xdp_rxq_info = rxq_info;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2740
struct ionic_queue *q = &lif->rxqcqs[i]->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2742
WRITE_ONCE(q->xdp_prog, xdp_prog);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2889
if (a->q.type == IONIC_QTYPE_RXQ) {
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2890
swap(a->q.page_pool, b->q.page_pool);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2891
a->q.page_pool->p.napi = &a->napi;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2892
if (b->q.page_pool) /* is NULL when increasing queue count */
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2893
b->q.page_pool->p.napi = &b->napi;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2896
swap(a->q.features, b->q.features);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2897
swap(a->q.num_descs, b->q.num_descs);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2898
swap(a->q.desc_size, b->q.desc_size);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2899
swap(a->q.base, b->q.base);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2900
swap(a->q.base_pa, b->q.base_pa);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2901
swap(a->q.info, b->q.info);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2902
swap(a->q.xdp_prog, b->q.xdp_prog);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2903
swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2904
swap(a->q.partner, b->q.partner);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2909
swap(a->q.sg_desc_size, b->q.sg_desc_size);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2910
swap(a->q.sg_base, b->q.sg_base);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2911
swap(a->q.sg_base_pa, b->q.sg_base_pa);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
292
struct ionic_queue *q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
2925
ionic_debugfs_add_qcq(a->q.lif, a);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
293
struct ionic_lif *lif = q->lif;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
302
.type = q->type,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3029
rx_qcqs[i]->q.features = qparam->rxq_features;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
303
.index = cpu_to_le32(q->index),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3030
rx_qcqs[i]->q.xdp_prog = qparam->xdp_prog;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
337
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
352
q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3565
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3570
q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3582
q->hw_type = comp.hw_type;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3583
q->hw_index = le32_to_cpu(comp.hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3584
q->dbval = IONIC_DBELL_QID(q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3586
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3587
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3589
q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3590
q->dbell_jiffies = jiffies;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3612
struct ionic_queue *q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3620
.type = q->type,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3621
.ver = lif->qtype_info[q->type].version,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3622
.index = cpu_to_le32(q->index),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3626
.pid = cpu_to_le16(q->pid),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3627
.ring_size = ilog2(q->num_descs),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3628
.ring_base = cpu_to_le64(q->base_pa),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3642
q->hw_type = ctx.comp.q_init.hw_type;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3643
q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3644
q->dbval = IONIC_DBELL_QID(q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3646
dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3647
dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
3650
q->admin_info[0].ctx = lif;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
375
ctx.cmd.q_control.type = q->type;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
376
ctx.cmd.q_control.index = cpu_to_le32(q->index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
393
ionic_unregister_rxq_info(&qcq->q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
451
page_pool_destroy(qcq->q.page_pool);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
452
qcq->q.page_pool = NULL;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
455
vfree(qcq->q.info);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
456
qcq->q.info = NULL;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
517
qcq->q.name, err);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
52
static void ionic_unregister_rxq_info(struct ionic_queue *q);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
53
static int ionic_register_rxq_info(struct ionic_queue *q, unsigned int napi_id);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
545
netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
578
new->q.dev = dev;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
581
new->q.info = vcalloc(num_descs, desc_info_size);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
582
if (!new->q.info) {
drivers/net/ethernet/pensando/ionic/ionic_lif.c
604
new->q.page_pool = page_pool_create(&pp_params);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
605
if (IS_ERR(new->q.page_pool)) {
drivers/net/ethernet/pensando/ionic/ionic_lif.c
607
err = PTR_ERR(new->q.page_pool);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
608
new->q.page_pool = NULL;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
613
new->q.type = type;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
614
new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
616
err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
62
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
650
new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
651
new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
654
new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
656
new->cq.bound_q = &new->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
66
q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
667
new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
668
new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
67
if (q->type == IONIC_QTYPE_RXQ)
drivers/net/ethernet/pensando/ionic/ionic_lif.c
693
new->q.cmb_base = new->cmb_q_base;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
694
new->q.cmb_base_pa = new->cmb_q_base_pa;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
708
new->cq.bound_q = &new->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
71
lif = q->lif;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
720
new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
721
new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
747
page_pool_destroy(new->q.page_pool);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
749
vfree(new->q.info);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
820
qcq->q.tail_idx = 0;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
821
qcq->q.head_idx = 0;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
834
struct ionic_queue *q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
841
.type = q->type,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
842
.ver = lif->qtype_info[q->type].version,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
843
.index = cpu_to_le32(q->index),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
847
.pid = cpu_to_le16(q->pid),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
848
.ring_size = ilog2(q->num_descs),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
849
.ring_base = cpu_to_le64(q->base_pa),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
851
.sg_ring_base = cpu_to_le64(q->sg_base_pa),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
852
.features = cpu_to_le64(q->features),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
878
q->hw_type = ctx.comp.q_init.hw_type;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
879
q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
880
q->dbval = IONIC_DBELL_QID(q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
882
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
883
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
885
q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
886
q->dbell_jiffies = jiffies;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
899
struct ionic_queue *q = &qcq->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
906
.type = q->type,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
907
.ver = lif->qtype_info[q->type].version,
drivers/net/ethernet/pensando/ionic/ionic_lif.c
908
.index = cpu_to_le32(q->index),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
911
.pid = cpu_to_le16(q->pid),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
912
.ring_size = ilog2(q->num_descs),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
913
.ring_base = cpu_to_le64(q->base_pa),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
915
.sg_ring_base = cpu_to_le64(q->sg_base_pa),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
916
.features = cpu_to_le64(q->features),
drivers/net/ethernet/pensando/ionic/ionic_lif.c
921
q->partner = &lif->txqcqs[q->index]->q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
922
q->partner->partner = q;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
947
q->hw_type = ctx.comp.q_init.hw_type;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
948
q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
949
q->dbval = IONIC_DBELL_QID(q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
951
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
952
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
drivers/net/ethernet/pensando/ionic/ionic_lif.c
954
q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
955
q->dbell_jiffies = jiffies;
drivers/net/ethernet/pensando/ionic/ionic_lif.c
961
err = ionic_register_rxq_info(q, qcq->napi.napi_id);
drivers/net/ethernet/pensando/ionic/ionic_lif.h
333
static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_lif.h
335
return q->features & IONIC_TXQ_F_HWSTAMP;
drivers/net/ethernet/pensando/ionic/ionic_lif.h
88
struct ionic_queue q;
drivers/net/ethernet/pensando/ionic/ionic_lif.h
96
#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
drivers/net/ethernet/pensando/ionic/ionic_lif.h
97
#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])
drivers/net/ethernet/pensando/ionic/ionic_lif.h
98
#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index])
drivers/net/ethernet/pensando/ionic/ionic_main.c
198
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_main.c
206
q = &lif->adminqcq->q;
drivers/net/ethernet/pensando/ionic/ionic_main.c
208
while (q->tail_idx != q->head_idx) {
drivers/net/ethernet/pensando/ionic/ionic_main.c
209
desc = &q->adminq[q->tail_idx];
drivers/net/ethernet/pensando/ionic/ionic_main.c
210
desc_info = &q->admin_info[q->tail_idx];
drivers/net/ethernet/pensando/ionic/ionic_main.c
213
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/pensando/ionic/ionic_main.c
257
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_main.c
263
q = cq->bound_q;
drivers/net/ethernet/pensando/ionic/ionic_main.c
264
lif = q->admin_info[0].ctx;
drivers/net/ethernet/pensando/ionic/ionic_main.c
308
struct ionic_queue *q = cq->bound_q;
drivers/net/ethernet/pensando/ionic/ionic_main.c
318
if (q->tail_idx == q->head_idx)
drivers/net/ethernet/pensando/ionic/ionic_main.c
322
desc_info = &q->admin_info[q->tail_idx];
drivers/net/ethernet/pensando/ionic/ionic_main.c
323
index = q->tail_idx;
drivers/net/ethernet/pensando/ionic/ionic_main.c
324
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/pensando/ionic/ionic_main.c
330
dev_dbg(q->dev, "comp admin queue command:\n");
drivers/net/ethernet/pensando/ionic/ionic_main.c
341
bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_main.c
343
struct ionic_lif *lif = q->lif;
drivers/net/ethernet/pensando/ionic/ionic_main.c
349
if (q->tail_idx == q->head_idx) {
drivers/net/ethernet/pensando/ionic/ionic_main.c
355
then = q->dbell_jiffies;
drivers/net/ethernet/pensando/ionic/ionic_main.c
358
if (dif > q->dbell_deadline) {
drivers/net/ethernet/pensando/ionic/ionic_main.c
359
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
drivers/net/ethernet/pensando/ionic/ionic_main.c
360
q->dbval | q->head_idx);
drivers/net/ethernet/pensando/ionic/ionic_main.c
362
q->dbell_jiffies = now;
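ionic_adminq_poke_doorbell() above re-rings the doorbell only when descriptors are still outstanding (tail != head) and the last ring is older than the queue's deadline, so a missed or coalesced doorbell cannot strand the queue. A sketch of that shape; it follows the fields in the listed lines but uses time_after() where the driver computes the jiffies delta explicitly:

static bool poke_doorbell_ex(struct ionic_queue *q)
{
	unsigned long now = jiffies;

	if (q->tail_idx == q->head_idx)
		return false;			/* nothing in flight */

	if (time_after(now, q->dbell_jiffies + q->dbell_deadline)) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);
		q->dbell_jiffies = now;
	}

	return true;
}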
drivers/net/ethernet/pensando/ionic/ionic_main.c
375
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_main.c
384
q = &lif->adminqcq->q;
drivers/net/ethernet/pensando/ionic/ionic_main.c
386
if (!ionic_q_has_space(q, 1)) {
drivers/net/ethernet/pensando/ionic/ionic_main.c
395
desc_info = &q->admin_info[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_main.c
398
desc = &q->adminq[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_main.c
405
ionic_q_post(q, true);
drivers/net/ethernet/pensando/ionic/ionic_stats.c
222
ionic_add_lif_txq_stats(lif, lif->hwstamp_txq->q.index, stats);
drivers/net/ethernet/pensando/ionic/ionic_stats.c
225
ionic_add_lif_rxq_stats(lif, lif->hwstamp_rxq->q.index, stats);
drivers/net/ethernet/pensando/ionic/ionic_stats.c
288
ionic_sw_stats_get_tx_strings(lif, buf, lif->hwstamp_txq->q.index);
drivers/net/ethernet/pensando/ionic/ionic_stats.c
294
ionic_sw_stats_get_rx_strings(lif, buf, lif->hwstamp_rxq->q.index);
drivers/net/ethernet/pensando/ionic/ionic_stats.c
349
ionic_sw_stats_get_txq_values(lif, buf, lif->hwstamp_txq->q.index);
drivers/net/ethernet/pensando/ionic/ionic_stats.c
355
ionic_sw_stats_get_rxq_values(lif, buf, lif->hwstamp_rxq->q.index);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1029
ionic_rxq_poke_doorbell(&qcq->q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
105
static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
107
if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1071
ionic_rxq_poke_doorbell(&rxqcq->q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1073
ionic_txq_poke_doorbell(&txqcq->q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1079
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
108
return q->txq_sgl_v1[q->head_idx].elems;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1082
struct device *dev = q->dev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1088
dev_name(dev), q->name);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1089
q_to_tx_stats(q)->dma_map_err++;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1095
static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1099
struct device *dev = q->dev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
110
return q->txq_sgl[q->head_idx].elems;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1105
dev_name(dev), q->name);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1106
q_to_tx_stats(q)->dma_map_err++;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1112
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1116
struct device *dev = q->dev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1122
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1132
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
114
struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1157
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
116
return netdev_get_tx_queue(netdev, q->index);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1161
struct device *dev = q->dev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1177
static void ionic_tx_clean(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1182
struct ionic_tx_stats *stats = q_to_tx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1183
struct ionic_qcq *qcq = q_to_qcq(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1187
ionic_xdp_tx_desc_clean(q->partner, desc_info, in_napi);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1190
if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1191
netif_wake_subqueue(q->lif->netdev, q->index);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1196
ionic_tx_desc_unmap_bufs(q, desc_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1202
if (unlikely(ionic_txq_hwstamp_enabled(q))) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1217
hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1241
struct ionic_queue *q = cq->bound_q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1256
desc_info = &q->tx_info[q->tail_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1258
index = q->tail_idx;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1259
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1260
ionic_tx_clean(q, desc_info, comp, in_napi);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
129
static void __ionic_rx_put_buf(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1295
struct ionic_queue *q = cq->bound_q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1297
if (likely(!ionic_txq_hwstamp_enabled(q)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1298
netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1300
ionic_q_space_avail(q),
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1317
void ionic_tx_empty(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1324
while (q->head_idx != q->tail_idx) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1325
desc_info = &q->tx_info[q->tail_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1327
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1328
ionic_tx_clean(q, desc_info, NULL, false);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1336
if (likely(!ionic_txq_hwstamp_enabled(q))) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1337
struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
136
page_pool_put_full_page(q->page_pool, buf_info->page, recycle_direct);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1389
static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1413
ionic_write_cmb_desc(q, desc);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1417
if (likely(!ionic_txq_hwstamp_enabled(q)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1418
netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1419
ionic_txq_post(q, false);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1421
ionic_txq_post(q, done);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1425
static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1428
struct ionic_tx_stats *stats = q_to_tx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
143
static void ionic_rx_put_buf(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
146
__ionic_rx_put_buf(q, buf_info, false);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1467
desc_info = &q->tx_info[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1468
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
149
static void ionic_rx_put_buf_direct(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
15
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1512
desc = &q->txq[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1513
elem = ionic_tx_sg_elems(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
152
__ionic_rx_put_buf(q, buf_info, true);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1531
ionic_tx_tso_post(netdev, q, desc, skb, desc_addr, desc_nsge,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1536
desc_info = &q->tx_info[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1548
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
155
static void ionic_rx_add_skb_frag(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1551
struct ionic_txq_desc *desc = &q->txq[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1553
struct ionic_tx_stats *stats = q_to_tx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1579
ionic_write_cmb_desc(q, desc);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1587
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1590
struct ionic_txq_desc *desc = &q->txq[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1592
struct ionic_tx_stats *stats = q_to_tx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1618
ionic_write_cmb_desc(q, desc);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
162
page_pool_dma_sync_for_cpu(q->page_pool,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1623
static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1627
struct ionic_tx_stats *stats = q_to_tx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1631
elem = ionic_tx_sg_elems(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1640
static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1643
struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1644
struct ionic_tx_stats *stats = q_to_tx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1647
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1654
ionic_tx_calc_csum(q, skb, desc_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1656
ionic_tx_calc_no_csum(q, skb, desc_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1659
ionic_tx_skb_frags(q, skb, desc_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1665
if (likely(!ionic_txq_hwstamp_enabled(q))) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1666
struct netdev_queue *ndq = q_to_ndq(netdev, q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1668
if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1673
ionic_txq_post(q, ring_dbell);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1678
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1703
if (unlikely(nr_frags > q->max_sg_elems)) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1737
if (desc_bufs > q->max_sg_elems + 1) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1760
q_to_tx_stats(q)->linearize++;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1770
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1778
q = &lif->hwstamp_txq->q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1779
ndescs = ionic_tx_descs_needed(q, skb);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1783
if (unlikely(!ionic_q_has_space(q, ndescs)))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1788
err = ionic_tx_tso(netdev, q, skb);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
179
static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1790
err = ionic_tx(netdev, q, skb);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1798
q->drop++;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
18
static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1807
struct ionic_queue *q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1822
q = &lif->txqcqs[queue_index]->q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1824
ndescs = ionic_tx_descs_needed(q, skb);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1828
if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1829
ionic_q_space_avail(q),
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1834
err = ionic_tx_tso(netdev, q, skb);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1836
err = ionic_tx(netdev, q, skb);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
1844
q->drop++;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
194
skb = napi_get_frags(&q_to_qcq(q)->napi);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
197
dev_name(q->dev), q->name);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
198
q_to_rx_stats(q)->alloc_err++;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
211
ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
219
ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
22
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
231
struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
239
struct device *dev = q->dev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
245
skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
248
dev_name(dev), q->name);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
249
q_to_rx_stats(q)->alloc_err++;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
25
static void ionic_tx_clean(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
255
page_pool_dma_sync_for_cpu(q->page_pool,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
266
ionic_rx_put_buf_direct(q, buf_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
269
ionic_rx_put_buf_direct(q, buf_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
274
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
292
ionic_tx_desc_unmap_bufs(q, desc_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
30
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
304
static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
316
desc_info = &q->tx_info[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
317
desc = &q->txq[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
319
stats = q_to_tx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
324
dma_sync_single_for_device(q->dev, dma_addr,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
327
dma_addr = ionic_tx_map_single(q, frame->data, len);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
351
elem = ionic_tx_sg_elems(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
358
dma_sync_single_for_device(q->dev, dma_addr,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
36
ionic_q_post(q, ring_dbell);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
362
dma_addr = ionic_tx_map_frag(q, frag, 0,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
365
ionic_tx_desc_unmap_bufs(q, desc_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
39
static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
392
ionic_txq_post(q, ring_doorbell);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
41
ionic_q_post(q, ring_dbell);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
422
txq = &lif->txqcqs[qi]->q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
44
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
458
static void ionic_xdp_rx_unlink_bufs(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
50
netdev = q->lif->netdev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
51
netdev_txq = netdev_get_tx_queue(netdev, q->index);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
55
if (q->tail_idx == q->head_idx) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
607
static void ionic_rx_clean(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
61
then = q->dbell_jiffies;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
612
struct net_device *netdev = q->lif->netdev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
613
struct ionic_qcq *qcq = q_to_qcq(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
621
stats = q_to_rx_stats(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
628
dev_dbg(q->dev, "q%d drop comp->status %d comp->len %d desc->len %d\n",
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
629
q->index, comp->status, comp->len, q->rxq[q->head_idx].len);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
64
if (dif > q->dbell_deadline) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
640
if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
646
use_copybreak = len <= q->lif->rx_copybreak;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
648
skb = ionic_rx_copybreak(netdev, q, desc_info,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
65
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
652
skb = ionic_rx_build_skb(q, desc_info, headroom, len,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
66
q->dbval | q->head_idx);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
660
skb_record_rx_queue(skb, q->index);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
68
q->dbell_jiffies = now;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
700
if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
713
skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
729
struct ionic_queue *q = cq->bound_q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
738
if (q->tail_idx == q->head_idx)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
741
if (q->tail_idx != le16_to_cpu(comp->comp_index))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
744
desc_info = &q->rx_info[q->tail_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
745
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
748
ionic_rx_clean(q, desc_info, comp, xdp_prog);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
758
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
76
bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
764
if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
765
memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
768
void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
770
struct net_device *netdev = q->lif->netdev;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
787
n_fill = ionic_q_space_avail(q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
790
q->num_descs / IONIC_RX_FILL_DIV);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
815
desc = &q->rxq[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
816
desc_info = &q->rx_info[q->head_idx];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
82
if (q->tail_idx == q->head_idx)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
824
buf_info->page = page_pool_alloc(q->page_pool,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
840
sg_elem = q->rxq_sgl[q->head_idx].elems;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
841
for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
846
ionic_rx_put_buf_direct(q, buf_info);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
851
buf_info->page = page_pool_alloc(q->page_pool,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
86
then = q->dbell_jiffies;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
869
if (j < q->max_sg_elems)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
876
ionic_write_cmb_desc(q, desc);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
878
ionic_rxq_post(q, false);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
881
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
882
q->dbval | q->head_idx);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
884
q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
885
q->dbell_jiffies = jiffies;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
888
void ionic_rx_empty(struct ionic_queue *q)
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
89
if (dif > q->dbell_deadline) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
893
for (i = 0; i < q->num_descs; i++) {
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
894
desc_info = &q->rx_info[i];
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
896
ionic_rx_put_buf(q, &desc_info->bufs[j]);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
90
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
900
q->head_idx = 0;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
901
q->tail_idx = 0;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
91
q->dbval | q->head_idx);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
914
lif = qcq->q.lif;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
93
q->dbell_jiffies = now;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
95
dif = 2 * q->dbell_deadline;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
964
ionic_txq_poke_doorbell(&qcq->q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
980
struct ionic_queue *q = cq->bound_q;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
987
xdp_prog = READ_ONCE(q->xdp_prog);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
99
q->dbell_deadline = dif;
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
997
ionic_rx_fill(q, xdp_prog);
drivers/net/ethernet/pensando/ionic/ionic_txrx.h
11
void ionic_rx_fill(struct ionic_queue *q, struct bpf_prog *xdp_prog);
drivers/net/ethernet/pensando/ionic/ionic_txrx.h
12
void ionic_rx_empty(struct ionic_queue *q);
drivers/net/ethernet/pensando/ionic/ionic_txrx.h
13
void ionic_tx_empty(struct ionic_queue *q);
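The ionic entries above all revolve around one structure: a power-of-two descriptor ring where head_idx is advanced by the submit path, tail_idx chases it in the completion path, and both wrap with "& (num_descs - 1)". Below is a minimal userspace sketch of that indexing; the toy_* names are hypothetical, and only the masking idiom and the empty/space checks are meant to mirror the driver.

#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	unsigned int head_idx;   /* producer: next slot to fill */
	unsigned int tail_idx;   /* consumer: next slot to drain */
	unsigned int num_descs;  /* must be a power of two for the mask */
};

static bool toy_q_empty(const struct toy_queue *q)
{
	return q->tail_idx == q->head_idx;
}

/* Free slots, keeping one gap so "full" and "empty" stay distinguishable. */
static unsigned int toy_q_space_avail(const struct toy_queue *q)
{
	unsigned int avail = q->tail_idx;

	if (q->head_idx >= avail)
		avail += q->num_descs - q->head_idx - 1;
	else
		avail -= q->head_idx + 1;
	return avail;
}

int main(void)
{
	struct toy_queue q = { .head_idx = 0, .tail_idx = 0, .num_descs = 8 };

	/* post three descriptors, then drain them as completions arrive */
	for (int i = 0; i < 3; i++)
		q.head_idx = (q.head_idx + 1) & (q.num_descs - 1);
	while (!toy_q_empty(&q))
		q.tail_idx = (q.tail_idx + 1) & (q.num_descs - 1);

	printf("empty=%d space=%u\n", toy_q_empty(&q), toy_q_space_avail(&q));
	return 0;
}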
drivers/net/ethernet/renesas/ravb.h
1038
int (*receive)(struct net_device *ndev, int budget, int q);
drivers/net/ethernet/renesas/ravb_main.c
1020
rx_buff = &priv->rx_buffers[q][entry];
drivers/net/ethernet/renesas/ravb_main.c
1029
page_pool_put_page(priv->rx_pool[q],
drivers/net/ethernet/renesas/ravb_main.c
1035
ravb_rx_rcar_hwstamp(priv, q, desc, skb);
drivers/net/ethernet/renesas/ravb_main.c
1041
napi_gro_receive(&priv->napi[q], skb);
drivers/net/ethernet/renesas/ravb_main.c
1051
priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
drivers/net/ethernet/renesas/ravb_main.c
1052
priv->cur_rx[q] - priv->dirty_rx[q],
drivers/net/ethernet/renesas/ravb_main.c
1060
static int ravb_rx(struct net_device *ndev, int budget, int q)
drivers/net/ethernet/renesas/ravb_main.c
1065
return info->receive(ndev, budget, q);
drivers/net/ethernet/renesas/ravb_main.c
1190
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
drivers/net/ethernet/renesas/ravb_main.c
1199
if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
drivers/net/ethernet/renesas/ravb_main.c
1200
if (napi_schedule_prep(&priv->napi[q])) {
drivers/net/ethernet/renesas/ravb_main.c
1203
ravb_write(ndev, ric0 & ~BIT(q), RIC0);
drivers/net/ethernet/renesas/ravb_main.c
1204
ravb_write(ndev, tic & ~BIT(q), TIC);
drivers/net/ethernet/renesas/ravb_main.c
1206
ravb_write(ndev, BIT(q), RID0);
drivers/net/ethernet/renesas/ravb_main.c
1207
ravb_write(ndev, BIT(q), TID);
drivers/net/ethernet/renesas/ravb_main.c
1209
__napi_schedule(&priv->napi[q]);
drivers/net/ethernet/renesas/ravb_main.c
1255
int q;
drivers/net/ethernet/renesas/ravb_main.c
1263
for (q = RAVB_NC; q >= RAVB_BE; q--) {
drivers/net/ethernet/renesas/ravb_main.c
1264
if (ravb_queue_interrupt(ndev, q))
drivers/net/ethernet/renesas/ravb_main.c
1339
static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
drivers/net/ethernet/renesas/ravb_main.c
1354
if (ravb_queue_interrupt(ndev, q))
drivers/net/ethernet/renesas/ravb_main.c
1380
int q = napi - priv->napi;
drivers/net/ethernet/renesas/ravb_main.c
1381
int mask = BIT(q);
drivers/net/ethernet/renesas/ravb_main.c
1387
work_done = ravb_rx(ndev, budget, q);
drivers/net/ethernet/renesas/ravb_main.c
1393
ravb_tx_free(ndev, q, true);
drivers/net/ethernet/renesas/ravb_main.c
1394
netif_wake_subqueue(ndev, q);
drivers/net/ethernet/renesas/ravb_main.c
1641
int q;
drivers/net/ethernet/renesas/ravb_main.c
1645
for (q = RAVB_BE; q < num_rx_q; q++) {
drivers/net/ethernet/renesas/ravb_main.c
1646
struct net_device_stats *stats = &priv->stats[q];
drivers/net/ethernet/renesas/ravb_main.c
1648
data[i++] = priv->cur_rx[q];
drivers/net/ethernet/renesas/ravb_main.c
1649
data[i++] = priv->cur_tx[q];
drivers/net/ethernet/renesas/ravb_main.c
1650
data[i++] = priv->dirty_rx[q];
drivers/net/ethernet/renesas/ravb_main.c
1651
data[i++] = priv->dirty_tx[q];
drivers/net/ethernet/renesas/ravb_main.c
188
ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
drivers/net/ethernet/renesas/ravb_main.c
191
return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
drivers/net/ethernet/renesas/ravb_main.c
195
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
drivers/net/ethernet/renesas/ravb_main.c
198
struct net_device_stats *stats = &priv->stats[q];
drivers/net/ethernet/renesas/ravb_main.c
205
for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
drivers/net/ethernet/renesas/ravb_main.c
208
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
drivers/net/ethernet/renesas/ravb_main.c
210
desc = &priv->tx_ring[q][entry];
drivers/net/ethernet/renesas/ravb_main.c
2121
u16 q = skb_get_queue_mapping(skb);
drivers/net/ethernet/renesas/ravb_main.c
2134
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
drivers/net/ethernet/renesas/ravb_main.c
2138
netif_stop_subqueue(ndev, q);
drivers/net/ethernet/renesas/ravb_main.c
2146
entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
drivers/net/ethernet/renesas/ravb_main.c
2147
priv->tx_skb[q][entry / num_tx_desc] = skb;
drivers/net/ethernet/renesas/ravb_main.c
2150
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
drivers/net/ethernet/renesas/ravb_main.c
2175
desc = &priv->tx_ring[q][entry];
drivers/net/ethernet/renesas/ravb_main.c
218
if (priv->tx_skb[q][entry / num_tx_desc]) {
drivers/net/ethernet/renesas/ravb_main.c
2188
desc = &priv->tx_ring[q][entry];
drivers/net/ethernet/renesas/ravb_main.c
2200
if (q == RAVB_NC) {
drivers/net/ethernet/renesas/ravb_main.c
224
dev_kfree_skb_any(priv->tx_skb[q][entry]);
drivers/net/ethernet/renesas/ravb_main.c
225
priv->tx_skb[q][entry] = NULL;
drivers/net/ethernet/renesas/ravb_main.c
2252
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
drivers/net/ethernet/renesas/ravb_main.c
2254
priv->cur_tx[q] += num_tx_desc;
drivers/net/ethernet/renesas/ravb_main.c
2255
if (priv->cur_tx[q] - priv->dirty_tx[q] >
drivers/net/ethernet/renesas/ravb_main.c
2256
(priv->num_tx_ring[q] - 1) * num_tx_desc &&
drivers/net/ethernet/renesas/ravb_main.c
2257
!ravb_tx_free(ndev, q, true))
drivers/net/ethernet/renesas/ravb_main.c
2258
netif_stop_subqueue(ndev, q);
drivers/net/ethernet/renesas/ravb_main.c
2269
priv->tx_skb[q][entry / num_tx_desc] = NULL;
drivers/net/ethernet/renesas/ravb_main.c
238
static void ravb_rx_ring_free(struct net_device *ndev, int q)
drivers/net/ethernet/renesas/ravb_main.c
243
if (!priv->rx_ring[q].raw)
drivers/net/ethernet/renesas/ravb_main.c
246
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
drivers/net/ethernet/renesas/ravb_main.c
247
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
drivers/net/ethernet/renesas/ravb_main.c
248
priv->rx_desc_dma[q]);
drivers/net/ethernet/renesas/ravb_main.c
249
priv->rx_ring[q].raw = NULL;
drivers/net/ethernet/renesas/ravb_main.c
253
static void ravb_ring_free(struct net_device *ndev, int q)
drivers/net/ethernet/renesas/ravb_main.c
260
ravb_rx_ring_free(ndev, q);
drivers/net/ethernet/renesas/ravb_main.c
262
if (priv->tx_ring[q]) {
drivers/net/ethernet/renesas/ravb_main.c
263
ravb_tx_free(ndev, q, false);
drivers/net/ethernet/renesas/ravb_main.c
266
(priv->num_tx_ring[q] * num_tx_desc + 1);
drivers/net/ethernet/renesas/ravb_main.c
267
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
drivers/net/ethernet/renesas/ravb_main.c
268
priv->tx_desc_dma[q]);
drivers/net/ethernet/renesas/ravb_main.c
269
priv->tx_ring[q] = NULL;
drivers/net/ethernet/renesas/ravb_main.c
273
for (i = 0; i < priv->num_rx_ring[q]; i++) {
drivers/net/ethernet/renesas/ravb_main.c
274
if (priv->rx_buffers[q][i].page)
drivers/net/ethernet/renesas/ravb_main.c
275
page_pool_put_page(priv->rx_pool[q],
drivers/net/ethernet/renesas/ravb_main.c
276
priv->rx_buffers[q][i].page,
drivers/net/ethernet/renesas/ravb_main.c
279
kfree(priv->rx_buffers[q]);
drivers/net/ethernet/renesas/ravb_main.c
280
priv->rx_buffers[q] = NULL;
drivers/net/ethernet/renesas/ravb_main.c
281
page_pool_destroy(priv->rx_pool[q]);
drivers/net/ethernet/renesas/ravb_main.c
284
kfree(priv->tx_align[q]);
drivers/net/ethernet/renesas/ravb_main.c
285
priv->tx_align[q] = NULL;
drivers/net/ethernet/renesas/ravb_main.c
290
kfree(priv->tx_skb[q]);
drivers/net/ethernet/renesas/ravb_main.c
2907
int error, q;
drivers/net/ethernet/renesas/ravb_main.c
291
priv->tx_skb[q] = NULL;
drivers/net/ethernet/renesas/ravb_main.c
295
ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
drivers/net/ethernet/renesas/ravb_main.c
3035
for (q = RAVB_BE; q < info->dbat_entry_num; q++)
drivers/net/ethernet/renesas/ravb_main.c
3036
priv->desc_bat[q].die_dt = DT_EOS;
drivers/net/ethernet/renesas/ravb_main.c
304
rx_buff = &priv->rx_buffers[q][entry];
drivers/net/ethernet/renesas/ravb_main.c
306
rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
drivers/net/ethernet/renesas/ravb_main.c
331
ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
drivers/net/ethernet/renesas/ravb_main.c
338
entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
drivers/net/ethernet/renesas/ravb_main.c
339
rx_desc = ravb_rx_get_desc(priv, q, entry);
drivers/net/ethernet/renesas/ravb_main.c
341
if (!priv->rx_buffers[q][entry].page) {
drivers/net/ethernet/renesas/ravb_main.c
342
if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
drivers/net/ethernet/renesas/ravb_main.c
355
static void ravb_ring_format(struct net_device *ndev, int q)
drivers/net/ethernet/renesas/ravb_main.c
362
unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
drivers/net/ethernet/renesas/ravb_main.c
366
priv->cur_rx[q] = 0;
drivers/net/ethernet/renesas/ravb_main.c
367
priv->cur_tx[q] = 0;
drivers/net/ethernet/renesas/ravb_main.c
368
priv->dirty_rx[q] = 0;
drivers/net/ethernet/renesas/ravb_main.c
369
priv->dirty_tx[q] = 0;
drivers/net/ethernet/renesas/ravb_main.c
375
rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
drivers/net/ethernet/renesas/ravb_main.c
376
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
drivers/net/ethernet/renesas/ravb_main.c
379
memset(priv->tx_ring[q], 0, tx_ring_size);
drivers/net/ethernet/renesas/ravb_main.c
381
for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
drivers/net/ethernet/renesas/ravb_main.c
389
tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
drivers/net/ethernet/renesas/ravb_main.c
393
desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
drivers/net/ethernet/renesas/ravb_main.c
395
desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
drivers/net/ethernet/renesas/ravb_main.c
398
desc = &priv->desc_bat[q];
drivers/net/ethernet/renesas/ravb_main.c
400
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
drivers/net/ethernet/renesas/ravb_main.c
403
static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
drivers/net/ethernet/renesas/ravb_main.c
408
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
drivers/net/ethernet/renesas/ravb_main.c
410
priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
drivers/net/ethernet/renesas/ravb_main.c
411
&priv->rx_desc_dma[q],
drivers/net/ethernet/renesas/ravb_main.c
414
return priv->rx_ring[q].raw;
drivers/net/ethernet/renesas/ravb_main.c
418
static int ravb_ring_init(struct net_device *ndev, int q)
drivers/net/ethernet/renesas/ravb_main.c
425
.pool_size = priv->num_rx_ring[q],
drivers/net/ethernet/renesas/ravb_main.c
434
priv->rx_pool[q] = page_pool_create(&params);
drivers/net/ethernet/renesas/ravb_main.c
435
if (IS_ERR(priv->rx_pool[q]))
drivers/net/ethernet/renesas/ravb_main.c
439
priv->rx_buffers[q] = kzalloc_objs(*priv->rx_buffers[q],
drivers/net/ethernet/renesas/ravb_main.c
440
priv->num_rx_ring[q]);
drivers/net/ethernet/renesas/ravb_main.c
441
if (!priv->rx_buffers[q])
drivers/net/ethernet/renesas/ravb_main.c
445
priv->tx_skb[q] = kzalloc_objs(*priv->tx_skb[q], priv->num_tx_ring[q]);
drivers/net/ethernet/renesas/ravb_main.c
446
if (!priv->tx_skb[q])
drivers/net/ethernet/renesas/ravb_main.c
450
if (!ravb_alloc_rx_desc(ndev, q))
drivers/net/ethernet/renesas/ravb_main.c
454
priv->dirty_rx[q] = 0;
drivers/net/ethernet/renesas/ravb_main.c
455
ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
drivers/net/ethernet/renesas/ravb_main.c
456
memset(priv->rx_ring[q].raw, 0, ring_size);
drivers/net/ethernet/renesas/ravb_main.c
457
num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
drivers/net/ethernet/renesas/ravb_main.c
459
if (num_filled != priv->num_rx_ring[q])
drivers/net/ethernet/renesas/ravb_main.c
464
priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
drivers/net/ethernet/renesas/ravb_main.c
466
if (!priv->tx_align[q])
drivers/net/ethernet/renesas/ravb_main.c
472
(priv->num_tx_ring[q] * num_tx_desc + 1);
drivers/net/ethernet/renesas/ravb_main.c
473
priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
drivers/net/ethernet/renesas/ravb_main.c
474
&priv->tx_desc_dma[q],
drivers/net/ethernet/renesas/ravb_main.c
476
if (!priv->tx_ring[q])
drivers/net/ethernet/renesas/ravb_main.c
482
ravb_ring_free(ndev, q);
drivers/net/ethernet/renesas/ravb_main.c
798
static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
drivers/net/ethernet/renesas/ravb_main.c
812
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
drivers/net/ethernet/renesas/ravb_main.c
813
stats = &priv->stats[q];
drivers/net/ethernet/renesas/ravb_main.c
815
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
drivers/net/ethernet/renesas/ravb_main.c
818
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
drivers/net/ethernet/renesas/ravb_main.c
819
desc = &priv->rx_ring[q].desc[entry];
drivers/net/ethernet/renesas/ravb_main.c
849
rx_buff = &priv->rx_buffers[q][entry];
drivers/net/ethernet/renesas/ravb_main.c
864
page_pool_put_page(priv->rx_pool[q],
drivers/net/ethernet/renesas/ravb_main.c
891
page_pool_put_page(priv->rx_pool[q],
drivers/net/ethernet/renesas/ravb_main.c
924
napi_gro_receive(&priv->napi[q], skb);
drivers/net/ethernet/renesas/ravb_main.c
940
priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
drivers/net/ethernet/renesas/ravb_main.c
941
priv->cur_rx[q] - priv->dirty_rx[q],
drivers/net/ethernet/renesas/ravb_main.c
948
static void ravb_rx_rcar_hwstamp(struct ravb_private *priv, int q,
drivers/net/ethernet/renesas/ravb_main.c
956
if (q == RAVB_NC)
drivers/net/ethernet/renesas/ravb_main.c
973
static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
drivers/net/ethernet/renesas/ravb_main.c
977
struct net_device_stats *stats = &priv->stats[q];
drivers/net/ethernet/renesas/ravb_main.c
986
limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
drivers/net/ethernet/renesas/ravb_main.c
987
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
drivers/net/ethernet/renesas/ravb_main.c
988
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
drivers/net/ethernet/renesas/ravb_main.c
989
desc = &priv->rx_ring[q].ex_desc[entry];
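The ravb entries use a different ring convention: cur_rx/dirty_rx (and the tx pair) are free-running counters rather than masked indices. The slot is counter % num_ring, and cur - dirty is the number of consumed-but-not-refilled descriptors, which stays correct across unsigned wraparound. A sketch under those assumptions, with illustrative names and a refill that is assumed to always succeed:

#include <stdio.h>

#define RING_SIZE 16u   /* need not be a power of two: '%' is used */

int main(void)
{
	unsigned int cur = 0, dirty = 0;

	for (int pkt = 0; pkt < 40; pkt++) {
		unsigned int entry = cur % RING_SIZE;

		/* ...process rx descriptor ring[entry]... */
		(void)entry;
		cur++;

		/* re-arm everything consumed since the last refill; the
		 * driver adds the count the refill actually managed */
		if (cur - dirty >= RING_SIZE / 2)
			dirty += cur - dirty;
	}
	printf("in flight after loop: %u\n", cur - dirty);
	return 0;
}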
drivers/net/ethernet/sfc/ptp.c
1177
static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
drivers/net/ethernet/sfc/ptp.c
1187
__skb_queue_tail(q, skb);
drivers/net/ethernet/sfc/ptp.c
1191
__skb_queue_tail(q, skb);
drivers/net/ethernet/sfc/ptp.c
822
static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
drivers/net/ethernet/sfc/ptp.c
826
while ((skb = skb_dequeue(q))) {
drivers/net/ethernet/sfc/siena/ptp.c
1226
static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
drivers/net/ethernet/sfc/siena/ptp.c
1236
__skb_queue_tail(q, skb);
drivers/net/ethernet/sfc/siena/ptp.c
1239
__skb_queue_tail(q, skb);
drivers/net/ethernet/sfc/siena/ptp.c
1243
__skb_queue_tail(q, skb);
drivers/net/ethernet/sfc/siena/ptp.c
835
static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
drivers/net/ethernet/sfc/siena/ptp.c
839
while ((skb = skb_dequeue(q))) {
drivers/net/ethernet/sfc/siena/tx.c
115
struct efx_tx_queue *q;
drivers/net/ethernet/sfc/siena/tx.c
117
efx_for_each_channel_tx_queue(q, channel) {
drivers/net/ethernet/sfc/siena/tx.c
118
if (q->xmit_pending)
drivers/net/ethernet/sfc/siena/tx.c
119
efx_nic_push_buffers(q);
drivers/net/ethernet/sfc/tx.c
290
struct efx_tx_queue *q;
drivers/net/ethernet/sfc/tx.c
292
efx_for_each_channel_tx_queue(q, channel) {
drivers/net/ethernet/sfc/tx.c
293
if (q->xmit_pending)
drivers/net/ethernet/sfc/tx.c
294
efx_nic_push_buffers(q);
drivers/net/ethernet/stmicro/stmmac/common.h
89
struct stmmac_q_tx_stats q;
drivers/net/ethernet/stmicro/stmmac/dwmac4.h
277
#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
440
static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
454
irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
461
static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
475
irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
487
int q;
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
489
for (q = 0; q < tx_cnt; q++) {
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
490
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
499
*data++ = stmmac_get_tx_normal_irq_n(priv, q);
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
502
for (q = 0; q < rx_cnt; q++) {
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
503
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
512
*data++ = stmmac_get_rx_normal_irq_n(priv, q);
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
597
q_snapshot = txq_stats->q;
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
660
int q, stat;
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
662
for (q = 0; q < tx_cnt; q++) {
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
664
snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
669
for (q = 0; q < rx_cnt; q++) {
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
671
snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4553
u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4554
u64_stats_inc(&txq_stats->q.tx_tso_frames);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4555
u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4557
u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4844
u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4846
u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
5137
u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7228
int q;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7230
for (q = 0; q < tx_cnt; q++) {
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7231
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7237
tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7248
for (q = 0; q < rx_cnt; q++) {
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
7249
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
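The stmmac entries keep one stats block per queue, and the ethtool path emits a "qN_<name>" label per queue per counter. A small sketch of just that label generation; the stat names and queue counts here are illustrative, while ETH_GSTRING_LEN matches the kernel's 32-byte ethtool string size.

#include <stdio.h>

#define ETH_GSTRING_LEN 32

int main(void)
{
	static const char * const txq_stat_names[] = { "tx_pkt_n", "tx_irq_n" };
	static const char * const rxq_stat_names[] = { "rx_pkt_n", "rx_irq_n" };
	char data[ETH_GSTRING_LEN];
	int tx_cnt = 2, rx_cnt = 2;

	for (int q = 0; q < tx_cnt; q++)
		for (unsigned int s = 0; s < 2; s++) {
			snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, txq_stat_names[s]);
			puts(data);
		}
	for (int q = 0; q < rx_cnt; q++)
		for (unsigned int s = 0; s < 2; s++) {
			snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, rxq_stat_names[s]);
			puts(data);
		}
	return 0;
}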
drivers/net/ethernet/ti/davinci_emac.c
1415
int q, m, ret;
drivers/net/ethernet/ti/davinci_emac.c
1564
for (q = res_num - 1; q >= 0; q--) {
drivers/net/ethernet/ti/davinci_emac.c
1565
irq_num = platform_get_irq(priv->pdev, q);
drivers/net/ethernet/ti/davinci_emac.c
1570
for (q = res_num; q >= 0; q--) {
drivers/net/ethernet/ti/davinci_emac.c
1571
res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
drivers/net/ethernet/ti/davinci_emac.c
1575
if (q != res_num)
drivers/net/ethernet/ti/netcp_core.c
48
#define knav_queue_get_id(q) knav_queue_device_control(q, \
drivers/net/ethernet/ti/netcp_core.c
51
#define knav_queue_enable_notify(q) knav_queue_device_control(q, \
drivers/net/ethernet/ti/netcp_core.c
55
#define knav_queue_disable_notify(q) knav_queue_device_control(q, \
drivers/net/ethernet/ti/netcp_core.c
59
#define knav_queue_get_count(q) knav_queue_device_control(q, \
drivers/net/ethernet/via/via-velocity.c
1758
int q, int n)
drivers/net/ethernet/via/via-velocity.c
1760
struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
drivers/net/ethernet/via/via-velocity.h
1378
#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)]))
drivers/net/fddi/skfp/h/smc.h
449
struct s_queue q ; /* queue */
drivers/net/fddi/skfp/hwmtm.c
856
if (smc->q.ev_get != smc->q.ev_put) {
drivers/net/fddi/skfp/queue.c
104
if (++ev == &smc->q.ev_queue[MAX_EVENT])
drivers/net/fddi/skfp/queue.c
105
ev = smc->q.ev_queue ;
drivers/net/fddi/skfp/queue.c
108
smc->q.ev_get = ev;
drivers/net/fddi/skfp/queue.c
28
smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ;
drivers/net/fddi/skfp/queue.c
37
smc->q.ev_put->class = class ;
drivers/net/fddi/skfp/queue.c
38
smc->q.ev_put->event = event ;
drivers/net/fddi/skfp/queue.c
39
if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT])
drivers/net/fddi/skfp/queue.c
40
smc->q.ev_put = smc->q.ev_queue ;
drivers/net/fddi/skfp/queue.c
42
if (smc->q.ev_put == smc->q.ev_get) {
drivers/net/fddi/skfp/queue.c
70
ev = smc->q.ev_get ;
drivers/net/fddi/skfp/queue.c
71
PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ;
drivers/net/fddi/skfp/queue.c
72
while (ev != smc->q.ev_put) {
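The skfp queue.c entries implement a fixed-size circular event queue with raw pointers: ev_put and ev_get walk a static array, wrap by resetting to the array base, and the queue counts as full when put catches get right after an insert. A userspace sketch of the same pointer dance (class/event payloads renamed to avoid the C++ keyword):

#include <stdio.h>

#define MAX_EVENT 8

struct event { int class_, event_; };

static struct event ev_queue[MAX_EVENT];
static struct event *ev_put = ev_queue, *ev_get = ev_queue;

static int queue_event(int cls, int ev)
{
	ev_put->class_ = cls;
	ev_put->event_ = ev;
	if (++ev_put == &ev_queue[MAX_EVENT])
		ev_put = ev_queue;
	if (ev_put == ev_get)
		return -1;	/* overflow: the driver raises an SMT panic here */
	return 0;
}

static void dispatch(void)
{
	while (ev_get != ev_put) {
		printf("event %d/%d\n", ev_get->class_, ev_get->event_);
		if (++ev_get == &ev_queue[MAX_EVENT])
			ev_get = ev_queue;
	}
}

int main(void)
{
	queue_event(1, 2);
	queue_event(3, 4);
	dispatch();
	return 0;
}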
drivers/net/hyperv/netvsc_trace.h
43
TP_PROTO(const struct net_device *ndev, u16 q,
drivers/net/hyperv/netvsc_trace.h
45
TP_ARGS(ndev, q, msg),
drivers/net/hyperv/netvsc_trace.h
55
__entry->queue = q;
drivers/net/hyperv/netvsc_trace.h
66
TP_PROTO(const struct net_device *ndev, u16 q,
drivers/net/hyperv/netvsc_trace.h
68
TP_ARGS(ndev, q, msg)
drivers/net/hyperv/netvsc_trace.h
72
TP_PROTO(const struct net_device *ndev, u16 q,
drivers/net/hyperv/netvsc_trace.h
74
TP_ARGS(ndev, q, msg)
drivers/net/phy/sfp.c
602
const struct sfp_quirk *q;
drivers/net/phy/sfp.c
609
for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
drivers/net/phy/sfp.c
610
if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
drivers/net/phy/sfp.c
611
sfp_match(q->part, id->base.vendor_pn, ps))
drivers/net/phy/sfp.c
612
return q;
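The sfp.c entries are a plain linear table lookup: walk a static quirk array and return the first entry whose vendor and part match the module EEPROM. A sketch of that scan; the table contents are made up, and strcmp() simplifies the real sfp_match(), which compares fixed-width, space-padded EEPROM fields.

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct quirk { const char *vendor, *part; };

static const struct quirk quirks[] = {
	{ "ACME", "SFP-10G" },
	{ "ACME", "SFP-1G"  },
};

static const struct quirk *lookup(const char *vendor, const char *part)
{
	const struct quirk *q = quirks;

	for (size_t i = 0; i < ARRAY_SIZE(quirks); i++, q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->part, part))
			return q;
	return NULL;
}

int main(void)
{
	const struct quirk *q = lookup("ACME", "SFP-1G");

	printf("%s\n", q ? q->part : "no quirk");
	return 0;
}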
drivers/net/ppp/ppp_generic.c
1961
unsigned char *p, *q;
drivers/net/ppp/ppp_generic.c
2137
q = skb_put(frag, flen + hdrlen);
drivers/net/ppp/ppp_generic.c
2140
put_unaligned_be16(PPP_MP, q);
drivers/net/ppp/ppp_generic.c
2142
q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
drivers/net/ppp/ppp_generic.c
2143
q[3] = ppp->nxseq;
drivers/net/ppp/ppp_generic.c
2145
q[2] = bits;
drivers/net/ppp/ppp_generic.c
2146
q[3] = ppp->nxseq >> 16;
drivers/net/ppp/ppp_generic.c
2147
q[4] = ppp->nxseq >> 8;
drivers/net/ppp/ppp_generic.c
2148
q[5] = ppp->nxseq;
drivers/net/ppp/ppp_generic.c
2151
memcpy(q + hdrlen, p, flen);
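In the ppp_generic entries, q is the write cursor for a multilink PPP (MP) fragment header: two bytes of protocol field, then either a short-sequence form (flag bits plus a 12-bit sequence number in two bytes) or the long form (one flag byte plus a 24-bit sequence number). A standalone sketch of that packing; PPP_MP is the multilink protocol number from ppp_defs.h, and the begin-fragment bit value used in main() is illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PPP_MP 0x3d	/* multilink protocol, per ppp_defs.h */

static size_t mp_put_hdr(uint8_t *q, uint8_t bits, uint32_t nxseq, int shortseq)
{
	q[0] = PPP_MP >> 8;	/* big-endian protocol field */
	q[1] = PPP_MP & 0xff;
	if (shortseq) {
		q[2] = bits + ((nxseq >> 8) & 0xf);
		q[3] = nxseq & 0xff;
		return 4;
	}
	q[2] = bits;
	q[3] = (nxseq >> 16) & 0xff;
	q[4] = (nxseq >> 8) & 0xff;
	q[5] = nxseq & 0xff;
	return 6;
}

int main(void)
{
	uint8_t hdr[6];
	size_t n = mp_put_hdr(hdr, 0x80 /* begin-fragment bit */, 0x123456, 0);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}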
drivers/net/tap.c
1016
tap = tap_get_tap_dev(q);
drivers/net/tap.c
103
struct tap_queue *q)
drivers/net/tap.c
1030
return tun_vnet_ioctl(&q->vnet_hdr_sz, &q->flags, cmd, sp);
drivers/net/tap.c
1045
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
drivers/net/tap.c
1059
if (q->flags & IFF_VNET_HDR)
drivers/net/tap.c
1060
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
drivers/net/tap.c
1076
err = tun_vnet_hdr_to_skb(q->flags, skb, gso);
drivers/net/tap.c
108
rcu_assign_pointer(q->tap, tap);
drivers/net/tap.c
1087
tap = rcu_dereference(q->tap);
drivers/net/tap.c
109
rcu_assign_pointer(tap->taps[tap->numvtaps], q);
drivers/net/tap.c
110
sock_hold(&q->sk);
drivers/net/tap.c
1103
tap = rcu_dereference(q->tap);
drivers/net/tap.c
1113
struct tap_queue *q = container_of(sock, struct tap_queue, sock);
drivers/net/tap.c
112
q->file = file;
drivers/net/tap.c
1122
tap_get_user_xdp(q, xdp);
drivers/net/tap.c
1127
return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
drivers/net/tap.c
113
q->queue_index = tap->numvtaps;
drivers/net/tap.c
1134
struct tap_queue *q = container_of(sock, struct tap_queue, sock);
drivers/net/tap.c
114
q->enabled = true;
drivers/net/tap.c
1141
ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
drivers/net/tap.c
115
file->private_data = q;
drivers/net/tap.c
1151
struct tap_queue *q = container_of(sock, struct tap_queue,
drivers/net/tap.c
1153
return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
drivers/net/tap.c
116
list_add_tail(&q->next, &tap->queue_list);
drivers/net/tap.c
1169
struct tap_queue *q;
drivers/net/tap.c
1172
q = file->private_data;
drivers/net/tap.c
1173
if (!q)
drivers/net/tap.c
1175
return &q->sock;
drivers/net/tap.c
1181
struct tap_queue *q;
drivers/net/tap.c
1185
q = file->private_data;
drivers/net/tap.c
1186
if (!q)
drivers/net/tap.c
1188
return &q->ring;
drivers/net/tap.c
1195
struct tap_queue *q;
drivers/net/tap.c
1204
list_for_each_entry(q, &tap->queue_list, next)
drivers/net/tap.c
1205
rings[i++] = &q->ring;
drivers/net/tap.c
124
static int tap_disable_queue(struct tap_queue *q)
drivers/net/tap.c
130
if (!q->enabled)
drivers/net/tap.c
133
tap = rtnl_dereference(q->tap);
drivers/net/tap.c
136
int index = q->queue_index;
drivers/net/tap.c
143
q->enabled = false;
drivers/net/tap.c
159
static void tap_put_queue(struct tap_queue *q)
drivers/net/tap.c
164
tap = rtnl_dereference(q->tap);
drivers/net/tap.c
167
if (q->enabled)
drivers/net/tap.c
168
BUG_ON(tap_disable_queue(q));
drivers/net/tap.c
171
RCU_INIT_POINTER(q->tap, NULL);
drivers/net/tap.c
172
sock_put(&q->sk);
drivers/net/tap.c
173
list_del_init(&q->next);
drivers/net/tap.c
179
sock_put(&q->sk);
drivers/net/tap.c
237
struct tap_queue *q, *tmp;
drivers/net/tap.c
240
list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
drivers/net/tap.c
241
list_del_init(&q->next);
drivers/net/tap.c
242
RCU_INIT_POINTER(q->tap, NULL);
drivers/net/tap.c
243
if (q->enabled)
drivers/net/tap.c
246
sock_put(&q->sk);
drivers/net/tap.c
260
struct tap_queue *q;
drivers/net/tap.c
268
q = tap_get_queue(tap, skb);
drivers/net/tap.c
269
if (!q)
drivers/net/tap.c
278
if (q->flags & IFF_VNET_HDR)
drivers/net/tap.c
290
if (ptr_ring_produce(&q->ring, skb)) {
drivers/net/tap.c
300
if (ptr_ring_produce(&q->ring, skb)) {
drivers/net/tap.c
319
if (ptr_ring_produce(&q->ring, skb)) {
drivers/net/tap.c
326
wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
drivers/net/tap.c
441
struct tap_queue *q = container_of(sk, struct tap_queue, sk);
drivers/net/tap.c
443
ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
drivers/net/tap.c
450
struct tap_queue *q;
drivers/net/tap.c
459
q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
drivers/net/tap.c
461
if (!q)
drivers/net/tap.c
463
if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
drivers/net/tap.c
464
sk_free(&q->sk);
drivers/net/tap.c
468
init_waitqueue_head(&q->sock.wq.wait);
drivers/net/tap.c
469
q->sock.type = SOCK_RAW;
drivers/net/tap.c
470
q->sock.state = SS_CONNECTED;
drivers/net/tap.c
471
q->sock.file = file;
drivers/net/tap.c
472
q->sock.ops = &tap_socket_ops;
drivers/net/tap.c
473
sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
drivers/net/tap.c
474
q->sk.sk_write_space = tap_sock_write_space;
drivers/net/tap.c
475
q->sk.sk_destruct = tap_sock_destruct;
drivers/net/tap.c
476
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
drivers/net/tap.c
477
q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
drivers/net/tap.c
487
sock_set_flag(&q->sk, SOCK_ZEROCOPY);
drivers/net/tap.c
489
err = tap_set_queue(tap, file, q);
drivers/net/tap.c
504
sock_put(&q->sk);
drivers/net/tap.c
515
struct tap_queue *q = file->private_data;
drivers/net/tap.c
516
tap_put_queue(q);
drivers/net/tap.c
522
struct tap_queue *q = file->private_data;
drivers/net/tap.c
525
if (!q)
drivers/net/tap.c
529
poll_wait(file, &q->sock.wq.wait, wait);
drivers/net/tap.c
531
if (!ptr_ring_empty(&q->ring))
drivers/net/tap.c
534
if (sock_writeable(&q->sk) ||
drivers/net/tap.c
535
(!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
drivers/net/tap.c
536
sock_writeable(&q->sk)))
drivers/net/tap.c
572
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
drivers/net/tap.c
590
if (q->flags & IFF_VNET_HDR) {
drivers/net/tap.c
591
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
drivers/net/tap.c
593
hdr_len = tun_vnet_hdr_get(vnet_hdr_len, q->flags, from, &vnet_hdr);
drivers/net/tap.c
606
if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
drivers/net/tap.c
622
skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
drivers/net/tap.c
642
tap = rcu_dereference(q->tap);
drivers/net/tap.c
651
err = tun_vnet_hdr_to_skb(q->flags, skb, &vnet_hdr);
drivers/net/tap.c
683
tap = rcu_dereference(q->tap);
drivers/net/tap.c
694
struct tap_queue *q = file->private_data;
drivers/net/tap.c
700
return tap_get_user(q, NULL, from, noblock);
drivers/net/tap.c
704
static ssize_t tap_put_user(struct tap_queue *q,
drivers/net/tap.c
713
if (q->flags & IFF_VNET_HDR) {
drivers/net/tap.c
716
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
drivers/net/tap.c
718
ret = tun_vnet_hdr_from_skb(q->flags, NULL, skb, &vnet_hdr);
drivers/net/tap.c
756
static ssize_t tap_do_read(struct tap_queue *q,
drivers/net/tap.c
773
prepare_to_wait(sk_sleep(&q->sk), &wait,
drivers/net/tap.c
777
skb = ptr_ring_consume(&q->ring);
drivers/net/tap.c
792
finish_wait(sk_sleep(&q->sk), &wait);
drivers/net/tap.c
796
ret = tap_put_user(q, skb, to);
drivers/net/tap.c
808
struct tap_queue *q = file->private_data;
drivers/net/tap.c
815
ret = tap_do_read(q, to, noblock, NULL);
drivers/net/tap.c
82
struct tap_queue *q)
drivers/net/tap.c
822
static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
drivers/net/tap.c
827
tap = rtnl_dereference(q->tap);
drivers/net/tap.c
841
struct tap_queue *q = file->private_data;
drivers/net/tap.c
845
tap = tap_get_tap_dev(q);
drivers/net/tap.c
850
ret = tap_enable_queue(tap, file, q);
drivers/net/tap.c
852
ret = tap_disable_queue(q);
drivers/net/tap.c
860
static int set_offload(struct tap_queue *q, unsigned long arg)
drivers/net/tap.c
866
tap = rtnl_dereference(q->tap);
drivers/net/tap.c
88
if (q->enabled)
drivers/net/tap.c
919
struct tap_queue *q = file->private_data;
drivers/net/tap.c
92
rcu_assign_pointer(tap->taps[tap->numvtaps], q);
drivers/net/tap.c
93
q->queue_index = tap->numvtaps;
drivers/net/tap.c
94
q->enabled = true;
drivers/net/tap.c
940
q->flags = (q->flags & ~TAP_IFFEATURES) | u;
drivers/net/tap.c
946
tap = tap_get_tap_dev(q);
drivers/net/tap.c
953
u = q->flags;
drivers/net/tap.c
980
q->sk.sk_sndbuf = s;
drivers/net/tap.c
991
ret = set_offload(q, arg);
drivers/net/tap.c
997
tap = tap_get_tap_dev(q);
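The tap.c entries hand each queue's packets through a ptr_ring: produce fails when the slot at the producer index is still occupied, consume returns the slot at the consumer index and clears it. Below is a deliberately simplified, single-threaded model of that contract; the real ptr_ring adds producer/consumer spinlocks and cache-friendly batched invalidation, none of which is shown here.

#include <errno.h>
#include <stdio.h>

#define RING_SZ 4

static void *ring[RING_SZ];
static int producer, consumer;

static int ring_produce(void *ptr)
{
	if (ring[producer])
		return -ENOSPC;	/* consumer has not drained this slot yet */
	ring[producer] = ptr;
	if (++producer == RING_SZ)
		producer = 0;
	return 0;
}

static void *ring_consume(void)
{
	void *ptr = ring[consumer];

	if (ptr) {
		ring[consumer] = NULL;	/* free the slot for the producer */
		if (++consumer == RING_SZ)
			consumer = 0;
	}
	return ptr;
}

int main(void)
{
	int pkt = 1;

	ring_produce(&pkt);
	printf("produced %p, consumed %p\n", (void *)&pkt, ring_consume());
	return 0;
}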
drivers/net/usb/catc.c
183
void (*callback)(struct catc *catc, struct ctrl_queue *q);
drivers/net/usb/catc.c
476
struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
drivers/net/usb/catc.c
482
dr->bRequest = q->request;
drivers/net/usb/catc.c
483
dr->bRequestType = 0x40 | q->dir;
drivers/net/usb/catc.c
484
dr->wValue = cpu_to_le16(q->value);
drivers/net/usb/catc.c
485
dr->wIndex = cpu_to_le16(q->index);
drivers/net/usb/catc.c
486
dr->wLength = cpu_to_le16(q->len);
drivers/net/usb/catc.c
488
urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
drivers/net/usb/catc.c
489
urb->transfer_buffer_length = q->len;
drivers/net/usb/catc.c
494
if (!q->dir && q->buf && q->len)
drivers/net/usb/catc.c
495
memcpy(catc->ctrl_buf, q->buf, q->len);
drivers/net/usb/catc.c
505
struct ctrl_queue *q;
drivers/net/usb/catc.c
515
q = catc->ctrl_queue + catc->ctrl_tail;
drivers/net/usb/catc.c
517
if (q->dir) {
drivers/net/usb/catc.c
518
if (q->buf && q->len)
drivers/net/usb/catc.c
519
memcpy(q->buf, catc->ctrl_buf, q->len);
drivers/net/usb/catc.c
521
q->buf = catc->ctrl_buf;
drivers/net/usb/catc.c
524
if (q->callback)
drivers/net/usb/catc.c
525
q->callback(catc, q);
drivers/net/usb/catc.c
538
u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q))
drivers/net/usb/catc.c
540
struct ctrl_queue *q;
drivers/net/usb/catc.c
546
q = catc->ctrl_queue + catc->ctrl_head;
drivers/net/usb/catc.c
548
q->dir = dir;
drivers/net/usb/catc.c
549
q->request = request;
drivers/net/usb/catc.c
550
q->value = value;
drivers/net/usb/catc.c
551
q->index = index;
drivers/net/usb/catc.c
552
q->buf = buf;
drivers/net/usb/catc.c
553
q->len = len;
drivers/net/usb/catc.c
554
q->callback = callback;
drivers/net/usb/catc.c
576
static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
drivers/net/usb/catc.c
578
int index = q->index - EthStats;
drivers/net/usb/catc.c
581
catc->stats_buf[index] = *((char *)q->buf);
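The catc entries show a small asynchronous control-request queue: submissions fill the entry at the head index, the URB completion path services the entry at the tail and invokes its per-request callback. A sketch of that head/tail bookkeeping; the field names mirror the listing, while the ring size, the stub callback, and the request value are illustrative.

#include <stdio.h>

#define CTRL_QUEUE 16

struct ctrl_queue_ent {
	int dir, request, value, index, len;
	void *buf;
	void (*callback)(struct ctrl_queue_ent *q);
};

static struct ctrl_queue_ent ctrl_queue[CTRL_QUEUE];
static int ctrl_head, ctrl_tail;

static void submit(int request, void (*cb)(struct ctrl_queue_ent *))
{
	struct ctrl_queue_ent *q = ctrl_queue + ctrl_head;

	q->request = request;
	q->callback = cb;
	ctrl_head = (ctrl_head + 1) % CTRL_QUEUE;
}

static void complete_one(void)	/* models the URB completion handler */
{
	struct ctrl_queue_ent *q = ctrl_queue + ctrl_tail;

	if (q->callback)
		q->callback(q);
	ctrl_tail = (ctrl_tail + 1) % CTRL_QUEUE;
}

static void done(struct ctrl_queue_ent *q)
{
	printf("request %#x done\n", q->request);
}

int main(void)
{
	submit(0x61, done);
	complete_one();
	return 0;
}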
drivers/net/usb/lan78xx.c
2926
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
drivers/net/usb/lan78xx.c
2932
spin_lock_irqsave(&q->lock, flags);
drivers/net/usb/lan78xx.c
2933
while (!skb_queue_empty(q)) {
drivers/net/usb/lan78xx.c
2938
skb_queue_walk(q, skb) {
drivers/net/usb/lan78xx.c
2955
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/usb/lan78xx.c
2965
spin_lock_irqsave(&q->lock, flags);
drivers/net/usb/lan78xx.c
2967
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/usb/usbnet.c
732
static int unlink_urbs(struct usbnet *dev, struct sk_buff_head *q)
drivers/net/usb/usbnet.c
738
spin_lock_irqsave (&q->lock, flags);
drivers/net/usb/usbnet.c
739
while (!skb_queue_empty(q)) {
drivers/net/usb/usbnet.c
744
skb_queue_walk(q, skb) {
drivers/net/usb/usbnet.c
762
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/usb/usbnet.c
771
spin_lock_irqsave(&q->lock, flags);
drivers/net/usb/usbnet.c
773
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/usb/usbnet.c
791
static void wait_skb_queue_empty(struct sk_buff_head *q)
drivers/net/usb/usbnet.c
795
spin_lock_irqsave(&q->lock, flags);
drivers/net/usb/usbnet.c
796
while (!skb_queue_empty(q)) {
drivers/net/usb/usbnet.c
797
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/usb/usbnet.c
800
spin_lock_irqsave(&q->lock, flags);
drivers/net/usb/usbnet.c
802
spin_unlock_irqrestore(&q->lock, flags);
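The usbnet (and lan78xx) entries show the lock-drop polling idiom: to wait for an skb queue to drain, take the queue lock, and while the queue is non-empty release the lock, sleep briefly, and re-take it, so completion handlers can dequeue in between. A userspace model with a pthread mutex standing in for the spinlock and an integer standing in for the queue:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int q_len;	/* stands in for !skb_queue_empty(q) */

static void wait_queue_empty(void)
{
	pthread_mutex_lock(&q_lock);
	while (q_len) {
		/* drop the lock so the completion path can make progress */
		pthread_mutex_unlock(&q_lock);
		sched_yield();	/* the driver sleeps via schedule_timeout() */
		pthread_mutex_lock(&q_lock);
	}
	pthread_mutex_unlock(&q_lock);
}

int main(void)
{
	q_len = 0;	/* pretend completions already drained the queue */
	wait_queue_empty();
	puts("queue empty");
	return 0;
}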
drivers/net/veth.c
579
sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
drivers/net/veth.c
586
xdp_return_frame(bq->q[i]);
drivers/net/veth.c
633
bq->q[bq->count++] = frame;
drivers/net/veth.c
83
struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
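The veth entries batch XDP frames: pointers accumulate in a fixed array and are flushed in one transmit call when it fills (or at the end of the NAPI pass). A sketch of that bulk-queue shape; the flush here only counts frames, and the bulk size echoes the idea rather than the driver's exact value.

#include <stdio.h>

#define XDP_TX_BULK_SIZE 8

struct bulk_queue {
	void *q[XDP_TX_BULK_SIZE];
	unsigned int count;
};

static void bq_flush(struct bulk_queue *bq)
{
	/* the driver calls veth_xdp_xmit(dev, bq->count, bq->q, 0, false)
	 * here and returns any rejected frames via xdp_return_frame() */
	printf("flushed %u frames\n", bq->count);
	bq->count = 0;
}

static void bq_enqueue(struct bulk_queue *bq, void *frame)
{
	if (bq->count == XDP_TX_BULK_SIZE)
		bq_flush(bq);
	bq->q[bq->count++] = frame;
}

int main(void)
{
	struct bulk_queue bq = { .count = 0 };
	int frame;

	for (int i = 0; i < 20; i++)
		bq_enqueue(&bq, &frame);
	if (bq.count)
		bq_flush(&bq);	/* end-of-pass flush for the remainder */
	return 0;
}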
drivers/net/virtio_net.c
1093
static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
drivers/net/virtio_net.c
1095
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
drivers/net/virtio_net.c
1097
else if (q < vi->curr_queue_pairs)
drivers/net/virtio_net.c
1701
#define virtnet_xdp_put_sq(vi, q) { \
drivers/net/virtio_net.c
1705
txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
drivers/net/wireless/ath/ath10k/mac.c
3981
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
drivers/net/wireless/ath/ath10k/mac.c
3983
if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
drivers/net/wireless/ath/ath10k/mac.c
3988
skb_queue_tail(q, skb);
drivers/net/wireless/ath/ath11k/mac.c
6478
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
drivers/net/wireless/ath/ath11k/mac.c
6495
if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) {
drivers/net/wireless/ath/ath11k/mac.c
6500
skb_queue_tail(q, skb);
drivers/net/wireless/ath/ath12k/mac.c
9334
struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
drivers/net/wireless/ath/ath12k/mac.c
9351
if (skb_queue_len_lockless(q) >= ATH12K_TX_MGMT_NUM_PENDING_MAX) {
drivers/net/wireless/ath/ath12k/mac.c
9356
skb_queue_tail(q, skb);
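The ath10k/ath11k/ath12k entries share one admission pattern for management frames: read the queue length without taking the lock, drop when a per-driver cap is reached, otherwise enqueue. A sketch of that check; the cap value and the counter standing in for the skb queue are illustrative.

#include <stdio.h>

#define MAX_NUM_MGMT_PENDING 128

static unsigned int q_len;	/* stands in for skb_queue_len_lockless(q) */

static int mgmt_tx_push(void)
{
	if (q_len >= MAX_NUM_MGMT_PENDING)
		return -1;	/* the drivers return -ENOSPC here */
	q_len++;		/* skb_queue_tail(q, skb) */
	return 0;
}

int main(void)
{
	int dropped = 0;

	for (int i = 0; i < 200; i++)
		if (mgmt_tx_push())
			dropped++;
	printf("queued %u, dropped %d\n", q_len, dropped);
	return 0;
}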
drivers/net/wireless/ath/ath5k/ath5k.h
632
struct list_head q;
drivers/net/wireless/ath/ath5k/base.c
1059
INIT_LIST_HEAD(&txq->q);
drivers/net/wireless/ath/ath5k/base.c
1165
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
drivers/net/wireless/ath/ath5k/base.c
1786
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
drivers/net/wireless/ath/ath5k/base.c
876
list_add_tail(&bf->list, &txq->q);
drivers/net/wireless/ath/ath5k/debug.c
834
list_for_each_entry_safe(bf, bf0, &txq->q, list)
drivers/net/wireless/ath/ath5k/trace.h
41
struct ath5k_txq *q),
drivers/net/wireless/ath/ath5k/trace.h
43
TP_ARGS(priv, skb, q),
drivers/net/wireless/ath/ath5k/trace.h
55
__entry->qnum = (u8) q->qnum;
drivers/net/wireless/ath/ath5k/trace.h
67
struct ath5k_txq *q, struct ath5k_tx_status *ts),
drivers/net/wireless/ath/ath5k/trace.h
69
TP_ARGS(priv, skb, q, ts),
drivers/net/wireless/ath/ath5k/trace.h
83
__entry->qnum = (u8) q->qnum;
drivers/net/wireless/ath/ath6kl/core.h
330
struct sk_buff_head q;
drivers/net/wireless/ath/ath6kl/txrx.c
1028
skb_queue_tail(&rxtid->q, new_skb);
drivers/net/wireless/ath/ath6kl/txrx.c
1087
skb_queue_tail(&rxtid->q, node->skb);
drivers/net/wireless/ath/ath6kl/txrx.c
1099
stats->num_delivered += skb_queue_len(&rxtid->q);
drivers/net/wireless/ath/ath6kl/txrx.c
1101
while ((skb = skb_dequeue(&rxtid->q)))
drivers/net/wireless/ath/ath6kl/txrx.c
1127
while ((skb = skb_dequeue(&rxtid->q)))
drivers/net/wireless/ath/ath6kl/txrx.c
1741
if (!skb_queue_empty(&rxtid->q))
drivers/net/wireless/ath/ath6kl/txrx.c
1764
skb_queue_head_init(&rxtid->q);
drivers/net/wireless/ath/ath6kl/txrx.c
845
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
drivers/net/wireless/ath/ath6kl/txrx.c
855
skb_queue_tail(q, skb);
drivers/net/wireless/ath/ath9k/ath9k.h
593
struct ath9k_tx_queue_info *q);
drivers/net/wireless/ath/ath9k/debug.h
28
#define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0)
drivers/net/wireless/ath/ath9k/debug.h
34
#define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0)
drivers/net/wireless/ath/ath9k/htc.h
331
#define TX_QSTAT_INC(priv, q) do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0)
drivers/net/wireless/ath/ath9k/mac.c
137
int i, q;
drivers/net/wireless/ath/ath9k/mac.c
152
for (q = 0; q < AR_NUM_QCU; q++) {
drivers/net/wireless/ath/ath9k/mac.c
157
if (!ath9k_hw_numtxpending(ah, q))
drivers/net/wireless/ath/ath9k/mac.c
170
bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
drivers/net/wireless/ath/ath9k/mac.c
177
REG_WRITE(ah, AR_Q_TXD, 1 << q);
drivers/net/wireless/ath/ath9k/mac.c
183
if (ath9k_hw_numtxpending(ah, q) == 0)
drivers/net/wireless/ath/ath9k/mac.c
196
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
drivers/net/wireless/ath/ath9k/mac.c
203
qi = &ah->txq[q];
drivers/net/wireless/ath/ath9k/mac.c
206
"Set TXQ properties, inactive queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
210
ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
261
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
drivers/net/wireless/ath/ath9k/mac.c
267
qi = &ah->txq[q];
drivers/net/wireless/ath/ath9k/mac.c
270
"Get TXQ properties, inactive queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
298
int q;
drivers/net/wireless/ath/ath9k/mac.c
302
q = ATH9K_NUM_TX_QUEUES - 1;
drivers/net/wireless/ath/ath9k/mac.c
305
q = ATH9K_NUM_TX_QUEUES - 2;
drivers/net/wireless/ath/ath9k/mac.c
308
q = 1;
drivers/net/wireless/ath/ath9k/mac.c
311
q = ATH9K_NUM_TX_QUEUES - 3;
drivers/net/wireless/ath/ath9k/mac.c
314
q = qinfo->tqi_subtype;
drivers/net/wireless/ath/ath9k/mac.c
321
ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
323
qi = &ah->txq[q];
drivers/net/wireless/ath/ath9k/mac.c
325
ath_err(common, "TX queue: %u already active\n", q);
drivers/net/wireless/ath/ath9k/mac.c
331
(void) ath9k_hw_set_txq_props(ah, q, qinfo);
drivers/net/wireless/ath/ath9k/mac.c
333
return q;
drivers/net/wireless/ath/ath9k/mac.c
337
static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
drivers/net/wireless/ath/ath9k/mac.c
339
ah->txok_interrupt_mask &= ~(1 << q);
drivers/net/wireless/ath/ath9k/mac.c
340
ah->txerr_interrupt_mask &= ~(1 << q);
drivers/net/wireless/ath/ath9k/mac.c
341
ah->txdesc_interrupt_mask &= ~(1 << q);
drivers/net/wireless/ath/ath9k/mac.c
342
ah->txeol_interrupt_mask &= ~(1 << q);
drivers/net/wireless/ath/ath9k/mac.c
343
ah->txurn_interrupt_mask &= ~(1 << q);
drivers/net/wireless/ath/ath9k/mac.c
346
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
drivers/net/wireless/ath/ath9k/mac.c
351
qi = &ah->txq[q];
drivers/net/wireless/ath/ath9k/mac.c
353
ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
357
ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
360
ath9k_hw_clear_queue_interrupts(ah, q);
drivers/net/wireless/ath/ath9k/mac.c
367
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
drivers/net/wireless/ath/ath9k/mac.c
373
qi = &ah->txq[q];
drivers/net/wireless/ath/ath9k/mac.c
375
ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
379
ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
390
REG_WRITE(ah, AR_DLCL_IFS(q),
drivers/net/wireless/ath/ath9k/mac.c
395
REG_WRITE(ah, AR_DRETRY_LIMIT(q),
drivers/net/wireless/ath/ath9k/mac.c
400
REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
drivers/net/wireless/ath/ath9k/mac.c
403
REG_WRITE(ah, AR_DMISC(q),
drivers/net/wireless/ath/ath9k/mac.c
406
REG_WRITE(ah, AR_DMISC(q),
drivers/net/wireless/ath/ath9k/mac.c
410
REG_WRITE(ah, AR_QCBRCFG(q),
drivers/net/wireless/ath/ath9k/mac.c
413
REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
drivers/net/wireless/ath/ath9k/mac.c
418
REG_WRITE(ah, AR_QRDYTIMECFG(q),
drivers/net/wireless/ath/ath9k/mac.c
423
REG_WRITE(ah, AR_DCHNTIME(q),
drivers/net/wireless/ath/ath9k/mac.c
429
REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);
drivers/net/wireless/ath/ath9k/mac.c
432
REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
drivers/net/wireless/ath/ath9k/mac.c
437
REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);
drivers/net/wireless/ath/ath9k/mac.c
443
REG_SET_BIT(ah, AR_QMISC(q),
drivers/net/wireless/ath/ath9k/mac.c
448
REG_SET_BIT(ah, AR_DMISC(q),
drivers/net/wireless/ath/ath9k/mac.c
46
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
drivers/net/wireless/ath/ath9k/mac.c
463
REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
drivers/net/wireless/ath/ath9k/mac.c
471
REG_SET_BIT(ah, AR_QMISC(q),
drivers/net/wireless/ath/ath9k/mac.c
478
REG_WRITE(ah, AR_QRDYTIMECFG(q),
drivers/net/wireless/ath/ath9k/mac.c
48
return REG_READ(ah, AR_QTXDP(q));
drivers/net/wireless/ath/ath9k/mac.c
480
REG_SET_BIT(ah, AR_DMISC(q),
drivers/net/wireless/ath/ath9k/mac.c
488
REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
drivers/net/wireless/ath/ath9k/mac.c
491
REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
drivers/net/wireless/ath/ath9k/mac.c
498
REG_SET_BIT(ah, AR_DMISC(q),
drivers/net/wireless/ath/ath9k/mac.c
507
ath9k_hw_clear_queue_interrupts(ah, q);
drivers/net/wireless/ath/ath9k/mac.c
509
ah->txok_interrupt_mask |= 1 << q;
drivers/net/wireless/ath/ath9k/mac.c
510
ah->txerr_interrupt_mask |= 1 << q;
drivers/net/wireless/ath/ath9k/mac.c
513
ah->txdesc_interrupt_mask |= 1 << q;
drivers/net/wireless/ath/ath9k/mac.c
515
ah->txeol_interrupt_mask |= 1 << q;
drivers/net/wireless/ath/ath9k/mac.c
517
ah->txurn_interrupt_mask |= 1 << q;
drivers/net/wireless/ath/ath9k/mac.c
52
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
drivers/net/wireless/ath/ath9k/mac.c
54
REG_WRITE(ah, AR_QTXDP(q), txdp);
drivers/net/wireless/ath/ath9k/mac.c
58
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
drivers/net/wireless/ath/ath9k/mac.c
60
ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
drivers/net/wireless/ath/ath9k/mac.c
61
REG_WRITE(ah, AR_Q_TXE, 1 << q);
drivers/net/wireless/ath/ath9k/mac.c
65
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
drivers/net/wireless/ath/ath9k/mac.c
69
npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
drivers/net/wireless/ath/ath9k/mac.c
72
if (REG_READ(ah, AR_Q_TXE) & (1 << q))
drivers/net/wireless/ath/ath9k/mac.h
720
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
drivers/net/wireless/ath/ath9k/mac.h
721
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
drivers/net/wireless/ath/ath9k/mac.h
722
void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
drivers/net/wireless/ath/ath9k/mac.h
723
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
drivers/net/wireless/ath/ath9k/mac.h
725
bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q);
drivers/net/wireless/ath/ath9k/mac.h
727
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
drivers/net/wireless/ath/ath9k/mac.h
729
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
drivers/net/wireless/ath/ath9k/mac.h
733
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
drivers/net/wireless/ath/ath9k/mac.h
734
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
drivers/net/wireless/ath/ath9k/xmit.c
110
struct sk_buff_head q;
drivers/net/wireless/ath/ath9k/xmit.c
113
__skb_queue_head_init(&q);
drivers/net/wireless/ath/ath9k/xmit.c
114
skb_queue_splice_init(&txq->complete_q, &q);
drivers/net/wireless/ath/ath9k/xmit.c
117
while ((skb = __skb_dequeue(&q)))
drivers/net/wireless/ath/ath9k/xmit.c
214
int q = fi->txq;
drivers/net/wireless/ath/ath9k/xmit.c
216
if (q < 0)
drivers/net/wireless/ath/ath9k/xmit.c
219
txq = sc->tx.txq_map[q];
drivers/net/wireless/ath/ath9k/xmit.c
2338
int q, ret;
drivers/net/wireless/ath/ath9k/xmit.c
2351
q = skb_get_queue_mapping(skb);
drivers/net/wireless/ath/ath9k/xmit.c
2362
if (txq == sc->tx.txq_map[q]) {
drivers/net/wireless/ath/ath9k/xmit.c
2363
fi->txq = q;
drivers/net/wireless/ath/ath9k/xmit.c
244
int q, ret;
drivers/net/wireless/ath/ath9k/xmit.c
256
q = skb_get_queue_mapping(skb);
drivers/net/wireless/ath/ath9k/xmit.c
257
if (tid->txq == sc->tx.txq_map[q]) {
drivers/net/wireless/ath/ath9k/xmit.c
259
fi->txq = q;
drivers/net/wireless/ath/ath9k/xmit.c
804
int q = tid->txq->mac80211_qnum;
drivers/net/wireless/ath/ath9k/xmit.c
836
frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
drivers/net/wireless/ath/carl9170/debug.c
369
#define DEBUGFS_QUEUE_DUMP(q, qi) \
drivers/net/wireless/ath/carl9170/debug.c
370
static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar, \
drivers/net/wireless/ath/carl9170/debug.c
373
carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]); \
drivers/net/wireless/ath/carl9170/debug.c
376
DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000);
drivers/net/wireless/ath/carl9170/tx.c
1278
uint8_t q = 0;
drivers/net/wireless/ath/carl9170/tx.c
1283
SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
drivers/net/wireless/ath/carl9170/tx.c
1285
__carl9170_tx_process_status(ar, super->s.cookie, q);
drivers/net/wireless/ath/carl9170/tx.c
1344
unsigned int i, q;
drivers/net/wireless/ath/carl9170/tx.c
1367
q = __carl9170_get_queue(ar, i);
drivers/net/wireless/ath/carl9170/tx.c
1372
skb_queue_tail(&ar->tx_status[q], skb);
drivers/net/wireless/ath/carl9170/tx.c
663
unsigned int r, t, q;
drivers/net/wireless/ath/carl9170/tx.c
666
q = ar9170_qmap(info & CARL9170_TX_STATUS_QUEUE);
drivers/net/wireless/ath/carl9170/tx.c
668
skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
drivers/net/wireless/ath/wil6210/netdev.c
232
bool q;
drivers/net/wireless/ath/wil6210/netdev.c
240
q = queue_work(wil->wmi_wq, &vif->disconnect_worker);
drivers/net/wireless/ath/wil6210/netdev.c
241
wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q);
drivers/net/wireless/ath/wil6210/txrx.c
838
bool q = false;
drivers/net/wireless/ath/wil6210/txrx.c
857
q = true;
drivers/net/wireless/ath/wil6210/txrx.c
865
if (q) {
drivers/net/wireless/ath/wil6210/txrx.c
866
q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker);
drivers/net/wireless/ath/wil6210/txrx.c
868
q);
drivers/net/wireless/ath/wil6210/wmi.c
1929
bool q;
drivers/net/wireless/ath/wil6210/wmi.c
2030
q = queue_work(wil->wmi_wq, &wil->wmi_event_worker);
drivers/net/wireless/ath/wil6210/wmi.c
2031
wil_dbg_wmi(wil, "queue_work -> %d\n", q);
drivers/net/wireless/ath/wil6210/wmi.h
2730
__le32 q;
drivers/net/wireless/broadcom/b43/debugfs.c
463
cal->ctl.i, cal->ctl.q,
drivers/net/wireless/broadcom/b43/lo.c
51
if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) {
drivers/net/wireless/broadcom/b43/lo.c
53
"(I: %d, Q: %d)\n", control->i, control->q);
drivers/net/wireless/broadcom/b43/lo.c
573
.q = -100,
drivers/net/wireless/broadcom/b43/lo.c
581
{.i = 1,.q = 1,},
drivers/net/wireless/broadcom/b43/lo.c
582
{.i = 1,.q = 0,},
drivers/net/wireless/broadcom/b43/lo.c
583
{.i = 1,.q = -1,},
drivers/net/wireless/broadcom/b43/lo.c
584
{.i = 0,.q = -1,},
drivers/net/wireless/broadcom/b43/lo.c
585
{.i = -1,.q = -1,},
drivers/net/wireless/broadcom/b43/lo.c
586
{.i = -1,.q = 0,},
drivers/net/wireless/broadcom/b43/lo.c
587
{.i = -1,.q = 1,},
drivers/net/wireless/broadcom/b43/lo.c
588
{.i = 0,.q = 1,},
drivers/net/wireless/broadcom/b43/lo.c
60
value = (u8) (control->q);
drivers/net/wireless/broadcom/b43/lo.c
613
test_loctl.q += modifiers[i - 1].q * d->state_val_multiplier;
drivers/net/wireless/broadcom/b43/lo.c
615
test_loctl.q != prev_loctl.q) &&
drivers/net/wireless/broadcom/b43/lo.c
616
(abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) {
drivers/net/wireless/broadcom/b43/lo.c
692
(probe_loctl.q == d.min_loctl.q))
drivers/net/wireless/broadcom/b43/lo.c
728
.q = 0,
drivers/net/wireless/broadcom/b43/lo.c
766
loctl.i, loctl.q);
drivers/net/wireless/broadcom/b43/lo.c
850
val = (u8)(cal->ctl.q);
drivers/net/wireless/broadcom/b43/lo.c
965
cal->ctl.i, cal->ctl.q);
drivers/net/wireless/broadcom/b43/lo.h
15
s8 q;
drivers/net/wireless/broadcom/b43/phy_g.c
2336
s32 m1, m2, f = 256, q, delta;
drivers/net/wireless/broadcom/b43/phy_g.c
2344
q = b43_tssi2dbm_ad(f * 4096 -
drivers/net/wireless/broadcom/b43/phy_g.c
2346
delta = abs(q - f);
drivers/net/wireless/broadcom/b43/phy_g.c
2347
f = q;
drivers/net/wireless/broadcom/b43/phy_lp.c
1792
buf[i] |= CORDIC_FLOAT((sample.q * max) & 0xFF);
drivers/net/wireless/broadcom/b43/phy_n.c
1519
data[i] |= samples[i].q & 0x3FF;
drivers/net/wireless/broadcom/b43/phy_n.c
1563
samples[i].q = CORDIC_FLOAT(samples[i].q * max);
drivers/net/wireless/broadcom/b43/pio.c
126
struct b43_pio_txqueue *q;
drivers/net/wireless/broadcom/b43/pio.c
130
q = kzalloc_obj(*q);
drivers/net/wireless/broadcom/b43/pio.c
131
if (!q)
drivers/net/wireless/broadcom/b43/pio.c
133
q->dev = dev;
drivers/net/wireless/broadcom/b43/pio.c
134
q->rev = dev->dev->core_rev;
drivers/net/wireless/broadcom/b43/pio.c
135
q->mmio_base = index_to_pioqueue_base(dev, index) +
drivers/net/wireless/broadcom/b43/pio.c
137
q->index = index;
drivers/net/wireless/broadcom/b43/pio.c
139
q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
drivers/net/wireless/broadcom/b43/pio.c
140
if (q->rev >= 8) {
drivers/net/wireless/broadcom/b43/pio.c
141
q->buffer_size = 1920; //FIXME this constant is wrong.
drivers/net/wireless/broadcom/b43/pio.c
143
q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
drivers/net/wireless/broadcom/b43/pio.c
144
q->buffer_size -= 80;
drivers/net/wireless/broadcom/b43/pio.c
147
INIT_LIST_HEAD(&q->packets_list);
drivers/net/wireless/broadcom/b43/pio.c
148
for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
drivers/net/wireless/broadcom/b43/pio.c
149
p = &(q->packets[i]);
drivers/net/wireless/broadcom/b43/pio.c
152
p->queue = q;
drivers/net/wireless/broadcom/b43/pio.c
153
list_add(&p->list, &q->packets_list);
drivers/net/wireless/broadcom/b43/pio.c
156
return q;
drivers/net/wireless/broadcom/b43/pio.c
162
struct b43_pio_rxqueue *q;
drivers/net/wireless/broadcom/b43/pio.c
164
q = kzalloc_obj(*q);
drivers/net/wireless/broadcom/b43/pio.c
165
if (!q)
drivers/net/wireless/broadcom/b43/pio.c
167
q->dev = dev;
drivers/net/wireless/broadcom/b43/pio.c
168
q->rev = dev->dev->core_rev;
drivers/net/wireless/broadcom/b43/pio.c
169
q->mmio_base = index_to_pioqueue_base(dev, index) +
drivers/net/wireless/broadcom/b43/pio.c
175
return q;
drivers/net/wireless/broadcom/b43/pio.c
178
static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
drivers/net/wireless/broadcom/b43/pio.c
183
for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
drivers/net/wireless/broadcom/b43/pio.c
184
pack = &(q->packets[i]);
drivers/net/wireless/broadcom/b43/pio.c
186
ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
drivers/net/wireless/broadcom/b43/pio.c
192
static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
drivers/net/wireless/broadcom/b43/pio.c
195
if (!q)
drivers/net/wireless/broadcom/b43/pio.c
197
b43_pio_cancel_tx_packets(q);
drivers/net/wireless/broadcom/b43/pio.c
198
kfree(q);
drivers/net/wireless/broadcom/b43/pio.c
201
static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
drivers/net/wireless/broadcom/b43/pio.c
204
if (!q)
drivers/net/wireless/broadcom/b43/pio.c
206
kfree(q);
drivers/net/wireless/broadcom/b43/pio.c
24
static u16 generate_cookie(struct b43_pio_txqueue *q,
drivers/net/wireless/broadcom/b43/pio.c
290
struct b43_pio_txqueue *q;
drivers/net/wireless/broadcom/b43/pio.c
299
q = dev->pio.tx_queue_AC_VO;
drivers/net/wireless/broadcom/b43/pio.c
302
q = dev->pio.tx_queue_AC_VI;
drivers/net/wireless/broadcom/b43/pio.c
305
q = dev->pio.tx_queue_AC_BE;
drivers/net/wireless/broadcom/b43/pio.c
308
q = dev->pio.tx_queue_AC_BK;
drivers/net/wireless/broadcom/b43/pio.c
312
q = dev->pio.tx_queue_AC_BE;
drivers/net/wireless/broadcom/b43/pio.c
314
return q;
drivers/net/wireless/broadcom/b43/pio.c
317
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
drivers/net/wireless/broadcom/b43/pio.c
322
struct b43_wldev *dev = q->dev;
drivers/net/wireless/broadcom/b43/pio.c
327
b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
drivers/net/wireless/broadcom/b43/pio.c
330
q->mmio_base + B43_PIO_TXDATA,
drivers/net/wireless/broadcom/b43/pio.c
338
b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
drivers/net/wireless/broadcom/b43/pio.c
342
q->mmio_base + B43_PIO_TXDATA,
drivers/net/wireless/broadcom/b43/pio.c
352
struct b43_pio_txqueue *q = pack->queue;
drivers/net/wireless/broadcom/b43/pio.c
357
ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
drivers/net/wireless/broadcom/b43/pio.c
362
ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
drivers/net/wireless/broadcom/b43/pio.c
364
ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);
drivers/net/wireless/broadcom/b43/pio.c
367
b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
drivers/net/wireless/broadcom/b43/pio.c
37
cookie = (((u16)q->index + 1) << 12);
drivers/net/wireless/broadcom/b43/pio.c
370
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
drivers/net/wireless/broadcom/b43/pio.c
375
struct b43_wldev *dev = q->dev;
drivers/net/wireless/broadcom/b43/pio.c
381
b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
drivers/net/wireless/broadcom/b43/pio.c
384
q->mmio_base + B43_PIO8_TXDATA,
drivers/net/wireless/broadcom/b43/pio.c
410
b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
drivers/net/wireless/broadcom/b43/pio.c
412
q->mmio_base + B43_PIO8_TXDATA,
drivers/net/wireless/broadcom/b43/pio.c
422
struct b43_pio_txqueue *q = pack->queue;
drivers/net/wireless/broadcom/b43/pio.c
427
ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
drivers/net/wireless/broadcom/b43/pio.c
432
ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
drivers/net/wireless/broadcom/b43/pio.c
434
ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);
drivers/net/wireless/broadcom/b43/pio.c
437
b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
drivers/net/wireless/broadcom/b43/pio.c
440
static int pio_tx_frame(struct b43_pio_txqueue *q,
drivers/net/wireless/broadcom/b43/pio.c
443
struct b43_wldev *dev = q->dev;
drivers/net/wireless/broadcom/b43/pio.c
452
B43_WARN_ON(list_empty(&q->packets_list));
drivers/net/wireless/broadcom/b43/pio.c
453
pack = list_entry(q->packets_list.next,
drivers/net/wireless/broadcom/b43/pio.c
456
cookie = generate_cookie(q, pack);
drivers/net/wireless/broadcom/b43/pio.c
473
if (q->rev >= 8)
drivers/net/wireless/broadcom/b43/pio.c
483
q->buffer_used += roundup(skb->len + hdrlen, 4);
drivers/net/wireless/broadcom/b43/pio.c
484
q->free_packet_slots -= 1;
drivers/net/wireless/broadcom/b43/pio.c
49
struct b43_pio_txqueue *q = NULL;
drivers/net/wireless/broadcom/b43/pio.c
491
struct b43_pio_txqueue *q;
drivers/net/wireless/broadcom/b43/pio.c
501
q = dev->pio.tx_queue_mcast;
drivers/net/wireless/broadcom/b43/pio.c
507
q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
drivers/net/wireless/broadcom/b43/pio.c
513
if (unlikely(total_len > q->buffer_size)) {
drivers/net/wireless/broadcom/b43/pio.c
518
if (unlikely(q->free_packet_slots == 0)) {
drivers/net/wireless/broadcom/b43/pio.c
523
B43_WARN_ON(q->buffer_used > q->buffer_size);
drivers/net/wireless/broadcom/b43/pio.c
525
if (total_len > (q->buffer_size - q->buffer_used)) {
drivers/net/wireless/broadcom/b43/pio.c
529
q->stopped = true;
drivers/net/wireless/broadcom/b43/pio.c
536
q->queue_prio = skb_get_queue_mapping(skb);
drivers/net/wireless/broadcom/b43/pio.c
538
err = pio_tx_frame(q, skb);
drivers/net/wireless/broadcom/b43/pio.c
54
q = pio->tx_queue_AC_BK;
drivers/net/wireless/broadcom/b43/pio.c
551
B43_WARN_ON(q->buffer_used > q->buffer_size);
drivers/net/wireless/broadcom/b43/pio.c
552
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
drivers/net/wireless/broadcom/b43/pio.c
553
(q->free_packet_slots == 0)) {
drivers/net/wireless/broadcom/b43/pio.c
556
q->stopped = true;
drivers/net/wireless/broadcom/b43/pio.c
566
struct b43_pio_txqueue *q;
drivers/net/wireless/broadcom/b43/pio.c
57
q = pio->tx_queue_AC_BE;
drivers/net/wireless/broadcom/b43/pio.c
571
q = parse_cookie(dev, status->cookie, &pack);
drivers/net/wireless/broadcom/b43/pio.c
572
if (unlikely(!q))
drivers/net/wireless/broadcom/b43/pio.c
582
q->buffer_used -= total_len;
drivers/net/wireless/broadcom/b43/pio.c
583
q->free_packet_slots += 1;
drivers/net/wireless/broadcom/b43/pio.c
587
list_add(&pack->list, &q->packets_list);
drivers/net/wireless/broadcom/b43/pio.c
589
if (q->stopped) {
drivers/net/wireless/broadcom/b43/pio.c
590
b43_wake_queue(dev, q->queue_prio);
drivers/net/wireless/broadcom/b43/pio.c
591
q->stopped = false;
drivers/net/wireless/broadcom/b43/pio.c
596
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
drivers/net/wireless/broadcom/b43/pio.c
598
struct b43_wldev *dev = q->dev;
drivers/net/wireless/broadcom/b43/pio.c
60
q = pio->tx_queue_AC_VI;
drivers/net/wireless/broadcom/b43/pio.c
622
if (q->rev >= 8) {
drivers/net/wireless/broadcom/b43/pio.c
625
ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
drivers/net/wireless/broadcom/b43/pio.c
628
b43_piorx_write32(q, B43_PIO8_RXCTL,
drivers/net/wireless/broadcom/b43/pio.c
63
q = pio->tx_queue_AC_VO;
drivers/net/wireless/broadcom/b43/pio.c
631
ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
drivers/net/wireless/broadcom/b43/pio.c
639
ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
drivers/net/wireless/broadcom/b43/pio.c
642
b43_piorx_write16(q, B43_PIO_RXCTL,
drivers/net/wireless/broadcom/b43/pio.c
645
ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
drivers/net/wireless/broadcom/b43/pio.c
651
b43dbg(q->dev->wl, "PIO RX timed out\n");
drivers/net/wireless/broadcom/b43/pio.c
656
if (q->rev >= 8) {
drivers/net/wireless/broadcom/b43/pio.c
658
q->mmio_base + B43_PIO8_RXDATA,
drivers/net/wireless/broadcom/b43/pio.c
66
q = pio->tx_queue_mcast;
drivers/net/wireless/broadcom/b43/pio.c
662
q->mmio_base + B43_PIO_RXDATA,
drivers/net/wireless/broadcom/b43/pio.c
686
if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
drivers/net/wireless/broadcom/b43/pio.c
69
if (B43_WARN_ON(!q))
drivers/net/wireless/broadcom/b43/pio.c
704
if (q->rev >= 8) {
drivers/net/wireless/broadcom/b43/pio.c
706
q->mmio_base + B43_PIO8_RXDATA,
drivers/net/wireless/broadcom/b43/pio.c
714
q->mmio_base + B43_PIO8_RXDATA,
drivers/net/wireless/broadcom/b43/pio.c
72
if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
drivers/net/wireless/broadcom/b43/pio.c
733
q->mmio_base + B43_PIO_RXDATA,
drivers/net/wireless/broadcom/b43/pio.c
74
*pack = &q->packets[pack_index];
drivers/net/wireless/broadcom/b43/pio.c
741
q->mmio_base + B43_PIO_RXDATA,
drivers/net/wireless/broadcom/b43/pio.c
747
b43_rx(q->dev, skb, rxhdr);
drivers/net/wireless/broadcom/b43/pio.c
753
b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
drivers/net/wireless/broadcom/b43/pio.c
754
if (q->rev >= 8)
drivers/net/wireless/broadcom/b43/pio.c
755
b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
drivers/net/wireless/broadcom/b43/pio.c
757
b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
drivers/net/wireless/broadcom/b43/pio.c
76
return q;
drivers/net/wireless/broadcom/b43/pio.c
762
void b43_pio_rx(struct b43_pio_rxqueue *q)
drivers/net/wireless/broadcom/b43/pio.c
768
stop = !pio_rx_frame(q);
drivers/net/wireless/broadcom/b43/pio.c
777
static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
drivers/net/wireless/broadcom/b43/pio.c
779
if (q->rev >= 8) {
drivers/net/wireless/broadcom/b43/pio.c
780
b43_piotx_write32(q, B43_PIO8_TXCTL,
drivers/net/wireless/broadcom/b43/pio.c
781
b43_piotx_read32(q, B43_PIO8_TXCTL)
drivers/net/wireless/broadcom/b43/pio.c
784
b43_piotx_write16(q, B43_PIO_TXCTL,
drivers/net/wireless/broadcom/b43/pio.c
785
b43_piotx_read16(q, B43_PIO_TXCTL)
drivers/net/wireless/broadcom/b43/pio.c
790
static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
drivers/net/wireless/broadcom/b43/pio.c
792
if (q->rev >= 8) {
drivers/net/wireless/broadcom/b43/pio.c
793
b43_piotx_write32(q, B43_PIO8_TXCTL,
drivers/net/wireless/broadcom/b43/pio.c
794
b43_piotx_read32(q, B43_PIO8_TXCTL)
drivers/net/wireless/broadcom/b43/pio.c
797
b43_piotx_write16(q, B43_PIO_TXCTL,
drivers/net/wireless/broadcom/b43/pio.c
798
b43_piotx_read16(q, B43_PIO_TXCTL)
drivers/net/wireless/broadcom/b43/pio.h
109
static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
drivers/net/wireless/broadcom/b43/pio.h
111
return b43_read16(q->dev, q->mmio_base + offset);
drivers/net/wireless/broadcom/b43/pio.h
114
static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
drivers/net/wireless/broadcom/b43/pio.h
116
return b43_read32(q->dev, q->mmio_base + offset);
drivers/net/wireless/broadcom/b43/pio.h
119
static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
drivers/net/wireless/broadcom/b43/pio.h
122
b43_write16(q->dev, q->mmio_base + offset, value);
drivers/net/wireless/broadcom/b43/pio.h
125
static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
drivers/net/wireless/broadcom/b43/pio.h
128
b43_write32(q->dev, q->mmio_base + offset, value);
drivers/net/wireless/broadcom/b43/pio.h
132
static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
drivers/net/wireless/broadcom/b43/pio.h
134
return b43_read16(q->dev, q->mmio_base + offset);
drivers/net/wireless/broadcom/b43/pio.h
137
static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
drivers/net/wireless/broadcom/b43/pio.h
139
return b43_read32(q->dev, q->mmio_base + offset);
drivers/net/wireless/broadcom/b43/pio.h
142
static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
drivers/net/wireless/broadcom/b43/pio.h
145
b43_write16(q->dev, q->mmio_base + offset, value);
drivers/net/wireless/broadcom/b43/pio.h
148
static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
drivers/net/wireless/broadcom/b43/pio.h
151
b43_write32(q->dev, q->mmio_base + offset, value);
drivers/net/wireless/broadcom/b43/pio.h
161
void b43_pio_rx(struct b43_pio_rxqueue *q);
drivers/net/wireless/broadcom/b43/sdio.c
39
const struct b43_sdio_quirk *q;
drivers/net/wireless/broadcom/b43/sdio.c
41
for (q = b43_sdio_quirks; q->quirks; q++) {
drivers/net/wireless/broadcom/b43/sdio.c
42
if (vendor == q->vendor && device == q->device)
drivers/net/wireless/broadcom/b43/sdio.c
43
return q->quirks;
drivers/net/wireless/broadcom/b43legacy/phy.c
1947
s32 q;
drivers/net/wireless/broadcom/b43legacy/phy.c
1956
q = b43legacy_tssi2dbm_ad(f * 4096 -
drivers/net/wireless/broadcom/b43legacy/phy.c
1959
delta = abs(q - f);
drivers/net/wireless/broadcom/b43legacy/phy.c
1960
f = q;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
241
struct brcmf_fweh_queue_item, q);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
242
list_del(&event->q);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
31
struct list_head q;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
92
list_add_tail(&event->q, &fweh->event_q);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
1265
queue = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
621
static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
632
for (prec = 0; prec < q->num_prec; prec++) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
633
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
642
skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2755
static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2763
if (!pktq_pfull(q, prec) && !pktq_full(q)) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2764
brcmu_pktq_penq(q, prec, pkt);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2769
if (pktq_pfull(q, prec)) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2771
} else if (pktq_full(q)) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2772
p = brcmu_pktq_peek_tail(q, &eprec);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2783
p = brcmu_pktq_pdeq_tail(q, eprec);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2790
p = brcmu_pktq_penq(q, prec, pkt);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
395
struct list_head *q, int *counter)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
400
if (list_empty(q)) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
404
req = list_entry(q->next, struct brcmf_usbreq, list);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
405
list_del_init(q->next);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
414
struct list_head *q, struct brcmf_usbreq *req,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
419
list_add_tail(&req->list, q);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
426
brcmf_usbdev_qinit(struct list_head *q, int qsize)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
443
list_add_tail(&req->list, q);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
449
while (!list_empty(q)) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
450
req = list_entry(q->next, struct brcmf_usbreq, list);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
453
list_del(q->next);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
460
static void brcmf_usb_free_q(struct list_head *q)
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
464
list_for_each_entry_safe(req, next, q, list) {
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
234
int q;
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
356
s8 q;
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
869
s32 q;
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
3378
q_samp = (u16)(CORDIC_FLOAT(tone_samp.q * max_val) & 0x3ff);
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
23019
(((unsigned int)tone_buf[t].q) & 0x3ff);
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
23066
tone_buf[t].q = (s32)CORDIC_FLOAT(tone_buf[t].q * max_val);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
109
struct sk_buff_head *q;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
112
q = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
113
skb_queue_walk_safe(q, p, next) {
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
115
skb_unlink(p, q);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
126
struct sk_buff_head *q;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
129
q = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
130
p = skb_dequeue_tail(q);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
143
struct sk_buff_head *q;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
146
q = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
147
skb_queue_walk_safe(q, p, next) {
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
149
skb_unlink(p, q);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
172
offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
179
pq->q[prec].max = pq->max;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
180
skb_queue_head_init(&pq->q[prec].skblist);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
193
if (!skb_queue_empty(&pq->q[prec].skblist))
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
199
return skb_peek_tail(&pq->q[prec].skblist);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
212
len += pq->q[prec].skblist.qlen;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
222
struct sk_buff_head *q;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
230
skb_queue_empty(&pq->q[prec].skblist))
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
234
skb_queue_empty(&pq->q[prec].skblist))
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
238
q = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
239
p = skb_dequeue(q);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
49
struct sk_buff_head *q;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
54
q = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
55
skb_queue_tail(q, p);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
68
struct sk_buff_head *q;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
73
q = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
74
skb_queue_head(q, p);
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
86
struct sk_buff_head *q;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
89
q = &pq->q[prec].skblist;
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
90
p = skb_dequeue(q);
drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
103
return skb_peek_tail(&pq->q[prec].skblist);
drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
71
struct pktq_prec q[PKTQ_MAX_PREC];
drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
78
return pq->q[prec].skblist.qlen;
drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
83
return pq->q[prec].max - pq->q[prec].skblist.qlen;
drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
88
return pq->q[prec].skblist.qlen >= pq->q[prec].max;
drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
93
return skb_queue_empty(&pq->q[prec].skblist);
drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h
98
return skb_peek(&pq->q[prec].skblist);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4296
struct ipw2100_status_queue *q = &priv->status_queue;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4300
q->size = entries * sizeof(struct ipw2100_status);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4301
q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4303
if (!q->drv) {
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4329
struct ipw2100_bd_queue *q, int entries)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4333
memset(q, 0, sizeof(struct ipw2100_bd_queue));
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4335
q->entries = entries;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4336
q->size = entries * sizeof(struct ipw2100_bd);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4337
q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4339
if (!q->drv) {
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4350
static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4354
if (!q)
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4357
if (q->drv) {
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4358
dma_free_coherent(&priv->pci_dev->dev, q->size, q->drv,
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4359
q->nic);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4360
q->drv = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4367
struct ipw2100_bd_queue *q, u32 base, u32 size,
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4372
IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv,
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4373
(u32) q->nic);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4375
write_register(priv->net_dev, base, q->nic);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4376
write_register(priv->net_dev, size, q->entries);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4377
write_register(priv->net_dev, r, q->oldest);
drivers/net/wireless/intel/ipw2x00/ipw2100.c
4378
write_register(priv->net_dev, w, q->next);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10059
struct clx2_queue *q = &txq->q;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10089
tfd = &txq->bd[q->first_empty];
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10090
txq->txb[q->first_empty] = txb;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10225
q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10226
ipw_write32(priv, q->reg_w, q->first_empty);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10228
if (ipw_tx_queue_space(q) < q->high_mark)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
10249
if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11725
struct list_head *p, *q;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
11779
list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3652
static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3654
int s = q->read - q->write;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3664
static inline int ipw_tx_queue_space(const struct clx2_queue *q)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3666
int s = q->last_used - q->first_empty;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3668
s += q->n_bd;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3694
static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3697
q->n_bd = count;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3699
q->low_mark = q->n_bd / 4;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3700
if (q->low_mark < 4)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3701
q->low_mark = 4;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3703
q->high_mark = q->n_bd / 8;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3704
if (q->high_mark < 2)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3705
q->high_mark = 2;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3707
q->first_empty = q->last_used = 0;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3708
q->reg_r = read;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3709
q->reg_w = write;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3711
ipw_write32(priv, base, q->dma_addr);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3720
struct clx2_tx_queue *q,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3725
q->txb = kmalloc_objs(q->txb[0], count);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3726
if (!q->txb)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3729
q->bd =
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3730
dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3731
&q->q.dma_addr, GFP_KERNEL);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3732
if (!q->bd) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3734
sizeof(q->bd[0]) * count);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3735
kfree(q->txb);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3736
q->txb = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3740
ipw_queue_init(priv, &q->q, count, read, write, base, size);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3754
struct tfd_frame *bd = &txq->bd[txq->q.last_used];
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3777
if (txq->txb[txq->q.last_used]) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3778
libipw_txb_free(txq->txb[txq->q.last_used]);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3779
txq->txb[txq->q.last_used] = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3795
struct clx2_queue *q = &txq->q;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3798
if (q->n_bd == 0)
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3802
for (; q->first_empty != q->last_used;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3803
q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3808
dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
drivers/net/wireless/intel/ipw2x00/ipw2200.c
3809
q->dma_addr);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4964
struct clx2_queue *q = &txq->q;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4966
hw_tail = ipw_read32(priv, q->reg_r);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4967
if (hw_tail >= q->n_bd) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4970
hw_tail, q->n_bd);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4973
for (; q->last_used != hw_tail;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4974
q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4979
if ((ipw_tx_queue_space(q) > q->low_mark) &&
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4982
used = q->first_empty - q->last_used;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4984
used += q->n_bd;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4993
struct clx2_queue *q = &txq->q;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
4996
if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5001
tfd = &txq->bd[q->first_empty];
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5002
txq->txb[q->first_empty] = NULL;
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5011
q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
drivers/net/wireless/intel/ipw2x00/ipw2200.c
5012
ipw_write32(priv, q->reg_w, q->first_empty);
drivers/net/wireless/intel/ipw2x00/ipw2200.h
517
struct clx2_queue q;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
453
struct il_queue *q = NULL;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
512
q = &txq->q;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
514
if ((il_queue_space(q) < q->high_mark))
drivers/net/wireless/intel/iwlegacy/3945-mac.c
519
idx = il_get_cmd_idx(q, q->write_ptr, 0);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
521
txq->skbs[q->write_ptr] = skb;
drivers/net/wireless/intel/iwlegacy/3945-mac.c
539
(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
drivers/net/wireless/intel/iwlegacy/3945-mac.c
614
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
drivers/net/wireless/intel/iwlegacy/3945-mac.c
618
if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
drivers/net/wireless/intel/iwlegacy/3945.c
2199
int txq_id = txq->q.id;
drivers/net/wireless/intel/iwlegacy/3945.c
2203
shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
drivers/net/wireless/intel/iwlegacy/3945.c
275
struct il_queue *q = &txq->q;
drivers/net/wireless/intel/iwlegacy/3945.c
280
for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
drivers/net/wireless/intel/iwlegacy/3945.c
281
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
drivers/net/wireless/intel/iwlegacy/3945.c
283
skb = txq->skbs[txq->q.read_ptr];
drivers/net/wireless/intel/iwlegacy/3945.c
285
txq->skbs[txq->q.read_ptr] = NULL;
drivers/net/wireless/intel/iwlegacy/3945.c
289
if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
drivers/net/wireless/intel/iwlegacy/3945.c
311
if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
drivers/net/wireless/intel/iwlegacy/3945.c
314
txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
drivers/net/wireless/intel/iwlegacy/3945.c
332
info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
drivers/net/wireless/intel/iwlegacy/3945.c
601
struct il_queue *q;
drivers/net/wireless/intel/iwlegacy/3945.c
604
q = &txq->q;
drivers/net/wireless/intel/iwlegacy/3945.c
606
tfd = &tfd_tmp[q->write_ptr];
drivers/net/wireless/intel/iwlegacy/3945.c
639
int idx = txq->q.read_ptr;
drivers/net/wireless/intel/iwlegacy/3945.c
668
struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
drivers/net/wireless/intel/iwlegacy/3945.c
673
txq->skbs[txq->q.read_ptr] = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1654
struct il_queue *q;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1755
q = &txq->q;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1757
if (unlikely(il_queue_space(q) < q->high_mark)) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1770
txq->skbs[q->write_ptr] = skb;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1773
out_cmd = txq->cmd[q->write_ptr];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1774
out_meta = &txq->meta[q->write_ptr];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1788
(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1878
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
1899
if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2187
il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2188
il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2299
il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2300
il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2354
write_ptr = il->txq[txq_id].q.write_ptr;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2355
read_ptr = il->txq[txq_id].q.read_ptr;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2392
struct il_queue *q = &il->txq[txq_id].q;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2403
q->read_ptr == q->write_ptr) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2458
struct il_queue *q = &txq->q;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2463
if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2465
"is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2466
q->write_ptr, q->read_ptr);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2470
for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2471
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2473
skb = txq->skbs[txq->q.read_ptr];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2484
txq->skbs[txq->q.read_ptr] = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2776
if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2779
txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2785
skb = txq->skbs[txq->q.read_ptr];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2831
if (txq->q.read_ptr != (scd_ssn & 0xff)) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2832
idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2839
il_queue_space(&txq->q) > txq->q.low_mark &&
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2863
il_queue_space(&txq->q) > txq->q.low_mark)
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2946
idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2966
if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
drivers/net/wireless/intel/iwlegacy/4965-mac.c
2971
if (il_queue_space(&txq->q) > txq->q.low_mark &&
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3917
int idx = txq->q.read_ptr;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3946
struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3951
txq->skbs[txq->q.read_ptr] = NULL;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3960
struct il_queue *q;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3964
q = &txq->q;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3966
tfd = &tfd_tmp[q->write_ptr];
drivers/net/wireless/intel/iwlegacy/4965-mac.c
3999
int txq_id = txq->q.id;
drivers/net/wireless/intel/iwlegacy/4965-mac.c
4002
il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
drivers/net/wireless/intel/iwlegacy/4965-mac.c
6289
int txq_id = txq->q.id;
drivers/net/wireless/intel/iwlegacy/4965.c
1539
int txq_id = txq->q.id;
drivers/net/wireless/intel/iwlegacy/4965.c
1540
int write_ptr = txq->q.write_ptr;
drivers/net/wireless/intel/iwlegacy/commands.h
166
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
drivers/net/wireless/intel/iwlegacy/common.c
2537
il_rx_queue_space(const struct il_rx_queue *q)
drivers/net/wireless/intel/iwlegacy/common.c
2539
int s = q->read - q->write;
drivers/net/wireless/intel/iwlegacy/common.c
2554
il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
drivers/net/wireless/intel/iwlegacy/common.c
2560
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/intel/iwlegacy/common.c
2562
if (q->need_update == 0)
drivers/net/wireless/intel/iwlegacy/common.c
2577
q->write_actual = (q->write & ~0x7);
drivers/net/wireless/intel/iwlegacy/common.c
2578
il_wr(il, rx_wrt_ptr_reg, q->write_actual);
drivers/net/wireless/intel/iwlegacy/common.c
2583
q->write_actual = (q->write & ~0x7);
drivers/net/wireless/intel/iwlegacy/common.c
2584
il_wr(il, rx_wrt_ptr_reg, q->write_actual);
drivers/net/wireless/intel/iwlegacy/common.c
2587
q->need_update = 0;
drivers/net/wireless/intel/iwlegacy/common.c
2590
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wireless/intel/iwlegacy/common.c
2712
int txq_id = txq->q.id;
drivers/net/wireless/intel/iwlegacy/common.c
2732
il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
drivers/net/wireless/intel/iwlegacy/common.c
2740
_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
drivers/net/wireless/intel/iwlegacy/common.c
2752
struct il_queue *q = &txq->q;
drivers/net/wireless/intel/iwlegacy/common.c
2754
if (q->n_bd == 0)
drivers/net/wireless/intel/iwlegacy/common.c
2757
while (q->write_ptr != q->read_ptr) {
drivers/net/wireless/intel/iwlegacy/common.c
2759
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
drivers/net/wireless/intel/iwlegacy/common.c
2788
if (txq->q.n_bd)
drivers/net/wireless/intel/iwlegacy/common.c
2789
dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
drivers/net/wireless/intel/iwlegacy/common.c
2790
txq->tfds, txq->q.dma_addr);
drivers/net/wireless/intel/iwlegacy/common.c
2814
struct il_queue *q = &txq->q;
drivers/net/wireless/intel/iwlegacy/common.c
2817
if (q->n_bd == 0)
drivers/net/wireless/intel/iwlegacy/common.c
2820
while (q->read_ptr != q->write_ptr) {
drivers/net/wireless/intel/iwlegacy/common.c
2821
i = il_get_cmd_idx(q, q->read_ptr, 0);
drivers/net/wireless/intel/iwlegacy/common.c
2831
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
drivers/net/wireless/intel/iwlegacy/common.c
2834
i = q->n_win;
drivers/net/wireless/intel/iwlegacy/common.c
2868
if (txq->q.n_bd)
drivers/net/wireless/intel/iwlegacy/common.c
2869
dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
drivers/net/wireless/intel/iwlegacy/common.c
2870
txq->tfds, txq->q.dma_addr);
drivers/net/wireless/intel/iwlegacy/common.c
2907
il_queue_space(const struct il_queue *q)
drivers/net/wireless/intel/iwlegacy/common.c
2909
int s = q->read_ptr - q->write_ptr;
drivers/net/wireless/intel/iwlegacy/common.c
2911
if (q->read_ptr > q->write_ptr)
drivers/net/wireless/intel/iwlegacy/common.c
2912
s -= q->n_bd;
drivers/net/wireless/intel/iwlegacy/common.c
2915
s += q->n_win;
drivers/net/wireless/intel/iwlegacy/common.c
2929
il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
drivers/net/wireless/intel/iwlegacy/common.c
2937
q->n_bd = TFD_QUEUE_SIZE_MAX;
drivers/net/wireless/intel/iwlegacy/common.c
2939
q->n_win = slots;
drivers/net/wireless/intel/iwlegacy/common.c
2940
q->id = id;
drivers/net/wireless/intel/iwlegacy/common.c
2946
q->low_mark = q->n_win / 4;
drivers/net/wireless/intel/iwlegacy/common.c
2947
if (q->low_mark < 4)
drivers/net/wireless/intel/iwlegacy/common.c
2948
q->low_mark = 4;
drivers/net/wireless/intel/iwlegacy/common.c
2950
q->high_mark = q->n_win / 8;
drivers/net/wireless/intel/iwlegacy/common.c
2951
if (q->high_mark < 2)
drivers/net/wireless/intel/iwlegacy/common.c
2952
q->high_mark = 2;
drivers/net/wireless/intel/iwlegacy/common.c
2954
q->write_ptr = q->read_ptr = 0;
drivers/net/wireless/intel/iwlegacy/common.c
2982
dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
drivers/net/wireless/intel/iwlegacy/common.c
2986
txq->q.id = id;
drivers/net/wireless/intel/iwlegacy/common.c
3058
il_queue_init(il, &txq->q, slots, txq_id);
drivers/net/wireless/intel/iwlegacy/common.c
3095
il_queue_init(il, &txq->q, slots, txq_id);
drivers/net/wireless/intel/iwlegacy/common.c
3117
struct il_queue *q = &txq->q;
drivers/net/wireless/intel/iwlegacy/common.c
3146
if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
drivers/net/wireless/intel/iwlegacy/common.c
3154
idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
drivers/net/wireless/intel/iwlegacy/common.c
3188
cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
drivers/net/wireless/intel/iwlegacy/common.c
3200
q->write_ptr, idx, il->cmd_queue);
drivers/net/wireless/intel/iwlegacy/common.c
3206
le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
drivers/net/wireless/intel/iwlegacy/common.c
3230
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
drivers/net/wireless/intel/iwlegacy/common.c
3249
struct il_queue *q = &txq->q;
drivers/net/wireless/intel/iwlegacy/common.c
3252
if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
drivers/net/wireless/intel/iwlegacy/common.c
3254
"is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
drivers/net/wireless/intel/iwlegacy/common.c
3255
q->write_ptr, q->read_ptr);
drivers/net/wireless/intel/iwlegacy/common.c
3259
for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
drivers/net/wireless/intel/iwlegacy/common.c
3260
q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
drivers/net/wireless/intel/iwlegacy/common.c
3264
q->write_ptr, q->read_ptr);
drivers/net/wireless/intel/iwlegacy/common.c
3299
txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
drivers/net/wireless/intel/iwlegacy/common.c
3300
il->txq[il->cmd_queue].q.write_ptr)) {
drivers/net/wireless/intel/iwlegacy/common.c
3305
cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
drivers/net/wireless/intel/iwlegacy/common.c
4466
int q;
drivers/net/wireless/intel/iwlegacy/common.c
4480
q = AC_NUM - 1 - queue;
drivers/net/wireless/intel/iwlegacy/common.c
4484
il->qos_data.def_qos_parm.ac[q].cw_min =
drivers/net/wireless/intel/iwlegacy/common.c
4486
il->qos_data.def_qos_parm.ac[q].cw_max =
drivers/net/wireless/intel/iwlegacy/common.c
4488
il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
drivers/net/wireless/intel/iwlegacy/common.c
4489
il->qos_data.def_qos_parm.ac[q].edca_txop =
drivers/net/wireless/intel/iwlegacy/common.c
4492
il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
drivers/net/wireless/intel/iwlegacy/common.c
4734
struct il_queue *q;
drivers/net/wireless/intel/iwlegacy/common.c
4739
q = &il->txq[i].q;
drivers/net/wireless/intel/iwlegacy/common.c
4740
if (q->read_ptr == q->write_ptr)
drivers/net/wireless/intel/iwlegacy/common.c
4744
IL_ERR("Failed to flush queue %d\n", q->id);
drivers/net/wireless/intel/iwlegacy/common.c
4764
struct il_queue *q = &txq->q;
drivers/net/wireless/intel/iwlegacy/common.c
4769
if (q->read_ptr == q->write_ptr) {
drivers/net/wireless/intel/iwlegacy/common.c
4779
IL_ERR("Queue %d stuck for %u ms.\n", q->id,
drivers/net/wireless/intel/iwlegacy/common.h
151
struct il_queue q;
drivers/net/wireless/intel/iwlegacy/common.h
1749
void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
drivers/net/wireless/intel/iwlegacy/common.h
1750
int il_rx_queue_space(const struct il_rx_queue *q);
drivers/net/wireless/intel/iwlegacy/common.h
858
int il_queue_space(const struct il_queue *q);
drivers/net/wireless/intel/iwlegacy/common.h
860
il_queue_used(const struct il_queue *q, int i)
drivers/net/wireless/intel/iwlegacy/common.h
862
return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr &&
drivers/net/wireless/intel/iwlegacy/common.h
863
i < q->write_ptr) : !(i <
drivers/net/wireless/intel/iwlegacy/common.h
864
q->read_ptr
drivers/net/wireless/intel/iwlegacy/common.h
866
q->
drivers/net/wireless/intel/iwlegacy/common.h
871
il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
drivers/net/wireless/intel/iwlegacy/common.h
879
return q->n_win; /* must be power of 2 */
drivers/net/wireless/intel/iwlegacy/common.h
882
return idx & (q->n_win - 1);
drivers/net/wireless/intel/iwlegacy/debug.c
818
struct il_queue *q;
drivers/net/wireless/intel/iwlegacy/debug.c
836
q = &txq->q;
drivers/net/wireless/intel/iwlegacy/debug.c
841
q->read_ptr, q->write_ptr,
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
1153
int q;
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
1170
q = AC_NUM - 1 - queue;
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
1174
ctx->qos_data.def_qos_parm.ac[q].cw_min =
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
1176
ctx->qos_data.def_qos_parm.ac[q].cw_max =
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
1178
ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
1179
ctx->qos_data.def_qos_parm.ac[q].edca_txop =
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
1182
ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
465
int q;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
467
for (q = IWLAGN_FIRST_AMPDU_QUEUE;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
468
q < priv->trans->mac_cfg->base->num_of_queues; q++) {
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
469
if (!test_and_set_bit(q, priv->agg_q_alloc)) {
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
470
priv->queue_to_mac80211[q] = mq;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
471
return q;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
478
static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
480
clear_bit(q, priv->agg_q_alloc);
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
481
priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
684
int q, fifo;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
691
q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
697
iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h
19
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
557
#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
609
#define MSIX_FH_INT_CAUSES_Q(q) (q)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
332
#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
335
#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
338
#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
341
#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
344
#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
347
#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
349
#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
352
#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
355
#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8)
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
357
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
drivers/net/wireless/intel/iwlwifi/iwl-io.c
226
#define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; }
drivers/net/wireless/intel/iwlwifi/iwl-io.c
256
int i, q;
drivers/net/wireless/intel/iwlwifi/iwl-io.c
299
for (q = 0; q < num_q; q++) {
drivers/net/wireless/intel/iwlwifi/iwl-io.c
302
addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
drivers/net/wireless/intel/iwlwifi/iwl-io.c
305
get_rfh_string(addr), q,
drivers/net/wireless/intel/iwlwifi/iwl-io.c
320
for (q = 0; q < num_q; q++) {
drivers/net/wireless/intel/iwlwifi/iwl-io.c
323
addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
drivers/net/wireless/intel/iwlwifi/iwl-io.c
325
get_rfh_string(addr), q,
drivers/net/wireless/intel/iwlwifi/mld/d3.c
1504
u8 *cur_pn = mld_ptk_pn->q[que].pn[tid];
drivers/net/wireless/intel/iwlwifi/mld/d3.c
889
memcpy(mld_ptk_pn->q[i].pn[tid],
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
139
for (int q = 0; q < mld->trans->info.num_rxqs; q++)
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
140
spin_lock_init(&ll->pkts_counters[q].lock);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
170
for (int q = 0; q < mld->trans->info.num_rxqs; q++)
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
171
memset(ll->pkts_counters[q].vo_vi, 0,
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
172
sizeof(ll->pkts_counters[q].vo_vi));
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
324
for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
325
spin_lock_bh(&ll->pkts_counters[q].lock);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
326
ll->pkts_counters[q].vo_vi[mac] = 0;
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
327
spin_unlock_bh(&ll->pkts_counters[q].lock);
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
43
for (int q = 0; q < num_rx_q; q++) {
drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
45
&mld->low_latency.pkts_counters[q];
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
2114
*ptk_pn = kzalloc_flex(**ptk_pn, q, num_rx_queues);
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
2120
for (u8 q = 0; q < num_rx_queues; q++)
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
2121
memcpy((*ptk_pn)->q[q].pn[tid], seq.ccmp.pn,
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
1176
for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
1178
&mld_sta->mpdu_counters[q];
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
576
for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
drivers/net/wireless/intel/iwlwifi/mld/mlo.c
578
&mld_sta->mpdu_counters[q];
drivers/net/wireless/intel/iwlwifi/mld/rx.c
107
res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
drivers/net/wireless/intel/iwlwifi/mld/rx.c
113
memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
drivers/net/wireless/intel/iwlwifi/mld/sta.c
675
for (int q = 0; q < mld->trans->info.num_rxqs; q++)
drivers/net/wireless/intel/iwlwifi/mld/sta.c
676
memset(dup_data[q].last_seq, 0xff,
drivers/net/wireless/intel/iwlwifi/mld/sta.c
677
sizeof(dup_data[q].last_seq));
drivers/net/wireless/intel/iwlwifi/mld/sta.c
702
for (int q = 0; q < mld->trans->info.num_rxqs; q++)
drivers/net/wireless/intel/iwlwifi/mld/sta.c
703
spin_lock_init(&mld_sta->mpdu_counters[q].lock);
drivers/net/wireless/intel/iwlwifi/mld/sta.h
69
} ____cacheline_aligned_in_smp q[];
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
1651
memcpy(ptk_pn->q[i].pn[tid],
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
95
const u8 *tmp = ptk_pn->q[i].pn[tid];
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4348
int tid, q;
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4351
ptk_pn = kzalloc_flex(*ptk_pn, q,
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4360
for (q = 0; q < mvm->trans->info.num_rxqs; q++)
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4361
memcpy(ptk_pn->q[q].pn[tid],
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
78
res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
84
memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1799
int q;
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1813
for (q = 0; q < mvm->trans->info.num_rxqs; q++)
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1814
memset(dup_data[q].last_seq, 0xff,
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
1815
sizeof(dup_data[q].last_seq));
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
278
} ____cacheline_aligned_in_smp q[];
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
664
static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
666
return index & (q->n_window - 1);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
690
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
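`iwl_txq_get_cmd_index()` above relies on `n_window` being a power of two: the AND with `n_window - 1` is then exactly a modulo reduction. A minimal sketch of the invariant:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t cmd_index(uint32_t index, uint32_t n_window)
    {
        /* Masking only equals "index % n_window" for power-of-two sizes. */
        assert(n_window && (n_window & (n_window - 1)) == 0);
        return (uint16_t)(index & (n_window - 1));
    }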
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
527
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
538
if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
539
max = q->n_window;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
547
used = (q->write_ptr - q->read_ptr) &
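`iwl_txq_space()` above measures occupancy as the wrapped difference of the producer and consumer pointers and caps the usable capacity at the (possibly smaller) queue window. A sketch under the assumption that `ring_size` is a power of two; the clamping details may differ slightly from the real driver:

    #include <stdint.h>

    static int txq_free_space(uint32_t write_ptr, uint32_t read_ptr,
                              uint32_t ring_size, uint32_t n_window)
    {
        uint32_t max  = (n_window < ring_size) ? n_window : ring_size - 1;
        uint32_t used = (write_ptr - read_ptr) & (ring_size - 1);

        return used > max ? 0 : (int)(max - used);
    }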
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
1060
static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
1063
int index = iwl_txq_get_cmd_index(q, i);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
1064
int r = iwl_txq_get_cmd_index(q, read_ptr);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
1065
int w = iwl_txq_get_cmd_index(q, write_ptr);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
878
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
880
q->n_window = slots_num;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
888
q->low_mark = q->n_window / 4;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
889
if (q->low_mark < 4)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
890
q->low_mark = 4;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
892
q->high_mark = q->n_window / 8;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
893
if (q->high_mark < 2)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
894
q->high_mark = 2;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
896
q->write_ptr = 0;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
897
q->read_ptr = 0;
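`iwl_queue_init()` above derives flow-control watermarks from the window size: a quarter of the window (at least 4) as the low mark and an eighth (at least 2) as the high mark. Restated as a standalone helper:

    static void txq_watermarks(int n_window, int *low_mark, int *high_mark)
    {
        *low_mark = n_window / 4;
        if (*low_mark < 4)
            *low_mark = 4;

        *high_mark = n_window / 8;
        if (*high_mark < 2)
            *high_mark = 2;
    }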
drivers/net/wireless/marvell/mwl8k.c
5456
int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
drivers/net/wireless/marvell/mwl8k.c
5457
rc = mwl8k_cmd_set_edca_params(hw, q,
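The mwl8k line above flips the queue index because the firmware numbers its WMM queues in the opposite order from mac80211. The mapping is its own inverse, as this small check illustrates:

    #include <assert.h>

    #define MWL8K_TX_WMM_QUEUES 4

    int main(void)
    {
        for (int queue = 0; queue < MWL8K_TX_WMM_QUEUES; queue++) {
            int q = MWL8K_TX_WMM_QUEUES - 1 - queue;        /* reverse */
            assert(MWL8K_TX_WMM_QUEUES - 1 - q == queue);   /* involution */
        }
        return 0;
    }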
drivers/net/wireless/mediatek/mt76/debugfs.c
61
struct mt76_queue *q = dev->phy.q_tx[i];
drivers/net/wireless/mediatek/mt76/debugfs.c
63
if (!q)
drivers/net/wireless/mediatek/mt76/debugfs.c
67
i, q->queued, q->head, q->tail);
drivers/net/wireless/mediatek/mt76/debugfs.c
81
struct mt76_queue *q = &dev->q_rx[i];
drivers/net/wireless/mediatek/mt76/debugfs.c
83
queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
drivers/net/wireless/mediatek/mt76/debugfs.c
85
i, queued, q->head, q->tail);
drivers/net/wireless/mediatek/mt76/dma.c
1003
if (q->rx_head)
drivers/net/wireless/mediatek/mt76/dma.c
1004
data_len = q->buf_size;
drivers/net/wireless/mediatek/mt76/dma.c
1006
data_len = SKB_WITH_OVERHEAD(q->buf_size);
drivers/net/wireless/mediatek/mt76/dma.c
1008
if (data_len < len + q->buf_offset) {
drivers/net/wireless/mediatek/mt76/dma.c
1009
dev_kfree_skb(q->rx_head);
drivers/net/wireless/mediatek/mt76/dma.c
1010
q->rx_head = NULL;
drivers/net/wireless/mediatek/mt76/dma.c
1014
if (q->rx_head) {
drivers/net/wireless/mediatek/mt76/dma.c
1015
mt76_add_fragment(dev, q, data, len, more, info,
drivers/net/wireless/mediatek/mt76/dma.c
1024
skb = napi_build_skb(data, q->buf_size);
drivers/net/wireless/mediatek/mt76/dma.c
1028
skb_reserve(skb, q->buf_offset);
drivers/net/wireless/mediatek/mt76/dma.c
1037
q->rx_head = skb;
drivers/net/wireless/mediatek/mt76/dma.c
1041
dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
drivers/net/wireless/mediatek/mt76/dma.c
1048
mt76_dma_rx_fill(dev, q, true);
drivers/net/wireless/mediatek/mt76/dma.c
1169
struct mt76_queue *q = &dev->q_rx[i];
drivers/net/wireless/mediatek/mt76/dma.c
1172
mt76_queue_is_wed_rro(q))
drivers/net/wireless/mediatek/mt76/dma.c
1176
mt76_dma_rx_cleanup(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
1178
page_pool_destroy(q->page_pool);
drivers/net/wireless/mediatek/mt76/dma.c
158
mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/dma.c
160
if (!mt76_queue_is_wed_rro(q))
drivers/net/wireless/mediatek/mt76/dma.c
163
q->magic_cnt = 0;
drivers/net/wireless/mediatek/mt76/dma.c
164
if (mt76_queue_is_wed_rro_ind(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
170
rro_desc = (struct mt76_wed_rro_desc *)q->desc;
drivers/net/wireless/mediatek/mt76/dma.c
171
for (i = 0; i < q->ndesc; i++) {
drivers/net/wireless/mediatek/mt76/dma.c
177
} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
178
struct mt76_rro_rxdmad_c *dmad = (void *)q->desc;
drivers/net/wireless/mediatek/mt76/dma.c
183
for (i = 0; i < q->ndesc; i++)
drivers/net/wireless/mediatek/mt76/dma.c
189
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/dma.c
191
Q_WRITE(q, desc_base, q->desc_dma);
drivers/net/wireless/mediatek/mt76/dma.c
192
if ((q->flags & MT_QFLAG_WED_RRO_EN) && !mt76_npu_device_active(dev))
drivers/net/wireless/mediatek/mt76/dma.c
193
Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
drivers/net/wireless/mediatek/mt76/dma.c
195
Q_WRITE(q, ring_size, q->ndesc);
drivers/net/wireless/mediatek/mt76/dma.c
197
if (mt76_queue_is_npu_tx(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
198
writel(q->desc_dma, &q->regs->desc_base);
drivers/net/wireless/mediatek/mt76/dma.c
199
writel(q->ndesc, &q->regs->ring_size);
drivers/net/wireless/mediatek/mt76/dma.c
201
q->head = Q_READ(q, dma_idx);
drivers/net/wireless/mediatek/mt76/dma.c
202
q->tail = q->head;
drivers/net/wireless/mediatek/mt76/dma.c
205
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
208
if (!q || !q->ndesc)
drivers/net/wireless/mediatek/mt76/dma.c
211
if (!mt76_queue_is_wed_rro_ind(q) &&
drivers/net/wireless/mediatek/mt76/dma.c
212
!mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
216
for (i = 0; i < q->ndesc; i++)
drivers/net/wireless/mediatek/mt76/dma.c
217
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
drivers/net/wireless/mediatek/mt76/dma.c
220
mt76_dma_queue_magic_cnt_init(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
222
if (mt76_queue_is_emi(q))
drivers/net/wireless/mediatek/mt76/dma.c
223
*q->emi_cpu_idx = 0;
drivers/net/wireless/mediatek/mt76/dma.c
225
Q_WRITE(q, cpu_idx, 0);
drivers/net/wireless/mediatek/mt76/dma.c
226
Q_WRITE(q, dma_idx, 0);
drivers/net/wireless/mediatek/mt76/dma.c
228
mt76_dma_sync_idx(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
232
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
235
struct mt76_queue_entry *entry = &q->entry[q->head];
drivers/net/wireless/mediatek/mt76/dma.c
239
int idx = q->head;
drivers/net/wireless/mediatek/mt76/dma.c
242
if (mt76_queue_is_wed_rro_ind(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
245
rro_desc = (struct mt76_wed_rro_desc *)q->desc;
drivers/net/wireless/mediatek/mt76/dma.c
246
data = &rro_desc[q->head];
drivers/net/wireless/mediatek/mt76/dma.c
248
} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
249
data = &q->desc[q->head];
drivers/net/wireless/mediatek/mt76/dma.c
253
desc = &q->desc[q->head];
drivers/net/wireless/mediatek/mt76/dma.c
259
if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
273
txwi->qid = q - dev->q_rx;
drivers/net/wireless/mediatek/mt76/dma.c
276
if (mt76_queue_is_wed_rro_msdu_pg(q) &&
drivers/net/wireless/mediatek/mt76/dma.c
278
if (dev->drv->rx_rro_add_msdu_page(dev, q, buf->addr, data))
drivers/net/wireless/mediatek/mt76/dma.c
282
if (q->flags & MT_QFLAG_WED_RRO_EN) {
drivers/net/wireless/mediatek/mt76/dma.c
283
info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
drivers/net/wireless/mediatek/mt76/dma.c
284
if ((q->head + 1) == q->ndesc)
drivers/net/wireless/mediatek/mt76/dma.c
285
q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
drivers/net/wireless/mediatek/mt76/dma.c
300
q->head = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/dma.c
301
q->queued++;
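The producer path above advances `q->head` modulo the descriptor count and, on WED RRO rings, bumps a small "magic" lap counter each time the ring is about to wrap, so consumers can tell fresh descriptors from stale ones left over from the previous lap. A standalone sketch; the modulus value is assumed for illustration only:

    #define MAGIC_CNT_MOD 16    /* stands in for MT_DMA_MAGIC_CNT */

    static void ring_produce(unsigned int *head, unsigned int *magic_cnt,
                             unsigned int ndesc)
    {
        if (*head + 1 == ndesc)                     /* about to wrap */
            *magic_cnt = (*magic_cnt + 1) % MAGIC_CNT_MOD;
        *head = (*head + 1) % ndesc;
    }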
drivers/net/wireless/mediatek/mt76/dma.c
307
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
317
q->entry[q->head].txwi = DMA_DUMMY_DATA;
drivers/net/wireless/mediatek/mt76/dma.c
318
q->entry[q->head].skip_buf0 = true;
drivers/net/wireless/mediatek/mt76/dma.c
324
idx = q->head;
drivers/net/wireless/mediatek/mt76/dma.c
325
next = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/dma.c
327
desc = &q->desc[idx];
drivers/net/wireless/mediatek/mt76/dma.c
328
entry = &q->entry[idx];
drivers/net/wireless/mediatek/mt76/dma.c
364
q->head = next;
drivers/net/wireless/mediatek/mt76/dma.c
365
q->queued++;
drivers/net/wireless/mediatek/mt76/dma.c
368
q->entry[idx].txwi = txwi;
drivers/net/wireless/mediatek/mt76/dma.c
369
q->entry[idx].skb = skb;
drivers/net/wireless/mediatek/mt76/dma.c
370
q->entry[idx].wcid = 0xffff;
drivers/net/wireless/mediatek/mt76/dma.c
376
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
drivers/net/wireless/mediatek/mt76/dma.c
379
struct mt76_queue_entry *e = &q->entry[idx];
drivers/net/wireless/mediatek/mt76/dma.c
397
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/dma.c
400
if (mt76_queue_is_emi(q))
drivers/net/wireless/mediatek/mt76/dma.c
401
*q->emi_cpu_idx = cpu_to_le16(q->head);
drivers/net/wireless/mediatek/mt76/dma.c
403
Q_WRITE(q, cpu_idx, q->head);
drivers/net/wireless/mediatek/mt76/dma.c
407
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
drivers/net/wireless/mediatek/mt76/dma.c
412
if (!q || !q->ndesc)
drivers/net/wireless/mediatek/mt76/dma.c
415
spin_lock_bh(&q->cleanup_lock);
drivers/net/wireless/mediatek/mt76/dma.c
419
last = Q_READ(q, dma_idx);
drivers/net/wireless/mediatek/mt76/dma.c
421
while (q->queued > 0 && q->tail != last) {
drivers/net/wireless/mediatek/mt76/dma.c
422
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
drivers/net/wireless/mediatek/mt76/dma.c
423
mt76_npu_txdesc_cleanup(q, q->tail);
drivers/net/wireless/mediatek/mt76/dma.c
424
mt76_queue_tx_complete(dev, q, &entry);
drivers/net/wireless/mediatek/mt76/dma.c
431
if (!flush && q->tail == last)
drivers/net/wireless/mediatek/mt76/dma.c
432
last = Q_READ(q, dma_idx);
drivers/net/wireless/mediatek/mt76/dma.c
434
spin_unlock_bh(&q->cleanup_lock);
drivers/net/wireless/mediatek/mt76/dma.c
437
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
438
mt76_dma_sync_idx(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
439
mt76_dma_kick_queue(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
440
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
443
if (!q->queued)
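`mt76_dma_tx_cleanup()` above reaps completed slots between the software tail and the hardware's DMA index, re-reading the index once the gap closes to pick up completions that landed while draining. A simplified sketch with the hardware read abstracted behind a callback:

    static void tx_reap(unsigned int *tail, unsigned int *queued,
                        unsigned int ndesc,
                        unsigned int (*read_dma_idx)(void), int flush)
    {
        unsigned int last = read_dma_idx();

        while (*queued > 0 && *tail != last) {
            /* the per-entry completion work would run here */
            *tail = (*tail + 1) % ndesc;
            (*queued)--;
            if (!flush && *tail == last)
                last = read_dma_idx();      /* catch late completions */
        }
    }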
drivers/net/wireless/mediatek/mt76/dma.c
448
mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
451
struct mt76_queue_entry *e = &q->entry[idx];
drivers/net/wireless/mediatek/mt76/dma.c
465
q = &dev->q_rx[t->qid];
drivers/net/wireless/mediatek/mt76/dma.c
467
SKB_WITH_OVERHEAD(q->buf_size),
drivers/net/wireless/mediatek/mt76/dma.c
468
page_pool_get_dma_dir(q->page_pool));
drivers/net/wireless/mediatek/mt76/dma.c
491
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
drivers/net/wireless/mediatek/mt76/dma.c
494
struct mt76_queue_entry *e = &q->entry[idx];
drivers/net/wireless/mediatek/mt76/dma.c
495
struct mt76_desc *desc = &q->desc[idx];
drivers/net/wireless/mediatek/mt76/dma.c
499
if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush)
drivers/net/wireless/mediatek/mt76/dma.c
500
buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more);
drivers/net/wireless/mediatek/mt76/dma.c
502
if (mt76_queue_is_wed_rro(q))
drivers/net/wireless/mediatek/mt76/dma.c
518
if (mt76_queue_is_wed_rx(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
526
SKB_WITH_OVERHEAD(q->buf_size),
drivers/net/wireless/mediatek/mt76/dma.c
527
page_pool_get_dma_dir(q->page_pool));
drivers/net/wireless/mediatek/mt76/dma.c
538
SKB_WITH_OVERHEAD(q->buf_size),
drivers/net/wireless/mediatek/mt76/dma.c
539
page_pool_get_dma_dir(q->page_pool));
drivers/net/wireless/mediatek/mt76/dma.c
548
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
drivers/net/wireless/mediatek/mt76/dma.c
551
int idx = q->tail;
drivers/net/wireless/mediatek/mt76/dma.c
554
if (!q->queued)
drivers/net/wireless/mediatek/mt76/dma.c
557
if (mt76_queue_is_wed_rro_data(q) || mt76_queue_is_wed_rro_msdu_pg(q))
drivers/net/wireless/mediatek/mt76/dma.c
560
if (mt76_queue_is_wed_rro_ind(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
567
cmd = q->entry[idx].buf;
drivers/net/wireless/mediatek/mt76/dma.c
570
if (magic_cnt != q->magic_cnt)
drivers/net/wireless/mediatek/mt76/dma.c
573
if (q->tail == q->ndesc - 1)
drivers/net/wireless/mediatek/mt76/dma.c
574
q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
drivers/net/wireless/mediatek/mt76/dma.c
575
} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
582
dmad = q->entry[idx].buf;
drivers/net/wireless/mediatek/mt76/dma.c
585
if (magic_cnt != q->magic_cnt)
drivers/net/wireless/mediatek/mt76/dma.c
588
if (q->tail == q->ndesc - 1)
drivers/net/wireless/mediatek/mt76/dma.c
589
q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
drivers/net/wireless/mediatek/mt76/dma.c
592
q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
drivers/net/wireless/mediatek/mt76/dma.c
593
else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
drivers/net/wireless/mediatek/mt76/dma.c
597
q->tail = (q->tail + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/dma.c
598
q->queued--;
drivers/net/wireless/mediatek/mt76/dma.c
600
return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
drivers/net/wireless/mediatek/mt76/dma.c
604
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
613
if (q->queued + 1 >= q->ndesc - 1)
drivers/net/wireless/mediatek/mt76/dma.c
624
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
625
mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
drivers/net/wireless/mediatek/mt76/dma.c
626
mt76_dma_kick_queue(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
627
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
637
mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
696
if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
drivers/net/wireless/mediatek/mt76/dma.c
710
return mt76_npu_dma_add_buf(phy, q, skb, &tx_info.buf[1], txwi);
drivers/net/wireless/mediatek/mt76/dma.c
712
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
drivers/net/wireless/mediatek/mt76/dma.c
744
mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
747
int len = SKB_WITH_OVERHEAD(q->buf_size);
drivers/net/wireless/mediatek/mt76/dma.c
750
if (!q->ndesc)
drivers/net/wireless/mediatek/mt76/dma.c
753
while (q->queued < q->ndesc - 1) {
drivers/net/wireless/mediatek/mt76/dma.c
758
if (mt76_queue_is_wed_rro_ind(q) ||
drivers/net/wireless/mediatek/mt76/dma.c
759
mt76_queue_is_wed_rro_rxdmad_c(q))
drivers/net/wireless/mediatek/mt76/dma.c
762
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
drivers/net/wireless/mediatek/mt76/dma.c
767
offset + q->buf_offset;
drivers/net/wireless/mediatek/mt76/dma.c
769
qbuf.len = len - q->buf_offset;
drivers/net/wireless/mediatek/mt76/dma.c
771
if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
drivers/net/wireless/mediatek/mt76/dma.c
778
if (frames || mt76_queue_is_wed_rx(q))
drivers/net/wireless/mediatek/mt76/dma.c
779
mt76_dma_kick_queue(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
784
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
789
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
790
frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
drivers/net/wireless/mediatek/mt76/dma.c
791
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
797
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.c
803
spin_lock_init(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
804
spin_lock_init(&q->cleanup_lock);
drivers/net/wireless/mediatek/mt76/dma.c
806
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
drivers/net/wireless/mediatek/mt76/dma.c
807
q->ndesc = n_desc;
drivers/net/wireless/mediatek/mt76/dma.c
808
q->buf_size = bufsize;
drivers/net/wireless/mediatek/mt76/dma.c
809
q->hw_idx = idx;
drivers/net/wireless/mediatek/mt76/dma.c
810
q->dev = dev;
drivers/net/wireless/mediatek/mt76/dma.c
812
if (mt76_queue_is_wed_rro_ind(q))
drivers/net/wireless/mediatek/mt76/dma.c
814
else if (mt76_queue_is_npu_tx(q))
drivers/net/wireless/mediatek/mt76/dma.c
816
else if (mt76_queue_is_npu_rx(q))
drivers/net/wireless/mediatek/mt76/dma.c
821
q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
drivers/net/wireless/mediatek/mt76/dma.c
822
&q->desc_dma, GFP_KERNEL);
drivers/net/wireless/mediatek/mt76/dma.c
823
if (!q->desc)
drivers/net/wireless/mediatek/mt76/dma.c
826
mt76_dma_queue_magic_cnt_init(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
827
size = q->ndesc * sizeof(*q->entry);
drivers/net/wireless/mediatek/mt76/dma.c
828
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
drivers/net/wireless/mediatek/mt76/dma.c
829
if (!q->entry)
drivers/net/wireless/mediatek/mt76/dma.c
832
ret = mt76_create_page_pool(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
836
mt76_npu_queue_setup(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
837
ret = mt76_wed_dma_setup(dev, q, false);
drivers/net/wireless/mediatek/mt76/dma.c
842
if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
drivers/net/wireless/mediatek/mt76/dma.c
843
mt76_queue_is_wed_tx_free(q))
drivers/net/wireless/mediatek/mt76/dma.c
850
mt76_dma_queue_reset(dev, q, !mt76_queue_is_emi(q));
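`mt76_dma_alloc_queue()` above points each ring at its own register window: a fixed-size block per ring starting at `ring_base`. The address arithmetic as a standalone sketch, with the block size assumed for illustration:

    #include <stdint.h>

    #define RING_REG_BLOCK 0x10    /* stands in for MT_RING_SIZE */

    static uintptr_t ring_regs(uintptr_t mmio_base, uint32_t ring_base, int idx)
    {
        return mmio_base + ring_base + (uintptr_t)idx * RING_REG_BLOCK;
    }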
drivers/net/wireless/mediatek/mt76/dma.c
856
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/dma.c
861
if (!q->ndesc)
drivers/net/wireless/mediatek/mt76/dma.c
864
if (mt76_queue_is_npu(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
865
mt76_npu_queue_cleanup(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
870
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
871
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
drivers/net/wireless/mediatek/mt76/dma.c
872
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
877
if (!mt76_queue_is_wed_rro(q))
drivers/net/wireless/mediatek/mt76/dma.c
881
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
882
if (q->rx_head) {
drivers/net/wireless/mediatek/mt76/dma.c
883
dev_kfree_skb(q->rx_head);
drivers/net/wireless/mediatek/mt76/dma.c
884
q->rx_head = NULL;
drivers/net/wireless/mediatek/mt76/dma.c
887
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/dma.c
893
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/dma.c
895
if (!q->ndesc)
drivers/net/wireless/mediatek/mt76/dma.c
898
if (!mt76_queue_is_wed_rro_ind(q) &&
drivers/net/wireless/mediatek/mt76/dma.c
899
!mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
drivers/net/wireless/mediatek/mt76/dma.c
902
for (i = 0; i < q->ndesc; i++)
drivers/net/wireless/mediatek/mt76/dma.c
903
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
drivers/net/wireless/mediatek/mt76/dma.c
906
mt76_dma_rx_cleanup(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
909
mt76_wed_dma_setup(dev, q, true);
drivers/net/wireless/mediatek/mt76/dma.c
911
if (mt76_queue_is_wed_tx_free(q))
drivers/net/wireless/mediatek/mt76/dma.c
915
mt76_queue_is_wed_rro(q))
drivers/net/wireless/mediatek/mt76/dma.c
918
mt76_dma_sync_idx(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
919
if (mt76_queue_is_npu(q))
drivers/net/wireless/mediatek/mt76/dma.c
920
mt76_npu_fill_rx_queue(dev, q);
drivers/net/wireless/mediatek/mt76/dma.c
922
mt76_dma_rx_fill(dev, q, false);
drivers/net/wireless/mediatek/mt76/dma.c
926
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
drivers/net/wireless/mediatek/mt76/dma.c
929
struct sk_buff *skb = q->rx_head;
drivers/net/wireless/mediatek/mt76/dma.c
935
int offset = data - page_address(page) + q->buf_offset;
drivers/net/wireless/mediatek/mt76/dma.c
937
skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
drivers/net/wireless/mediatek/mt76/dma.c
945
q->rx_head = NULL;
drivers/net/wireless/mediatek/mt76/dma.c
947
dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
drivers/net/wireless/mediatek/mt76/dma.c
953
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
drivers/net/wireless/mediatek/mt76/dma.c
959
bool allow_direct = !mt76_queue_is_wed_rx(q);
drivers/net/wireless/mediatek/mt76/dma.c
962
if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
drivers/net/wireless/mediatek/mt76/dma.c
964
mt76_queue_is_wed_tx_free(q))) {
drivers/net/wireless/mediatek/mt76/dma.c
965
dma_idx = Q_READ(q, dma_idx);
drivers/net/wireless/mediatek/mt76/dma.c
974
if (q->tail == dma_idx)
drivers/net/wireless/mediatek/mt76/dma.c
975
dma_idx = Q_READ(q, dma_idx);
drivers/net/wireless/mediatek/mt76/dma.c
977
if (q->tail == dma_idx)
drivers/net/wireless/mediatek/mt76/dma.c
981
data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
drivers/net/wireless/mediatek/mt76/dma.c
991
if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
drivers/net/wireless/mediatek/mt76/dma.c
994
if (mt76_queue_is_wed_rro(q) &&
drivers/net/wireless/mediatek/mt76/dma.c
995
!mt76_queue_is_wed_rro_rxdmad_c(q)) {
drivers/net/wireless/mediatek/mt76/dma.h
169
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.h
171
void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/dma.h
175
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/dma.h
177
dev->queue_ops->reset_q(dev, q, true);
drivers/net/wireless/mediatek/mt76/dma.h
179
mt76_wed_dma_setup(dev, q, true);
drivers/net/wireless/mediatek/mt76/dma.h
82
npu = rcu_dereference(q->dev->mmio.npu); \
drivers/net/wireless/mediatek/mt76/dma.h
99
npu = rcu_dereference(q->dev->mmio.npu); \
drivers/net/wireless/mediatek/mt76/mac80211.c
1547
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mac80211.c
1555
while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
drivers/net/wireless/mediatek/mt76/mac80211.c
614
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mac80211.c
616
bool is_qrx = mt76_queue_is_rx(dev, q);
drivers/net/wireless/mediatek/mt76/mac80211.c
623
int idx = is_qrx ? q - dev->q_rx : -1;
drivers/net/wireless/mediatek/mt76/mac80211.c
626
if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
drivers/net/wireless/mediatek/mt76/mac80211.c
653
q->page_pool = page_pool_create(&pp_params);
drivers/net/wireless/mediatek/mt76/mac80211.c
654
if (IS_ERR(q->page_pool)) {
drivers/net/wireless/mediatek/mt76/mac80211.c
655
int err = PTR_ERR(q->page_pool);
drivers/net/wireless/mediatek/mt76/mac80211.c
657
q->page_pool = NULL;
drivers/net/wireless/mediatek/mt76/mac80211.c
881
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
drivers/net/wireless/mediatek/mt76/mac80211.c
883
struct sk_buff *skb = phy->rx_amsdu[q].head;
drivers/net/wireless/mediatek/mt76/mac80211.c
887
phy->rx_amsdu[q].head = NULL;
drivers/net/wireless/mediatek/mt76/mac80211.c
888
phy->rx_amsdu[q].tail = NULL;
drivers/net/wireless/mediatek/mt76/mac80211.c
914
__skb_queue_tail(&dev->rx_skb[q], skb);
drivers/net/wireless/mediatek/mt76/mac80211.c
917
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mac80211.c
922
if (phy->rx_amsdu[q].head &&
drivers/net/wireless/mediatek/mt76/mac80211.c
924
status->seqno != phy->rx_amsdu[q].seqno))
drivers/net/wireless/mediatek/mt76/mac80211.c
925
mt76_rx_release_amsdu(phy, q);
drivers/net/wireless/mediatek/mt76/mac80211.c
927
if (!phy->rx_amsdu[q].head) {
drivers/net/wireless/mediatek/mt76/mac80211.c
928
phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
drivers/net/wireless/mediatek/mt76/mac80211.c
929
phy->rx_amsdu[q].seqno = status->seqno;
drivers/net/wireless/mediatek/mt76/mac80211.c
930
phy->rx_amsdu[q].head = skb;
drivers/net/wireless/mediatek/mt76/mac80211.c
932
*phy->rx_amsdu[q].tail = skb;
drivers/net/wireless/mediatek/mt76/mac80211.c
933
phy->rx_amsdu[q].tail = &skb->next;
drivers/net/wireless/mediatek/mt76/mac80211.c
937
mt76_rx_release_amsdu(phy, q);
drivers/net/wireless/mediatek/mt76/mac80211.c
940
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
drivers/net/wireless/mediatek/mt76/mac80211.c
952
phy->test.rx_stats.packets[q]++;
drivers/net/wireless/mediatek/mt76/mac80211.c
954
phy->test.rx_stats.fcs_error[q]++;
drivers/net/wireless/mediatek/mt76/mac80211.c
958
mt76_rx_release_burst(phy, q, skb);
drivers/net/wireless/mediatek/mt76/mac80211.c
964
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mac80211.c
968
q = phy->q_tx[i];
drivers/net/wireless/mediatek/mt76/mac80211.c
969
if (q && q->queued)
drivers/net/wireless/mediatek/mt76/mt76.h
1239
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
drivers/net/wireless/mediatek/mt76/mt76.h
1251
static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
1343
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt76.h
1345
q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
drivers/net/wireless/mediatek/mt76/mt76.h
1346
if (IS_ERR(q))
drivers/net/wireless/mediatek/mt76/mt76.h
1347
return PTR_ERR(q);
drivers/net/wireless/mediatek/mt76/mt76.h
1349
phy->q_tx[qid] = q;
drivers/net/wireless/mediatek/mt76/mt76.h
1357
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt76.h
1359
q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
drivers/net/wireless/mediatek/mt76/mt76.h
1360
if (IS_ERR(q))
drivers/net/wireless/mediatek/mt76/mt76.h
1361
return PTR_ERR(q);
drivers/net/wireless/mediatek/mt76/mt76.h
1363
dev->q_mcu[qid] = q;
drivers/net/wireless/mediatek/mt76/mt76.h
1512
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
drivers/net/wireless/mediatek/mt76/mt76.h
1638
int mt76_npu_dma_add_buf(struct mt76_phy *phy, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
1641
int mt76_npu_rx_queue_init(struct mt76_dev *dev, struct mt76_queue *q);
drivers/net/wireless/mediatek/mt76/mt76.h
1642
int mt76_npu_fill_rx_queue(struct mt76_dev *dev, struct mt76_queue *q);
drivers/net/wireless/mediatek/mt76/mt76.h
1643
void mt76_npu_queue_cleanup(struct mt76_dev *dev, struct mt76_queue *q);
drivers/net/wireless/mediatek/mt76/mt76.h
1647
void mt76_npu_queue_setup(struct mt76_dev *dev, struct mt76_queue *q);
drivers/net/wireless/mediatek/mt76/mt76.h
1648
void mt76_npu_txdesc_cleanup(struct mt76_queue *q, int index);
drivers/net/wireless/mediatek/mt76/mt76.h
1659
struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
1668
struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1674
struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1693
struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1697
static inline void mt76_npu_txdesc_cleanup(struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
1770
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt76.h
1774
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
1920
static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1925
if (q == &dev->q_rx[i])
drivers/net/wireless/mediatek/mt76/mt76.h
1932
static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1934
return (q->flags & MT_QFLAG_WED) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1935
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
drivers/net/wireless/mediatek/mt76/mt76.h
1938
static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1940
return q->flags & MT_QFLAG_WED_RRO;
drivers/net/wireless/mediatek/mt76/mt76.h
1943
static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1945
return mt76_queue_is_wed_rro(q) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1946
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
drivers/net/wireless/mediatek/mt76/mt76.h
1949
static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1951
return mt76_queue_is_wed_rro(q) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1952
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
drivers/net/wireless/mediatek/mt76/mt76.h
1955
static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1957
return mt76_queue_is_wed_rro(q) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1958
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA;
drivers/net/wireless/mediatek/mt76/mt76.h
1961
static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1963
return mt76_queue_is_wed_rro(q) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1964
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) ==
drivers/net/wireless/mediatek/mt76/mt76.h
1968
static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1970
return (q->flags & MT_QFLAG_WED) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1971
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
drivers/net/wireless/mediatek/mt76/mt76.h
1974
static inline bool mt76_queue_is_emi(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1976
return q->flags & MT_QFLAG_EMI_EN;
drivers/net/wireless/mediatek/mt76/mt76.h
1979
static inline bool mt76_queue_is_npu(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1981
return q->flags & MT_QFLAG_NPU;
drivers/net/wireless/mediatek/mt76/mt76.h
1984
static inline bool mt76_queue_is_npu_tx(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1986
return mt76_queue_is_npu(q) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1987
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TX;
drivers/net/wireless/mediatek/mt76/mt76.h
1990
static inline bool mt76_queue_is_npu_rx(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/mt76.h
1992
return mt76_queue_is_npu(q) &&
drivers/net/wireless/mediatek/mt76/mt76.h
1993
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
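The family of `mt76_queue_is_*()` helpers above all follow one idiom: a queue-type field packed into `q->flags` is extracted FIELD_GET-style and compared against a per-type constant, with a capability bit gating the whole class. A hypothetical standalone rendering; the bit positions are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    #define QFLAG_WED       (1u << 8)       /* assumed capability bit */
    #define QFLAG_TYPE_MASK (0xfu << 4)     /* assumed type field */
    #define QFLAG_TYPE(f)   (((f) & QFLAG_TYPE_MASK) >> 4)

    static bool queue_is_wed_type(uint32_t flags, uint32_t type)
    {
        return (flags & QFLAG_WED) && QFLAG_TYPE(flags) == type;
    }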
drivers/net/wireless/mediatek/mt76/mt76.h
2003
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
drivers/net/wireless/mediatek/mt76/mt76.h
2013
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
drivers/net/wireless/mediatek/mt76/mt76.h
2017
page = page_pool_alloc_frag(q->page_pool, offset, size,
drivers/net/wireless/mediatek/mt76/mt76.h
294
int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
298
int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
302
int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
305
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
drivers/net/wireless/mediatek/mt76/mt76.h
310
void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
316
void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
drivers/net/wireless/mediatek/mt76/mt76.h
318
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
drivers/net/wireless/mediatek/mt76/mt76.h
320
void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76.h
562
void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt76.h
565
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
drivers/net/wireless/mediatek/mt76/mt76.h
568
int (*rx_rro_add_msdu_page)(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
105
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
114
__skb_queue_head_init(&data.q);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
128
q = dev->mphy.q_tx[MT_TXQ_BEACON];
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
129
spin_lock(&q->lock);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
133
mt76_queue_kick(dev, q);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
134
spin_unlock(&q->lock);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
140
q = dev->mphy.q_tx[MT_TXQ_CAB];
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
142
nframes = skb_queue_len(&data.q);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
146
} while (nframes != skb_queue_len(&data.q) &&
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
147
skb_queue_len(&data.q) < 8);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
149
if (skb_queue_empty(&data.q))
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
159
spin_lock(&q->lock);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
160
while ((skb = __skb_dequeue(&data.q)) != NULL) {
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
165
mt76_tx_queue_skb(dev, q, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
167
mt76_queue_kick(dev, q);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
168
spin_unlock(&q->lock);
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
7
struct sk_buff_head q;
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
96
__skb_queue_tail(&data->q, skb);
drivers/net/wireless/mediatek/mt76/mt7603/core.c
10
mt7603_irq_enable(dev, MT_INT_RX_DONE(q));
drivers/net/wireless/mediatek/mt76/mt7603/core.c
6
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
105
if (q == MT_RXQ_MCU) {
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
124
mt76_rx(&dev->mt76, q, skb);
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
135
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
140
err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
95
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1531
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1536
q = dev->mphy.q_tx[i];
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1538
if (!q->queued)
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1542
dma_idx = readl(&q->regs->dma_idx);
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
1546
dma_idx != readl(&q->regs->cpu_idx))
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
462
struct mt76_queue *q = dev->mphy.q_tx[i];
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
463
u8 qidx = q->hw_idx;
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
925
struct mt76_queue *q = dev->mphy.q_tx[qid];
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
959
FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
243
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
245
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
395
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
405
struct mt76_queue *q = queue_map[i].q;
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
409
queue_map[i].queue, q->queued, q->head,
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
410
q->tail);
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
1631
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
1661
mt76_rx(&dev->mt76, q, skb);
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
67
mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
69
mt76_connac_irq_enable(mdev, MT_INT_RX_DONE(q));
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
507
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
44
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
drivers/net/wireless/mediatek/mt76/mt76x02.h
190
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt76x02.h
192
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
drivers/net/wireless/mediatek/mt76/mt76x02.h
211
struct sk_buff_head q;
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
151
__skb_queue_tail(&data->q, skb);
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
175
__skb_queue_tail(&data->q, skb);
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
187
nframes = skb_queue_len(&data->q);
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
191
} while (nframes != skb_queue_len(&data->q) &&
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
192
skb_queue_len(&data->q) < max_nframes);
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
194
if (!skb_queue_len(&data->q))
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
115
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
120
err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
178
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
18
struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
230
q = &dev->mt76.q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
231
q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
232
ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
249
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
254
mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
28
__skb_queue_head_init(&data.q);
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
353
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
357
q = dev->mphy.q_tx[i];
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
360
dma_idx = readl(&q->regs->dma_idx);
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
363
if (!q->queued || prev_dma_idx != dma_idx) {
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
40
while ((skb = __skb_dequeue(&data.q)) != NULL)
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
53
if (!skb_queue_len(&data.q))
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
63
spin_lock(&q->lock);
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
64
while ((skb = __skb_dequeue(&data.q)) != NULL) {
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
69
mt76_tx_queue_skb(dev, q, MT_TXQ_PSD, skb, &mvif->group_wcid,
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
72
spin_unlock(&q->lock);
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
35
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
41
if (q == MT_RXQ_MCU) {
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
52
mt76_rx(mdev, q, skb);
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
194
__skb_queue_head_init(&data.q);
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
206
while ((skb = __skb_dequeue(&data.q)) != NULL)
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
219
while ((skb = __skb_dequeue(&data.q)) != NULL)
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
948
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
960
struct mt76_queue *q = queue_map[i].q;
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
962
if (!q)
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
966
queue_map[i].queue, q->queued, q->head,
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
967
q->tail);
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
43
#define Q_CONFIG(q, wfdma, int, id) do { \
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
45
dev->wfdma_mask |= (1 << (q)); \
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
46
dev->q_int_mask[(q)] = int; \
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
47
dev->q_id[(q)] = id; \
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
50
#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
51
#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
52
#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1063
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1098
if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
1099
mt76_rx(&dev->mt76, q, skb);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
249
mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
259
if (!mt76_queue_is_wed_rx(q))
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
277
enum mt76_rxq_id q, u32 *info)
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
541
mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
2549
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
2554
e->aifs = q->aifs;
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
2555
e->txop = cpu_to_le16(q->txop);
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
2557
if (q->cw_min)
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
2558
e->cw_min = fls(q->cw_min);
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
2562
if (q->cw_max)
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
2563
e->cw_max = cpu_to_le16(fls(q->cw_max));
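In the EDCA setup above, mac80211 supplies contention windows as `2^n - 1` values while the firmware expects the exponent `n`; `fls()` recovers it, since the highest set bit of `2^n - 1` is bit `n` (1-based). A portable restatement:

    /* Equivalent of the kernel's fls(): 1-based index of the highest set
     * bit, 0 for zero input, so cw_exponent(2^n - 1) == n. */
    static int cw_exponent(unsigned int cw)
    {
        int n = 0;

        while (cw) {
            cw >>= 1;
            n++;
        }
        return n;
    }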
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
818
enum mt76_rxq_id q)
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
822
mt7915_irq_enable(dev, MT_INT_RX(q));
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
599
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
682
#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
683
#define __TXQ(q) (__RXQ(q) + MT_RXQ_BAND2)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
685
#define MT_Q_ID(q) (dev->q_id[(q)])
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
686
#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
689
#define MT_MCUQ_ID(q) MT_Q_ID(q)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
690
#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
691
#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
693
#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
694
#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
695
#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
697
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
698
MT_MCUQ_ID(q)* 0x4)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
699
#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
700
MT_RXQ_ID(q)* 0x4)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
701
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
702
MT_TXQ_ID(q)* 0x4)
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
731
#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
732
#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
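The `__RXQ()`/`__TXQ()` macros above (mt7996 repeats the same scheme further down) flatten three queue kinds into one index space, so a single `q_id[]`/`q_int_mask[]` table covers MCU, RX and TX queues: MCU ids come first, the RX block is offset past them, and TX past the RX block. Schematically, with illustrative counts standing in for the driver's enum sizes:

    /* Illustrative only: real offsets come from __MT_MCUQ_MAX and the
     * RX queue enum in the driver. */
    #define N_MCUQ 3
    #define N_RXQ  7

    #define FLAT_MCUQ(q) (q)
    #define FLAT_RXQ(q)  ((q) + N_MCUQ)
    #define FLAT_TXQ(q)  (FLAT_RXQ(q) + N_RXQ)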
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
727
enum mt76_rxq_id q;
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
779
q = phy->mt76->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
780
mphy->test.rx_stats.packets[q] += fcs_err;
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
781
mphy->test.rx_stats.fcs_error[q] += fcs_err;
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
102
u8 q = mt76_connac_lmac_mapping(i);
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
103
u32 tx_cur = tx_time[q];
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
104
u32 rx_cur = rx_time[q];
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
584
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
616
mt76_rx(&dev->mt76, q, skb);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
721
struct ieee80211_tx_queue_params *q = &mvif->bss_conf.queue_params[ac];
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
724
e->aifs = cpu_to_le16(q->aifs);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
725
e->txop = cpu_to_le16(q->txop);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
727
if (q->cw_min)
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
728
e->cw_min = cpu_to_le16(q->cw_min);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
732
if (q->cw_max)
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
733
e->cw_max = cpu_to_le16(q->cw_max);
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
747
struct ieee80211_he_mu_edca_param_ac_rec *q;
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
753
q = &mvif->bss_conf.queue_params[ac].mu_edca_param_rec;
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
756
e->cw_min = q->ecw_min_max & 0xf;
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
757
e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
758
e->aifsn = q->aifsn;
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
759
e->timer = q->mu_edca_timer;
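The MU-EDCA lines above unpack two exponents that share one byte: the low nibble carries ECW min, the high nibble ECW max. As a standalone helper:

    #include <stdint.h>

    static void ecw_unpack(uint8_t ecw_min_max,
                           uint8_t *ecw_min, uint8_t *ecw_max)
    {
        *ecw_min = ecw_min_max & 0xf;           /* low nibble */
        *ecw_max = (ecw_min_max & 0xf0) >> 4;   /* high nibble */
    }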
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
263
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1205
void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
1244
mt76_rx(&dev->mt76, q, skb);
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
89
u8 q = mt76_connac_lmac_mapping(i);
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
90
u32 tx_cur = tx_time[q];
drivers/net/wireless/mediatek/mt76/mt7925/mac.c
91
u32 rx_cur = rx_time[q];
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
1163
struct ieee80211_tx_queue_params *q = &mconf->queue_params[ac];
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
1172
e->aifs = q->aifs;
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
1173
e->txop = cpu_to_le16(q->txop);
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
1175
if (q->cw_min)
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
1176
e->cw_min = fls(q->cw_min);
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
1180
if (q->cw_max)
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
1181
e->cw_max = fls(q->cw_max);
drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
282
void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt792x.h
424
void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
102
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
112
struct mt76_queue *q = queue_map[i].q;
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
114
if (!q)
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
119
queue_map[i].queue, q->queued, q->head,
drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c
120
q->tail);
drivers/net/wireless/mediatek/mt76/mt792x_dma.c
76
void mt792x_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
drivers/net/wireless/mediatek/mt76/mt792x_dma.c
81
if (q == MT_RXQ_MAIN)
drivers/net/wireless/mediatek/mt76/mt792x_dma.c
83
else if (q == MT_RXQ_MCU_WA)
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
760
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
774
queue_map[1].q = phy->mt76->q_tx[MT_TXQ_BE];
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
778
queue_map[2].q = phy->mt76->q_tx[MT_TXQ_BE];
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
782
struct mt76_queue *q = queue_map[i].q;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
784
if (!q)
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
788
queue_map[i].queue, q->queued, q->head,
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
789
q->tail);
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
48
#define Q_CONFIG(q, wfdma, int, id) do { \
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
50
dev->q_wfdma_mask |= (1 << (q)); \
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
51
dev->q_int_mask[(q)] = int; \
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
52
dev->q_id[(q)] = id; \
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
55
#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
56
#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
57
#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
973
struct mt76_queue *q = &dev->mt76.q_rx[i];
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
976
if (mt76_queue_is_wed_rro(q) ||
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
977
mt76_queue_is_wed_tx_free(q)) {
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
978
if (force && mt76_queue_is_wed_rro_data(q))
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
979
mt76_queue_reset(dev, q, false);
drivers/net/wireless/mediatek/mt76/mt7996/dma.c
983
mt76_queue_reset(dev, q, true);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1655
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1675
q == MT_RXQ_TXFREE_BAND2) {
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1696
if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1697
mt76_rx(&dev->mt76, q, skb);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1846
int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1861
p->q = q;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
190
u8 q = mt76_connac_lmac_mapping(i);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
191
u32 tx_cur = tx_time[q];
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1916
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
192
u32 rx_cur = rx_time[q];
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1949
SKB_WITH_OVERHEAD(p->q->buf_size),
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1950
page_pool_get_dma_dir(p->q->page_pool));
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1966
q = &mdev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1968
SKB_WITH_OVERHEAD(q->buf_size),
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1969
page_pool_get_dma_dir(q->page_pool));
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1977
if (q->rx_head)
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1978
data_len = q->buf_size;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1980
data_len = SKB_WITH_OVERHEAD(q->buf_size);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1982
if (data_len < len + q->buf_offset) {
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1983
dev_kfree_skb(q->rx_head);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1985
q->rx_head = NULL;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1991
if (q->rx_head) {
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1995
dev_kfree_skb(q->rx_head);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
1996
q->rx_head = NULL;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2004
skb = build_skb(buf, q->buf_size);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2008
skb_reserve(skb, q->buf_offset);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
2018
q->rx_head = skb;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
423
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
433
if (!mt76_queue_is_wed_rx(q))
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
450
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
719
mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
3454
struct ieee80211_tx_queue_params *q = &link_info->queue_params[ac];
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
3463
e->aifs = q->aifs;
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
3464
e->txop = cpu_to_le16(q->txop);
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
3466
if (q->cw_min)
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
3467
e->cw_min = fls(q->cw_min);
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
3471
if (q->cw_max)
drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
3472
e->cw_max = fls(q->cw_max);
drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
297
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
708
enum mt76_rxq_id q)
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
710
if (q == MT_RXQ_NPU0 || q == MT_RXQ_NPU1) {
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
715
airoha_npu_wlan_enable_irq(npu, q - MT_RXQ_NPU0);
drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
720
mt7996_irq_enable(dev, MT_INT_RX(q));
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
312
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
836
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
839
int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/mt7996/npu.c
215
struct mt76_queue *q = &dev->mt76.q_rx[MT_RXQ_MAIN_WA];
drivers/net/wireless/mediatek/mt76/mt7996/npu.c
221
q->desc_dma, GFP_KERNEL);
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
502
#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
503
#define __TXQ(q) (__RXQ(q) + __MT_RXQ_MAX)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
505
#define MT_Q_ID(q) (dev->q_id[(q)])
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
506
#define MT_Q_BASE(q) ((dev->q_wfdma_mask >> (q)) & 0x1 ? \
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
509
#define MT_MCUQ_ID(q) MT_Q_ID(q)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
510
#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
511
#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
513
#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
514
#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
515
#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
520
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
521
MT_MCUQ_ID(q) * 0x4)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
522
#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
523
MT_RXQ_ID(q) * 0x4)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
524
#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
525
MT_TXQ_ID(q) * 0x4)
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
560
#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
drivers/net/wireless/mediatek/mt76/mt7996/regs.h
561
#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
drivers/net/wireless/mediatek/mt76/npu.c
103
len, q->buf_size);
drivers/net/wireless/mediatek/mt76/npu.c
107
index = (index + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/npu.c
109
q->tail = index;
drivers/net/wireless/mediatek/mt76/npu.c
110
q->queued -= i;
drivers/net/wireless/mediatek/mt76/npu.c
111
Q_WRITE(q, dma_idx, q->tail);
drivers/net/wireless/mediatek/mt76/npu.c
17
int mt76_npu_fill_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/npu.c
182
struct mt76_queue *q = q_instance;
drivers/net/wireless/mediatek/mt76/npu.c
183
struct mt76_dev *dev = q->dev;
drivers/net/wireless/mediatek/mt76/npu.c
184
int qid = q - &dev->q_rx[0];
drivers/net/wireless/mediatek/mt76/npu.c
206
int mt76_npu_dma_add_buf(struct mt76_phy *phy, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/npu.c
21
while (q->queued < q->ndesc - 1) {
drivers/net/wireless/mediatek/mt76/npu.c
211
struct airoha_npu_tx_dma_desc *desc = (void *)q->desc;
drivers/net/wireless/mediatek/mt76/npu.c
215
memcpy(desc[q->head].txwi, txwi_ptr, txwi_len);
drivers/net/wireless/mediatek/mt76/npu.c
216
desc[q->head].addr = buf->addr;
drivers/net/wireless/mediatek/mt76/npu.c
217
desc[q->head].ctrl = FIELD_PREP(NPU_TX_DMA_DESC_VEND_LEN_MASK, txwi_len) |
drivers/net/wireless/mediatek/mt76/npu.c
22
struct airoha_npu_rx_dma_desc *desc = (void *)q->desc;
drivers/net/wireless/mediatek/mt76/npu.c
221
ret = q->head;
drivers/net/wireless/mediatek/mt76/npu.c
222
q->entry[q->head].skip_buf0 = true;
drivers/net/wireless/mediatek/mt76/npu.c
223
q->entry[q->head].skip_buf1 = true;
drivers/net/wireless/mediatek/mt76/npu.c
224
q->entry[q->head].txwi = NULL;
drivers/net/wireless/mediatek/mt76/npu.c
225
q->entry[q->head].skb = NULL;
drivers/net/wireless/mediatek/mt76/npu.c
226
q->entry[q->head].wcid = 0xffff;
drivers/net/wireless/mediatek/mt76/npu.c
228
q->head = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/npu.c
229
q->queued++;
drivers/net/wireless/mediatek/mt76/npu.c
23
struct mt76_queue_entry *e = &q->entry[q->head];
drivers/net/wireless/mediatek/mt76/npu.c
234
void mt76_npu_txdesc_cleanup(struct mt76_queue *q, int index)
drivers/net/wireless/mediatek/mt76/npu.c
236
struct airoha_npu_tx_dma_desc *desc = (void *)q->desc;
drivers/net/wireless/mediatek/mt76/npu.c
238
if (!mt76_queue_is_npu_tx(q))
drivers/net/wireless/mediatek/mt76/npu.c
244
void mt76_npu_queue_setup(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/npu.c
246
int qid = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
drivers/net/wireless/mediatek/mt76/npu.c
247
bool xmit = mt76_queue_is_npu_tx(q);
drivers/net/wireless/mediatek/mt76/npu.c
250
if (!mt76_queue_is_npu(q))
drivers/net/wireless/mediatek/mt76/npu.c
255
q->wed_regs = airoha_npu_wlan_get_queue_addr(npu, qid, xmit);
drivers/net/wireless/mediatek/mt76/npu.c
258
int mt76_npu_rx_queue_init(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/npu.c
260
int err, irq, qid = q - &dev->q_rx[0];
drivers/net/wireless/mediatek/mt76/npu.c
27
e->buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
drivers/net/wireless/mediatek/mt76/npu.c
275
q->flags = MT_NPU_Q_RX(index);
drivers/net/wireless/mediatek/mt76/npu.c
277
err = dev->queue_ops->alloc(dev, q, 0, size,
drivers/net/wireless/mediatek/mt76/npu.c
289
IRQF_SHARED, name, q);
drivers/net/wireless/mediatek/mt76/npu.c
294
mt76_npu_fill_rx_queue(dev, q);
drivers/net/wireless/mediatek/mt76/npu.c
31
e->dma_len[0] = SKB_WITH_OVERHEAD(q->buf_size);
drivers/net/wireless/mediatek/mt76/npu.c
35
memset(&desc[q->head], 0, sizeof(*desc));
drivers/net/wireless/mediatek/mt76/npu.c
36
desc[q->head].addr = e->dma_addr[0];
drivers/net/wireless/mediatek/mt76/npu.c
38
q->head = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/npu.c
39
q->queued++;
drivers/net/wireless/mediatek/mt76/npu.c
46
void mt76_npu_queue_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/npu.c
48
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/npu.c
49
while (q->queued > 0) {
drivers/net/wireless/mediatek/mt76/npu.c
50
struct mt76_queue_entry *e = &q->entry[q->tail];
drivers/net/wireless/mediatek/mt76/npu.c
54
page_pool_get_dma_dir(q->page_pool));
drivers/net/wireless/mediatek/mt76/npu.c
56
q->tail = (q->tail + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/npu.c
57
q->queued--;
drivers/net/wireless/mediatek/mt76/npu.c
59
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/npu.c
63
struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/npu.c
66
struct airoha_npu_rx_dma_desc *desc = (void *)q->desc;
drivers/net/wireless/mediatek/mt76/npu.c
67
int i, nframes, index = q->tail;
drivers/net/wireless/mediatek/mt76/npu.c
74
struct mt76_queue_entry *e = &q->entry[index];
drivers/net/wireless/mediatek/mt76/npu.c
85
page_pool_get_dma_dir(q->page_pool));
drivers/net/wireless/mediatek/mt76/npu.c
88
skb = napi_build_skb(e->buf, q->buf_size);
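Note: the mt76 npu.c hits above all instantiate one circular-buffer idiom — the producer advances q->head, the consumer advances q->tail, both modulo q->ndesc, with q->queued tracking occupancy. A minimal standalone sketch of that idiom (illustrative names, not the driver's own types):

#include <stdio.h>

struct ring {
	int head;   /* next slot to fill */
	int tail;   /* next slot to drain */
	int queued; /* current occupancy */
	int ndesc;  /* ring size */
	int slot[8];
};

static int ring_push(struct ring *q, int v)
{
	if (q->queued == q->ndesc)
		return -1;              /* full */
	q->slot[q->head] = v;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	return 0;
}

static int ring_pop(struct ring *q, int *v)
{
	if (q->queued == 0)
		return -1;              /* empty */
	*v = q->slot[q->tail];
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	return 0;
}

int main(void)
{
	struct ring q = { .ndesc = 8 };
	int v;

	ring_push(&q, 42);
	ring_pop(&q, &v);
	printf("%d queued=%d\n", v, q.queued);
	return 0;
}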
drivers/net/wireless/mediatek/mt76/sdio.c
306
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/sdio.c
308
spin_lock_init(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio.c
309
q->entry = devm_kcalloc(dev->dev,
drivers/net/wireless/mediatek/mt76/sdio.c
310
MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
drivers/net/wireless/mediatek/mt76/sdio.c
312
if (!q->entry)
drivers/net/wireless/mediatek/mt76/sdio.c
315
q->ndesc = MT76S_NUM_RX_ENTRIES;
drivers/net/wireless/mediatek/mt76/sdio.c
316
q->head = q->tail = 0;
drivers/net/wireless/mediatek/mt76/sdio.c
317
q->queued = 0;
drivers/net/wireless/mediatek/mt76/sdio.c
325
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/sdio.c
327
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
drivers/net/wireless/mediatek/mt76/sdio.c
328
if (!q)
drivers/net/wireless/mediatek/mt76/sdio.c
331
spin_lock_init(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio.c
332
q->entry = devm_kcalloc(dev->dev,
drivers/net/wireless/mediatek/mt76/sdio.c
333
MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
drivers/net/wireless/mediatek/mt76/sdio.c
335
if (!q->entry)
drivers/net/wireless/mediatek/mt76/sdio.c
338
q->ndesc = MT76S_NUM_TX_ENTRIES;
drivers/net/wireless/mediatek/mt76/sdio.c
340
return q;
drivers/net/wireless/mediatek/mt76/sdio.c
345
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/sdio.c
349
q = mt76s_alloc_tx_queue(dev);
drivers/net/wireless/mediatek/mt76/sdio.c
350
if (IS_ERR(q))
drivers/net/wireless/mediatek/mt76/sdio.c
351
return PTR_ERR(q);
drivers/net/wireless/mediatek/mt76/sdio.c
353
dev->phy.q_tx[i] = q;
drivers/net/wireless/mediatek/mt76/sdio.c
356
q = mt76s_alloc_tx_queue(dev);
drivers/net/wireless/mediatek/mt76/sdio.c
357
if (IS_ERR(q))
drivers/net/wireless/mediatek/mt76/sdio.c
358
return PTR_ERR(q);
drivers/net/wireless/mediatek/mt76/sdio.c
360
dev->q_mcu[MT_MCUQ_WM] = q;
drivers/net/wireless/mediatek/mt76/sdio.c
367
mt76s_get_next_rx_entry(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/sdio.c
371
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio.c
372
if (q->queued > 0) {
drivers/net/wireless/mediatek/mt76/sdio.c
373
e = &q->entry[q->tail];
drivers/net/wireless/mediatek/mt76/sdio.c
374
q->tail = (q->tail + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/sdio.c
375
q->queued--;
drivers/net/wireless/mediatek/mt76/sdio.c
377
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio.c
383
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/sdio.c
385
int qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/sdio.c
394
e = mt76s_get_next_rx_entry(q);
drivers/net/wireless/mediatek/mt76/sdio.c
429
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/sdio.c
435
if (!q)
drivers/net/wireless/mediatek/mt76/sdio.c
438
mcu = q == dev->q_mcu[MT_MCUQ_WM];
drivers/net/wireless/mediatek/mt76/sdio.c
439
while (q->queued > 0) {
drivers/net/wireless/mediatek/mt76/sdio.c
440
if (!q->entry[q->tail].done)
drivers/net/wireless/mediatek/mt76/sdio.c
443
entry = q->entry[q->tail];
drivers/net/wireless/mediatek/mt76/sdio.c
444
q->entry[q->tail].done = false;
drivers/net/wireless/mediatek/mt76/sdio.c
451
mt76_queue_tx_complete(dev, q, &entry);
drivers/net/wireless/mediatek/mt76/sdio.c
455
if (!q->queued)
drivers/net/wireless/mediatek/mt76/sdio.c
518
mt76s_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/sdio.c
527
u16 idx = q->head;
drivers/net/wireless/mediatek/mt76/sdio.c
529
if (q->queued == q->ndesc)
drivers/net/wireless/mediatek/mt76/sdio.c
537
q->entry[q->head].skb = tx_info.skb;
drivers/net/wireless/mediatek/mt76/sdio.c
538
q->entry[q->head].buf_sz = len;
drivers/net/wireless/mediatek/mt76/sdio.c
539
q->entry[q->head].wcid = 0xffff;
drivers/net/wireless/mediatek/mt76/sdio.c
543
q->head = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/sdio.c
544
q->queued++;
drivers/net/wireless/mediatek/mt76/sdio.c
550
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/sdio.c
560
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio.c
562
if (q->queued == q->ndesc) {
drivers/net/wireless/mediatek/mt76/sdio.c
564
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio.c
568
q->entry[q->head].buf_sz = len;
drivers/net/wireless/mediatek/mt76/sdio.c
569
q->entry[q->head].skb = skb;
drivers/net/wireless/mediatek/mt76/sdio.c
574
q->head = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/sdio.c
575
q->queued++;
drivers/net/wireless/mediatek/mt76/sdio.c
577
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio.c
587
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/sdio.c
619
struct mt76_queue *q = &dev->q_rx[i];
drivers/net/wireless/mediatek/mt76/sdio.c
622
for (j = 0; j < q->ndesc; j++) {
drivers/net/wireless/mediatek/mt76/sdio.c
623
struct mt76_queue_entry *e = &q->entry[j];
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
124
int index = (q->head + i) % q->ndesc;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
125
struct mt76_queue_entry *e = &q->entry[index];
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
138
if (q->queued + i + 1 == q->ndesc)
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
146
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
147
q->head = (q->head + i) % q->ndesc;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
148
q->queued += i;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
149
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
245
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
248
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
252
while (q->first != q->head) {
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
253
struct mt76_queue_entry *e = &q->entry[q->first];
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
298
q->first = (q->first + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
375
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
380
q = dev->phy.q_tx[i];
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
382
q = dev->q_mcu[MT_MCUQ_WM];
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
384
if (q->first != q->head)
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
88
struct mt76_queue *q = &dev->q_rx[qid];
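Note: the mt76s_process_tx_queue hits above show an in-order completion reap — the loop stops at the first entry whose done flag is still clear, so entries retire strictly in submission order even if the hardware signals them out of order. A sketch under that reading (hypothetical types, not the driver's):

#include <stdio.h>

struct entry { int done; };

static int reap(struct entry *e, int *tail, int *queued, int ndesc)
{
	int n = 0;

	while (*queued > 0) {
		if (!e[*tail].done)
			break;          /* oldest entry still in flight */
		e[*tail].done = 0;      /* consume the completion */
		*tail = (*tail + 1) % ndesc;
		(*queued)--;
		n++;
	}
	return n;                       /* completions reaped */
}

int main(void)
{
	struct entry e[4] = { {1}, {1}, {0}, {1} };
	int tail = 0, queued = 4;

	/* entry 3 is done but entry 2 is not, so only 0 and 1 retire */
	printf("reaped %d, tail now %d\n", reap(e, &tail, &queued, 4), tail);
	return 0;
}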
drivers/net/wireless/mediatek/mt76/testmode.c
37
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/testmode.c
45
q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/testmode.c
49
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/testmode.c
53
q->queued < q->ndesc / 2) {
drivers/net/wireless/mediatek/mt76/testmode.c
56
ret = dev->queue_ops->tx_queue_skb(phy, q, qid, skb_get(skb),
drivers/net/wireless/mediatek/mt76/testmode.c
65
dev->queue_ops->kick(dev, q);
drivers/net/wireless/mediatek/mt76/testmode.c
67
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
303
struct mt76_queue *q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/tx.c
310
idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
drivers/net/wireless/mediatek/mt76/tx.c
318
q->entry[idx].wcid = wcid->idx;
drivers/net/wireless/mediatek/mt76/tx.c
451
mt76_txq_stopped(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/tx.c
453
return q->stopped || q->blocked ||
drivers/net/wireless/mediatek/mt76/tx.c
454
q->queued + MT_TXQ_FREE_THR >= q->ndesc;
drivers/net/wireless/mediatek/mt76/tx.c
458
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/tx.c
485
spin_lock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
487
spin_unlock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
495
if (stop || mt76_txq_stopped(q))
drivers/net/wireless/mediatek/mt76/tx.c
507
spin_lock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
509
spin_unlock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
516
spin_lock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
517
dev->queue_ops->kick(dev, q);
drivers/net/wireless/mediatek/mt76/tx.c
518
spin_unlock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
530
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/tx.c
549
q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/tx.c
551
q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
drivers/net/wireless/mediatek/mt76/tx.c
552
dev->queue_ops->tx_cleanup(dev, q, false);
drivers/net/wireless/mediatek/mt76/tx.c
566
if (!mt76_txq_stopped(q))
drivers/net/wireless/mediatek/mt76/tx.c
567
n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);
drivers/net/wireless/mediatek/mt76/tx.c
607
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/tx.c
625
q = phy->q_tx[qid];
drivers/net/wireless/mediatek/mt76/tx.c
626
if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
drivers/net/wireless/mediatek/mt76/tx.c
635
spin_lock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
637
dev->queue_ops->kick(dev, q);
drivers/net/wireless/mediatek/mt76/tx.c
638
spin_unlock(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
805
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/tx.c
811
spin_lock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
812
q->tail = (q->tail + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/tx.c
813
q->queued--;
drivers/net/wireless/mediatek/mt76/tx.c
814
spin_unlock_bh(&q->lock);
drivers/net/wireless/mediatek/mt76/tx.c
821
struct mt76_queue *q = phy->q_tx[0];
drivers/net/wireless/mediatek/mt76/tx.c
823
if (blocked == q->blocked)
drivers/net/wireless/mediatek/mt76/tx.c
826
q->blocked = blocked;
drivers/net/wireless/mediatek/mt76/tx.c
830
q = phy->q_tx[0];
drivers/net/wireless/mediatek/mt76/tx.c
831
q->blocked = blocked;
drivers/net/wireless/mediatek/mt76/tx.c
835
q = phy->q_tx[0];
drivers/net/wireless/mediatek/mt76/tx.c
836
q->blocked = blocked;
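Note: the mt76_txq_stopped lines above gate transmission on a free-slot watermark, and the scheduler lines trigger cleanup at twice that margin. A worked check of both comparisons, assuming for illustration a threshold of 8 and a 256-entry ring (the real MT_TXQ_FREE_THR value may differ):

#include <stdio.h>

#define FREE_THR 8	/* assumed, stands in for MT_TXQ_FREE_THR */

int main(void)
{
	int ndesc = 256;

	for (int queued = 246; queued <= 249; queued++)
		printf("queued=%d stopped=%d cleanup=%d\n", queued,
		       queued + FREE_THR >= ndesc,        /* stop TX */
		       queued + 2 * FREE_THR >= ndesc);   /* early cleanup */
	return 0;
}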
drivers/net/wireless/mediatek/mt76/usb.c
1004
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/usb.c
1010
q = dev->phy.q_tx[i];
drivers/net/wireless/mediatek/mt76/usb.c
1011
if (!q)
drivers/net/wireless/mediatek/mt76/usb.c
1014
for (j = 0; j < q->ndesc; j++)
drivers/net/wireless/mediatek/mt76/usb.c
1015
usb_kill_urb(q->entry[j].urb);
drivers/net/wireless/mediatek/mt76/usb.c
1024
q = dev->phy.q_tx[i];
drivers/net/wireless/mediatek/mt76/usb.c
1025
if (!q)
drivers/net/wireless/mediatek/mt76/usb.c
1028
while (q->queued > 0) {
drivers/net/wireless/mediatek/mt76/usb.c
1029
entry = q->entry[q->tail];
drivers/net/wireless/mediatek/mt76/usb.c
1030
q->entry[q->tail].done = false;
drivers/net/wireless/mediatek/mt76/usb.c
1031
mt76_queue_tx_complete(dev, q, &entry);
drivers/net/wireless/mediatek/mt76/usb.c
320
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
drivers/net/wireless/mediatek/mt76/usb.c
329
data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
drivers/net/wireless/mediatek/mt76/usb.c
333
sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
drivers/net/wireless/mediatek/mt76/usb.c
346
urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
drivers/net/wireless/mediatek/mt76/usb.c
353
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/usb.c
356
enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/usb.c
360
return mt76u_fill_rx_sg(dev, q, urb, nsgs);
drivers/net/wireless/mediatek/mt76/usb.c
362
urb->transfer_buffer_length = q->buf_size;
drivers/net/wireless/mediatek/mt76/usb.c
363
urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
drivers/net/wireless/mediatek/mt76/usb.c
390
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/usb.c
393
enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/usb.c
401
return mt76u_refill_rx(dev, q, e->urb, sg_size);
drivers/net/wireless/mediatek/mt76/usb.c
438
mt76u_get_next_rx_entry(struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/usb.c
443
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/mediatek/mt76/usb.c
444
if (q->queued > 0) {
drivers/net/wireless/mediatek/mt76/usb.c
445
urb = q->entry[q->tail].urb;
drivers/net/wireless/mediatek/mt76/usb.c
446
q->tail = (q->tail + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/usb.c
447
q->queued--;
drivers/net/wireless/mediatek/mt76/usb.c
449
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wireless/mediatek/mt76/usb.c
558
struct mt76_queue *q = urb->context;
drivers/net/wireless/mediatek/mt76/usb.c
577
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/mediatek/mt76/usb.c
578
if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
drivers/net/wireless/mediatek/mt76/usb.c
581
q->head = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/usb.c
582
q->queued++;
drivers/net/wireless/mediatek/mt76/usb.c
585
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wireless/mediatek/mt76/usb.c
602
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/usb.c
604
int qid = q - &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/usb.c
609
urb = mt76u_get_next_rx_entry(q);
drivers/net/wireless/mediatek/mt76/usb.c
613
count = mt76u_process_rx_entry(dev, urb, q->buf_size);
drivers/net/wireless/mediatek/mt76/usb.c
615
err = mt76u_refill_rx(dev, q, urb, count);
drivers/net/wireless/mediatek/mt76/usb.c
643
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/usb.c
647
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/mediatek/mt76/usb.c
648
for (i = 0; i < q->ndesc; i++) {
drivers/net/wireless/mediatek/mt76/usb.c
649
err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
drivers/net/wireless/mediatek/mt76/usb.c
653
q->head = q->tail = 0;
drivers/net/wireless/mediatek/mt76/usb.c
654
q->queued = 0;
drivers/net/wireless/mediatek/mt76/usb.c
655
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wireless/mediatek/mt76/usb.c
663
struct mt76_queue *q = &dev->q_rx[qid];
drivers/net/wireless/mediatek/mt76/usb.c
666
err = mt76_create_page_pool(dev, q);
drivers/net/wireless/mediatek/mt76/usb.c
670
spin_lock_init(&q->lock);
drivers/net/wireless/mediatek/mt76/usb.c
671
q->entry = devm_kcalloc(dev->dev,
drivers/net/wireless/mediatek/mt76/usb.c
672
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
drivers/net/wireless/mediatek/mt76/usb.c
674
if (!q->entry)
drivers/net/wireless/mediatek/mt76/usb.c
677
q->ndesc = MT_NUM_RX_ENTRIES;
drivers/net/wireless/mediatek/mt76/usb.c
678
q->buf_size = PAGE_SIZE;
drivers/net/wireless/mediatek/mt76/usb.c
680
for (i = 0; i < q->ndesc; i++) {
drivers/net/wireless/mediatek/mt76/usb.c
681
err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
drivers/net/wireless/mediatek/mt76/usb.c
696
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/usb.c
700
for (i = 0; i < q->ndesc; i++) {
drivers/net/wireless/mediatek/mt76/usb.c
701
if (!q->entry[i].urb)
drivers/net/wireless/mediatek/mt76/usb.c
704
mt76u_urb_free(q->entry[i].urb);
drivers/net/wireless/mediatek/mt76/usb.c
705
q->entry[i].urb = NULL;
drivers/net/wireless/mediatek/mt76/usb.c
707
page_pool_destroy(q->page_pool);
drivers/net/wireless/mediatek/mt76/usb.c
708
q->page_pool = NULL;
drivers/net/wireless/mediatek/mt76/usb.c
728
struct mt76_queue *q = &dev->q_rx[i];
drivers/net/wireless/mediatek/mt76/usb.c
731
for (j = 0; j < q->ndesc; j++)
drivers/net/wireless/mediatek/mt76/usb.c
732
usb_poison_urb(q->entry[j].urb);
drivers/net/wireless/mediatek/mt76/usb.c
742
struct mt76_queue *q = &dev->q_rx[i];
drivers/net/wireless/mediatek/mt76/usb.c
745
for (j = 0; j < q->ndesc; j++)
drivers/net/wireless/mediatek/mt76/usb.c
746
usb_unpoison_urb(q->entry[j].urb);
drivers/net/wireless/mediatek/mt76/usb.c
764
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/usb.c
771
q = dev->phy.q_tx[i];
drivers/net/wireless/mediatek/mt76/usb.c
772
if (!q)
drivers/net/wireless/mediatek/mt76/usb.c
775
while (q->queued > 0) {
drivers/net/wireless/mediatek/mt76/usb.c
776
if (!q->entry[q->tail].done)
drivers/net/wireless/mediatek/mt76/usb.c
779
entry = q->entry[q->tail];
drivers/net/wireless/mediatek/mt76/usb.c
780
q->entry[q->tail].done = false;
drivers/net/wireless/mediatek/mt76/usb.c
782
mt76_queue_tx_complete(dev, q, &entry);
drivers/net/wireless/mediatek/mt76/usb.c
785
if (!q->queued)
drivers/net/wireless/mediatek/mt76/usb.c
853
mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
drivers/net/wireless/mediatek/mt76/usb.c
861
u16 idx = q->head;
drivers/net/wireless/mediatek/mt76/usb.c
864
if (q->queued == q->ndesc)
drivers/net/wireless/mediatek/mt76/usb.c
872
err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
drivers/net/wireless/mediatek/mt76/usb.c
876
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
drivers/net/wireless/mediatek/mt76/usb.c
877
mt76u_complete_tx, &q->entry[idx]);
drivers/net/wireless/mediatek/mt76/usb.c
879
q->head = (q->head + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/usb.c
880
q->entry[idx].skb = tx_info.skb;
drivers/net/wireless/mediatek/mt76/usb.c
881
q->entry[idx].wcid = 0xffff;
drivers/net/wireless/mediatek/mt76/usb.c
882
q->queued++;
drivers/net/wireless/mediatek/mt76/usb.c
887
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
drivers/net/wireless/mediatek/mt76/usb.c
892
while (q->first != q->head) {
drivers/net/wireless/mediatek/mt76/usb.c
893
urb = q->entry[q->first].urb;
drivers/net/wireless/mediatek/mt76/usb.c
905
q->first = (q->first + 1) % q->ndesc;
drivers/net/wireless/mediatek/mt76/usb.c
910
mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
drivers/net/wireless/mediatek/mt76/usb.c
924
q->hw_idx = lmac_queue_map[ac];
drivers/net/wireless/mediatek/mt76/usb.c
925
q->ep = q->hw_idx + 1;
drivers/net/wireless/mediatek/mt76/usb.c
930
q->hw_idx = mt76_ac_to_hwq(ac);
drivers/net/wireless/mediatek/mt76/usb.c
931
q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
drivers/net/wireless/mediatek/mt76/usb.c
934
q->hw_idx = mt76_ac_to_hwq(ac);
drivers/net/wireless/mediatek/mt76/usb.c
935
q->ep = q->hw_idx + 1;
drivers/net/wireless/mediatek/mt76/usb.c
945
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/usb.c
948
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
drivers/net/wireless/mediatek/mt76/usb.c
949
if (!q)
drivers/net/wireless/mediatek/mt76/usb.c
952
spin_lock_init(&q->lock);
drivers/net/wireless/mediatek/mt76/usb.c
953
mt76u_ac_to_hwq(dev, q, i);
drivers/net/wireless/mediatek/mt76/usb.c
954
dev->phy.q_tx[i] = q;
drivers/net/wireless/mediatek/mt76/usb.c
956
q->entry = devm_kcalloc(dev->dev,
drivers/net/wireless/mediatek/mt76/usb.c
957
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
drivers/net/wireless/mediatek/mt76/usb.c
959
if (!q->entry)
drivers/net/wireless/mediatek/mt76/usb.c
962
q->ndesc = MT_NUM_TX_ENTRIES;
drivers/net/wireless/mediatek/mt76/usb.c
963
for (j = 0; j < q->ndesc; j++) {
drivers/net/wireless/mediatek/mt76/usb.c
964
err = mt76u_urb_alloc(dev, &q->entry[j],
drivers/net/wireless/mediatek/mt76/usb.c
980
struct mt76_queue *q;
drivers/net/wireless/mediatek/mt76/usb.c
983
q = dev->phy.q_tx[i];
drivers/net/wireless/mediatek/mt76/usb.c
984
if (!q)
drivers/net/wireless/mediatek/mt76/usb.c
987
for (j = 0; j < q->ndesc; j++) {
drivers/net/wireless/mediatek/mt76/usb.c
988
usb_free_urb(q->entry[j].urb);
drivers/net/wireless/mediatek/mt76/usb.c
989
q->entry[j].urb = NULL;
drivers/net/wireless/mediatek/mt76/wed.c
101
flags = q->flags;
drivers/net/wireless/mediatek/mt76/wed.c
102
if (!q->wed || !mtk_wed_device_active(q->wed))
drivers/net/wireless/mediatek/mt76/wed.c
103
q->flags &= ~MT_QFLAG_WED;
drivers/net/wireless/mediatek/mt76/wed.c
105
if (!(q->flags & MT_QFLAG_WED))
drivers/net/wireless/mediatek/mt76/wed.c
108
type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
drivers/net/wireless/mediatek/mt76/wed.c
109
ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
drivers/net/wireless/mediatek/mt76/wed.c
113
ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
drivers/net/wireless/mediatek/mt76/wed.c
116
q->wed_regs = q->wed->tx_ring[ring].reg_base;
drivers/net/wireless/mediatek/mt76/wed.c
120
q->flags = 0;
drivers/net/wireless/mediatek/mt76/wed.c
121
mt76_dma_queue_reset(dev, q, true);
drivers/net/wireless/mediatek/mt76/wed.c
122
mt76_dma_rx_fill(dev, q, false);
drivers/net/wireless/mediatek/mt76/wed.c
124
ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
drivers/net/wireless/mediatek/mt76/wed.c
126
q->wed_regs = q->wed->txfree_ring.reg_base;
drivers/net/wireless/mediatek/mt76/wed.c
129
ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
drivers/net/wireless/mediatek/mt76/wed.c
132
q->wed_regs = q->wed->rx_ring[ring].reg_base;
drivers/net/wireless/mediatek/mt76/wed.c
135
q->flags &= ~MT_QFLAG_WED;
drivers/net/wireless/mediatek/mt76/wed.c
136
mt76_dma_queue_reset(dev, q, false);
drivers/net/wireless/mediatek/mt76/wed.c
137
mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
drivers/net/wireless/mediatek/mt76/wed.c
138
q->head = q->ndesc - 1;
drivers/net/wireless/mediatek/mt76/wed.c
139
q->queued = q->head;
drivers/net/wireless/mediatek/mt76/wed.c
142
q->flags &= ~MT_QFLAG_WED;
drivers/net/wireless/mediatek/mt76/wed.c
143
mt76_dma_queue_reset(dev, q, false);
drivers/net/wireless/mediatek/mt76/wed.c
144
mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
drivers/net/wireless/mediatek/mt76/wed.c
145
q->head = q->ndesc - 1;
drivers/net/wireless/mediatek/mt76/wed.c
146
q->queued = q->head;
drivers/net/wireless/mediatek/mt76/wed.c
149
q->flags &= ~MT_QFLAG_WED;
drivers/net/wireless/mediatek/mt76/wed.c
150
mt76_dma_queue_reset(dev, q, true);
drivers/net/wireless/mediatek/mt76/wed.c
151
mt76_dma_rx_fill(dev, q, false);
drivers/net/wireless/mediatek/mt76/wed.c
152
mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
drivers/net/wireless/mediatek/mt76/wed.c
158
q->flags = flags;
drivers/net/wireless/mediatek/mt76/wed.c
36
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
drivers/net/wireless/mediatek/mt76/wed.c
50
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
drivers/net/wireless/mediatek/mt76/wed.c
93
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
drivers/net/wireless/mediatek/mt76/wed.c
98
if (!q || !q->ndesc)
drivers/net/wireless/mediatek/mt7601u/dma.c
173
struct mt7601u_rx_queue *q = &dev->rx_q;
drivers/net/wireless/mediatek/mt7601u/dma.c
179
if (!q->pending)
drivers/net/wireless/mediatek/mt7601u/dma.c
182
buf = &q->e[q->start];
drivers/net/wireless/mediatek/mt7601u/dma.c
183
q->pending--;
drivers/net/wireless/mediatek/mt7601u/dma.c
184
q->start = (q->start + 1) % q->entries;
drivers/net/wireless/mediatek/mt7601u/dma.c
194
struct mt7601u_rx_queue *q = &dev->rx_q;
drivers/net/wireless/mediatek/mt7601u/dma.c
215
if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
drivers/net/wireless/mediatek/mt7601u/dma.c
218
q->end = (q->end + 1) % q->entries;
drivers/net/wireless/mediatek/mt7601u/dma.c
219
q->pending++;
drivers/net/wireless/mediatek/mt7601u/dma.c
241
struct mt7601u_tx_queue *q = urb->context;
drivers/net/wireless/mediatek/mt7601u/dma.c
242
struct mt7601u_dev *dev = q->dev;
drivers/net/wireless/mediatek/mt7601u/dma.c
261
if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
drivers/net/wireless/mediatek/mt7601u/dma.c
264
skb = q->e[q->start].skb;
drivers/net/wireless/mediatek/mt7601u/dma.c
265
q->e[q->start].skb = NULL;
drivers/net/wireless/mediatek/mt7601u/dma.c
271
if (q->used == q->entries - q->entries / 8)
drivers/net/wireless/mediatek/mt7601u/dma.c
274
q->start = (q->start + 1) % q->entries;
drivers/net/wireless/mediatek/mt7601u/dma.c
275
q->used--;
drivers/net/wireless/mediatek/mt7601u/dma.c
312
struct mt7601u_tx_queue *q = &dev->tx_q[ep];
drivers/net/wireless/mediatek/mt7601u/dma.c
318
if (WARN_ON(q->entries <= q->used)) {
drivers/net/wireless/mediatek/mt7601u/dma.c
323
e = &q->e[q->end];
drivers/net/wireless/mediatek/mt7601u/dma.c
325
mt7601u_complete_tx, q);
drivers/net/wireless/mediatek/mt7601u/dma.c
339
q->end = (q->end + 1) % q->entries;
drivers/net/wireless/mediatek/mt7601u/dma.c
340
q->used++;
drivers/net/wireless/mediatek/mt7601u/dma.c
343
if (q->used >= q->entries)
drivers/net/wireless/mediatek/mt7601u/dma.c
461
static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
drivers/net/wireless/mediatek/mt7601u/dma.c
465
for (i = 0; i < q->entries; i++) {
drivers/net/wireless/mediatek/mt7601u/dma.c
466
usb_poison_urb(q->e[i].urb);
drivers/net/wireless/mediatek/mt7601u/dma.c
467
if (q->e[i].skb)
drivers/net/wireless/mediatek/mt7601u/dma.c
468
mt7601u_tx_status(q->dev, q->e[i].skb);
drivers/net/wireless/mediatek/mt7601u/dma.c
469
usb_free_urb(q->e[i].urb);
drivers/net/wireless/mediatek/mt7601u/dma.c
485
struct mt7601u_tx_queue *q)
drivers/net/wireless/mediatek/mt7601u/dma.c
489
q->dev = dev;
drivers/net/wireless/mediatek/mt7601u/dma.c
490
q->entries = N_TX_ENTRIES;
drivers/net/wireless/mediatek/mt7601u/dma.c
493
q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
drivers/net/wireless/mediatek/mt7601u/dma.c
494
if (!q->e[i].urb)
drivers/net/wireless/mediatek/mt7601u/tx.c
21
static u8 q2hwq(u8 q)
drivers/net/wireless/mediatek/mt7601u/tx.c
23
return q ^ 0x3;
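Note: q2hwq() above remaps a queue index by XOR with 3, which simply reverses a 0..3 range: 0<->3, 1<->2. Presumably this flips between mac80211's access-category numbering and the hardware's queue-priority order; the arithmetic itself is easy to confirm:

#include <stdio.h>

int main(void)
{
	for (unsigned int q = 0; q < 4; q++)
		printf("ac %u -> hwq %u\n", q, q ^ 0x3);
	return 0;
}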
drivers/net/wireless/microchip/wilc1000/wlan.c
272
struct wilc_tx_queue_status *q = &wl->tx_q_limit;
drivers/net/wireless/microchip/wilc1000/wlan.c
278
if (!q->initialized) {
drivers/net/wireless/microchip/wilc1000/wlan.c
280
q->buffer[i] = i % NQUEUES;
drivers/net/wireless/microchip/wilc1000/wlan.c
283
q->cnt[i] = AC_BUFFER_SIZE * factors[i] / NQUEUES;
drivers/net/wireless/microchip/wilc1000/wlan.c
284
q->sum += q->cnt[i];
drivers/net/wireless/microchip/wilc1000/wlan.c
286
q->end_index = AC_BUFFER_SIZE - 1;
drivers/net/wireless/microchip/wilc1000/wlan.c
287
q->initialized = 1;
drivers/net/wireless/microchip/wilc1000/wlan.c
290
end_index = q->end_index;
drivers/net/wireless/microchip/wilc1000/wlan.c
291
q->cnt[q->buffer[end_index]] -= factors[q->buffer[end_index]];
drivers/net/wireless/microchip/wilc1000/wlan.c
292
q->cnt[q_num] += factors[q_num];
drivers/net/wireless/microchip/wilc1000/wlan.c
293
q->sum += (factors[q_num] - factors[q->buffer[end_index]]);
drivers/net/wireless/microchip/wilc1000/wlan.c
295
q->buffer[end_index] = q_num;
drivers/net/wireless/microchip/wilc1000/wlan.c
297
q->end_index--;
drivers/net/wireless/microchip/wilc1000/wlan.c
299
q->end_index = AC_BUFFER_SIZE - 1;
drivers/net/wireless/microchip/wilc1000/wlan.c
301
if (!q->sum)
drivers/net/wireless/microchip/wilc1000/wlan.c
304
q_limit = (q->cnt[q_num] * FLOW_CONTROL_UPPER_THRESHOLD / q->sum) + 1;
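Note: the wilc1000 lines above compute a per-queue limit proportional to that queue's recent share of traffic, plus one so no queue can starve. A worked instance with made-up numbers (the real FLOW_CONTROL_UPPER_THRESHOLD value may differ):

#include <stdio.h>

int main(void)
{
	unsigned int cnt = 100;		/* this queue's recent count */
	unsigned int sum = 400;		/* total across all queues */
	unsigned int upper = 256;	/* illustrative upper threshold */
	unsigned int q_limit = cnt * upper / sum + 1;

	printf("q_limit = %u\n", q_limit); /* 100*256/400 + 1 = 65 */
	return 0;
}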
drivers/net/wireless/purelifi/plfxlc/mac.c
175
struct sk_buff_head *q = NULL;
drivers/net/wireless/purelifi/plfxlc/mac.c
186
q = &mac->ack_wait_queue;
drivers/net/wireless/purelifi/plfxlc/mac.c
188
skb_queue_tail(q, skb);
drivers/net/wireless/purelifi/plfxlc/mac.c
189
while (skb_queue_len(q)/* > PURELIFI_MAC_MAX_ACK_WAITERS*/) {
drivers/net/wireless/purelifi/plfxlc/mac.c
190
plfxlc_mac_tx_status(hw, skb_dequeue(q),
drivers/net/wireless/purelifi/plfxlc/mac.c
342
struct sk_buff_head *q;
drivers/net/wireless/purelifi/plfxlc/mac.c
354
q = &mac->ack_wait_queue;
drivers/net/wireless/purelifi/plfxlc/mac.c
355
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/purelifi/plfxlc/mac.c
357
skb_queue_walk(q, skb) {
drivers/net/wireless/purelifi/plfxlc/mac.c
362
if (mac->ack_pending && skb_queue_is_first(q, skb))
drivers/net/wireless/purelifi/plfxlc/mac.c
376
skb = __skb_dequeue(q);
drivers/net/wireless/purelifi/plfxlc/mac.c
385
mac->ack_pending = skb_queue_len(q) ? 1 : 0;
drivers/net/wireless/purelifi/plfxlc/mac.c
389
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wireless/realtek/rtw88/mac.c
1064
u32 q;
drivers/net/wireless/realtek/rtw88/mac.c
1066
for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
drivers/net/wireless/realtek/rtw88/mac.c
1067
if (prio_queues & BIT(q))
drivers/net/wireless/realtek/rtw88/mac.c
1068
__rtw_mac_flush_prio_queue(rtwdev, q, drop);
drivers/net/wireless/realtek/rtw88/pci.c
740
u8 q;
drivers/net/wireless/realtek/rtw88/pci.c
742
for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
drivers/net/wireless/realtek/rtw88/pci.c
744
if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
drivers/net/wireless/realtek/rtw88/pci.c
745
q == RTW_TX_QUEUE_HI0)
drivers/net/wireless/realtek/rtw88/pci.c
748
if (pci_queues & BIT(q))
drivers/net/wireless/realtek/rtw88/pci.c
749
__pci_flush_queue(rtwdev, q, drop);
drivers/net/wireless/st/cw1200/debug.c
70
struct cw1200_queue *q)
drivers/net/wireless/st/cw1200/debug.c
73
seq_printf(seq, "Queue %d:\n", q->queue_id);
drivers/net/wireless/st/cw1200/debug.c
74
seq_printf(seq, " capacity: %zu\n", q->capacity);
drivers/net/wireless/st/cw1200/debug.c
75
seq_printf(seq, " queued: %zu\n", q->num_queued);
drivers/net/wireless/st/cw1200/debug.c
76
seq_printf(seq, " pending: %zu\n", q->num_pending);
drivers/net/wireless/st/cw1200/debug.c
77
seq_printf(seq, " sent: %zu\n", q->num_sent);
drivers/net/wireless/st/cw1200/debug.c
78
seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no");
drivers/net/wireless/st/cw1200/debug.c
79
seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no");
drivers/net/wireless/st/cw1200/debug.c
81
for (i = 0; i < q->stats->map_capacity; ++i)
drivers/net/wireless/st/cw1200/debug.c
82
seq_printf(seq, "%.2d ", q->link_map_cache[i]);
drivers/net/wireless/st/cw1200/debug.c
83
seq_printf(seq, "<-%zu\n", q->stats->map_capacity);
drivers/net/wireless/ti/wl18xx/main.c
727
wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
drivers/net/wireless/ti/wl18xx/main.c
760
wl18xx_clk_table[clk_freq].q &
drivers/net/wireless/ti/wl18xx/main.c
767
(wl18xx_clk_table[clk_freq].q >> 16) &
drivers/net/wireless/ti/wl18xx/wl18xx.h
228
u32 q;
drivers/net/wireless/ti/wlcore/main.c
1283
int q, mapping;
drivers/net/wireless/ti/wlcore/main.c
1294
q = wl1271_tx_get_queue(mapping);
drivers/net/wireless/ti/wlcore/main.c
1307
(wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
drivers/net/wireless/ti/wlcore/main.c
1308
!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
drivers/net/wireless/ti/wlcore/main.c
1310
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
drivers/net/wireless/ti/wlcore/main.c
1316
hlid, q, skb->len);
drivers/net/wireless/ti/wlcore/main.c
1317
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
drivers/net/wireless/ti/wlcore/main.c
1319
wl->tx_queue_count[q]++;
drivers/net/wireless/ti/wlcore/main.c
1320
wlvif->tx_queue_count[q]++;
drivers/net/wireless/ti/wlcore/main.c
1326
if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
drivers/net/wireless/ti/wlcore/main.c
1327
!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
drivers/net/wireless/ti/wlcore/main.c
1329
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
drivers/net/wireless/ti/wlcore/main.c
1330
wlcore_stop_queue_locked(wl, wlvif, q,
drivers/net/wireless/ti/wlcore/main.c
1350
int q;
drivers/net/wireless/ti/wlcore/main.c
1356
q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
drivers/net/wireless/ti/wlcore/main.c
1360
wl->tx_queue_count[q]++;
drivers/net/wireless/ti/wlcore/tx.c
487
int i, q = -1, ac;
drivers/net/wireless/ti/wlcore/tx.c
501
q = ac;
drivers/net/wireless/ti/wlcore/tx.c
502
min_pkts = wl->tx_allocated_pkts[q];
drivers/net/wireless/ti/wlcore/tx.c
506
return q;
drivers/net/wireless/ti/wlcore/tx.c
510
struct wl1271_link *lnk, u8 q)
drivers/net/wireless/ti/wlcore/tx.c
515
skb = skb_dequeue(&lnk->tx_queue[q]);
drivers/net/wireless/ti/wlcore/tx.c
518
WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
drivers/net/wireless/ti/wlcore/tx.c
519
wl->tx_queue_count[q]--;
drivers/net/wireless/ti/wlcore/tx.c
521
WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
drivers/net/wireless/ti/wlcore/tx.c
522
lnk->wlvif->tx_queue_count[q]--;
drivers/net/wireless/ti/wlcore/tx.c
660
int q;
drivers/net/wireless/ti/wlcore/tx.c
664
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
drivers/net/wireless/ti/wlcore/tx.c
666
WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
drivers/net/wireless/ti/wlcore/tx.c
667
wl->tx_queue_count[q]--;
drivers/net/wireless/ti/wlcore/tx.c
678
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
drivers/net/wireless/ti/wlcore/tx.c
683
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
drivers/net/wireless/ti/wlcore/tx.c
691
wl->tx_queue_count[q]++;
drivers/net/wireless/ti/wlcore/tx.c
693
wlvif->tx_queue_count[q]++;
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1008
skb = __skb_dequeue(q);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
1014
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
489
struct sk_buff_head *q = &mac->ack_wait_queue;
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
498
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
500
skb_queue_walk(q, skb) {
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
513
skb_queue_is_first(q, skb)) {
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
545
skb = __skb_dequeue(q);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
553
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
582
struct sk_buff_head *q = &mac->ack_wait_queue;
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
584
skb_queue_tail(q, skb);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
585
while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
586
skb = skb_dequeue(q);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
968
struct sk_buff_head *q;
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
976
q = &mac->ack_wait_queue;
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
977
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
978
skb_queue_walk(q, skb) {
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
983
if (mac->ack_pending && skb_queue_is_first(q, skb))
drivers/net/wireless/zydas/zd1211rw/zd_mac.c
996
skb = __skb_dequeue(q);
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1061
struct sk_buff_head *q = &tx->submitted_skbs;
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1067
spin_lock_irqsave(&q->lock, flags);
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1068
skb_queue_walk_safe(q, skb, skbnext) {
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
1077
spin_unlock_irqrestore(&q->lock, flags);
drivers/net/wwan/t7xx/t7xx_reg.h
255
#define DPMAIF_ULQSAR_n(q) (DPMAIF_AO_UL_CHNL0_CON0 + 0x10 * (q))
drivers/net/wwan/t7xx/t7xx_reg.h
256
#define DPMAIF_UL_DRBSIZE_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON1 + 0x10 * (q))
drivers/net/wwan/t7xx/t7xx_reg.h
257
#define DPMAIF_UL_DRB_ADDRH_n(q) (DPMAIF_AO_UL_CHNL0_CON2 + 0x10 * (q))
drivers/net/wwan/t7xx/t7xx_reg.h
258
#define DPMAIF_ULQ_STA0_n(q) (DPMAIF_AO_UL_CH0_STA + 0x04 * (q))
drivers/net/wwan/t7xx/t7xx_reg.h
259
#define DPMAIF_ULQ_ADD_DESC_CH_n(q) (DPMAIF_UL_ADD_DESC_CH0 + 0x04 * (q))
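Note: the DPMAIF_*_n(q) macros above follow the usual per-queue register stride pattern — queue q's register sits at a fixed byte offset from queue 0's. A generic sketch with invented base and stride values (the real DPMAIF offsets are defined elsewhere in t7xx_reg.h):

#include <stdio.h>

#define CHNL0_CON0	0x10090040u	/* illustrative queue-0 base */
#define QUEUE_STRIDE	0x10u		/* bytes between queue blocks */
#define ULQSAR_n(q)	(CHNL0_CON0 + QUEUE_STRIDE * (q))

int main(void)
{
	for (unsigned int q = 0; q < 4; q++)
		printf("queue %u -> reg 0x%08x\n", q, ULQSAR_n(q));
	return 0;
}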
drivers/nvme/host/apple.c
1007
static void apple_nvme_init_queue(struct apple_nvme_queue *q)
drivers/nvme/host/apple.c
1009
unsigned int depth = apple_nvme_queue_depth(q);
drivers/nvme/host/apple.c
1010
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
1012
q->cq_head = 0;
drivers/nvme/host/apple.c
1013
q->cq_phase = 1;
drivers/nvme/host/apple.c
1015
memset(q->tcbs, 0, anv->hw->max_queue_depth
drivers/nvme/host/apple.c
1017
memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
drivers/nvme/host/apple.c
1018
WRITE_ONCE(q->enabled, true);
drivers/nvme/host/apple.c
1354
struct apple_nvme_queue *q)
drivers/nvme/host/apple.c
1356
unsigned int depth = apple_nvme_queue_depth(q);
drivers/nvme/host/apple.c
1359
q->cqes = dmam_alloc_coherent(anv->dev,
drivers/nvme/host/apple.c
1361
&q->cq_dma_addr, GFP_KERNEL);
drivers/nvme/host/apple.c
1362
if (!q->cqes)
drivers/nvme/host/apple.c
1370
q->sqes = dmam_alloc_coherent(anv->dev, iosq_size,
drivers/nvme/host/apple.c
1371
&q->sq_dma_addr, GFP_KERNEL);
drivers/nvme/host/apple.c
1372
if (!q->sqes)
drivers/nvme/host/apple.c
1380
q->tcbs = dmam_alloc_coherent(anv->dev,
drivers/nvme/host/apple.c
1383
&q->tcb_dma_addr, GFP_KERNEL);
drivers/nvme/host/apple.c
1384
if (!q->tcbs)
drivers/nvme/host/apple.c
1392
q->cq_phase = 1;
drivers/nvme/host/apple.c
163
struct apple_nvme_queue *q;
drivers/nvme/host/apple.c
216
static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
drivers/nvme/host/apple.c
218
if (q->is_adminq)
drivers/nvme/host/apple.c
219
return container_of(q, struct apple_nvme, adminq);
drivers/nvme/host/apple.c
221
return container_of(q, struct apple_nvme, ioq);
drivers/nvme/host/apple.c
224
static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
drivers/nvme/host/apple.c
226
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
228
if (q->is_adminq && anv->hw->has_lsq_nvmmu)
drivers/nvme/host/apple.c
283
static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
drivers/nvme/host/apple.c
285
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
293
static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q,
drivers/nvme/host/apple.c
296
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
300
if (q->is_adminq)
drivers/nvme/host/apple.c
301
memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd));
drivers/nvme/host/apple.c
303
memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES),
drivers/nvme/host/apple.c
306
if (++q->sq_tail == anv->hw->max_queue_depth)
drivers/nvme/host/apple.c
307
q->sq_tail = 0;
drivers/nvme/host/apple.c
309
writel(q->sq_tail, q->sq_db);
drivers/nvme/host/apple.c
314
static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q,
drivers/nvme/host/apple.c
317
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
319
struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
drivers/nvme/host/apple.c
332
memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
drivers/nvme/host/apple.c
345
writel(tag, q->sq_db);
drivers/nvme/host/apple.c
583
struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
drivers/nvme/host/apple.c
600
static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
drivers/nvme/host/apple.c
602
struct nvme_completion *hcqe = &q->cqes[q->cq_head];
drivers/nvme/host/apple.c
604
return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
drivers/nvme/host/apple.c
608
apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
drivers/nvme/host/apple.c
610
if (q->is_adminq)
drivers/nvme/host/apple.c
616
static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
drivers/nvme/host/apple.c
619
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
620
struct nvme_completion *cqe = &q->cqes[idx];
drivers/nvme/host/apple.c
625
apple_nvmmu_inval(q, command_id);
drivers/nvme/host/apple.c
627
req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
drivers/nvme/host/apple.c
640
static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
drivers/nvme/host/apple.c
642
u32 tmp = q->cq_head + 1;
drivers/nvme/host/apple.c
644
if (tmp == apple_nvme_queue_depth(q)) {
drivers/nvme/host/apple.c
645
q->cq_head = 0;
drivers/nvme/host/apple.c
646
q->cq_phase ^= 1;
drivers/nvme/host/apple.c
648
q->cq_head = tmp;
drivers/nvme/host/apple.c
652
static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
drivers/nvme/host/apple.c
657
while (apple_nvme_cqe_pending(q)) {
drivers/nvme/host/apple.c
665
apple_nvme_handle_cqe(q, iob, q->cq_head);
drivers/nvme/host/apple.c
666
apple_nvme_update_cq_head(q);
drivers/nvme/host/apple.c
670
writel(q->cq_head, q->cq_db);
drivers/nvme/host/apple.c
675
static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
drivers/nvme/host/apple.c
680
if (!READ_ONCE(q->enabled) && !force)
drivers/nvme/host/apple.c
683
found = apple_nvme_poll_cq(q, &iob);
drivers/nvme/host/apple.c
769
struct apple_nvme_queue *q = hctx->driver_data;
drivers/nvme/host/apple.c
770
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
783
if (unlikely(!READ_ONCE(q->enabled)))
drivers/nvme/host/apple.c
802
apple_nvme_submit_cmd_t8103(q, cmnd);
drivers/nvme/host/apple.c
804
apple_nvme_submit_cmd_t8015(q, cmnd);
drivers/nvme/host/apple.c
824
struct apple_nvme_queue *q = set->driver_data;
drivers/nvme/host/apple.c
825
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
829
iod->q = q;
drivers/nvme/host/apple.c
918
struct apple_nvme_queue *q = iod->q;
drivers/nvme/host/apple.c
919
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
940
req->tag, q->is_adminq);
drivers/nvme/host/apple.c
953
apple_nvme_handle_cq(q, false);
drivers/nvme/host/apple.c
958
req->tag, q->is_adminq);
drivers/nvme/host/apple.c
968
req->tag, q->is_adminq);
drivers/nvme/host/apple.c
978
struct apple_nvme_queue *q = hctx->driver_data;
drivers/nvme/host/apple.c
979
struct apple_nvme *anv = queue_to_apple_nvme(q);
drivers/nvme/host/apple.c
984
found = apple_nvme_poll_cq(q, iob);
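Note: the apple_nvme_cqe_pending/update_cq_head lines above implement the NVMe completion-queue phase-bit protocol — bit 0 of each CQE's status toggles every time the device wraps the queue, so a CQE is new exactly when that bit matches the host's cq_phase, which the host flips on its own wrap. A minimal sketch with simplified types, not the driver's:

#include <stdio.h>
#include <stdint.h>

struct cq {
	uint16_t status[4];	/* CQE status words, bit 0 = phase */
	unsigned int head;
	unsigned int depth;
	unsigned int phase;	/* starts at 1, as in apple_nvme_init_queue */
};

static int cqe_pending(const struct cq *q)
{
	return (q->status[q->head] & 1) == q->phase;
}

static void advance(struct cq *q)
{
	if (++q->head == q->depth) {
		q->head = 0;
		q->phase ^= 1;	/* expect the opposite bit next lap */
	}
}

int main(void)
{
	struct cq q = { .status = { 1, 1, 0, 0 }, .depth = 4, .phase = 1 };

	while (cqe_pending(&q))
		advance(&q);
	printf("consumed up to head=%u\n", q.head); /* 2 */
	return 0;
}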
drivers/nvme/host/auth.c
1006
int ret, q;
drivers/nvme/host/auth.c
1034
for (q = 1; q < ctrl->queue_count; q++) {
drivers/nvme/host/auth.c
1036
&ctrl->dhchap_ctxs[q];
drivers/nvme/host/auth.c
1051
for (q = 1; q < ctrl->queue_count; q++) {
drivers/nvme/host/auth.c
1053
&ctrl->dhchap_ctxs[q];
drivers/nvme/host/auth.c
1061
"qid %d: authentication failed\n", q);
drivers/nvme/host/auth.c
65
struct request_queue *q = ctrl->fabrics_q;
drivers/nvme/host/auth.c
70
q = ctrl->connect_q;
drivers/nvme/host/auth.c
85
ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
drivers/nvme/host/core.c
1159
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
drivers/nvme/host/core.c
1172
req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
drivers/nvme/host/core.c
1174
req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
drivers/nvme/host/core.c
1198
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
drivers/nvme/host/core.c
1201
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
drivers/nvme/host/core.c
3060
const struct nvme_core_quirk_entry *q)
drivers/nvme/host/core.c
3062
return q->vid == le16_to_cpu(id->vid) &&
drivers/nvme/host/core.c
3063
string_matches(id->mn, q->mn, sizeof(id->mn)) &&
drivers/nvme/host/core.c
3064
string_matches(id->fr, q->fr, sizeof(id->fr));
drivers/nvme/host/core.c
334
blk_mq_delay_kick_requeue_list(req->q, delay);
drivers/nvme/host/core.c
339
struct nvme_ns *ns = req->q->queuedata;
drivers/nvme/host/core.c
370
struct nvme_ns *ns = req->q->queuedata;
drivers/nvme/host/core.c
414
blk_queue_dying(req->q))
drivers/nvme/host/core.c
417
if (blk_queue_dying(req->q))
drivers/nvme/host/core.c
428
struct nvme_ns *ns = req->q->queuedata;
drivers/nvme/host/core.c
723
if (req->q->queuedata) {
drivers/nvme/host/core.c
724
struct nvme_ns *ns = req->q->disk->private_data;
drivers/nvme/host/core.c
788
if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
drivers/nvme/host/core.c
851
if (queue_max_discard_segments(req->q) == 1) {
drivers/nvme/host/core.c
968
struct request_queue *q = req->q;
drivers/nvme/host/core.c
969
u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
drivers/nvme/host/core.c
971
if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
drivers/nvme/host/fault_inject.c
60
struct gendisk *disk = req->q->disk;
drivers/nvme/host/fc.c
2440
int q;
drivers/nvme/host/fc.c
2447
for (q = 1; q < ctrl->ctrl.queue_count; q++)
drivers/nvme/host/fc.c
2448
clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
drivers/nvme/host/ioctl.c
101
static struct request *nvme_alloc_user_request(struct request_queue *q,
drivers/nvme/host/ioctl.c
107
req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
drivers/nvme/host/ioctl.c
119
struct request_queue *q = req->q;
drivers/nvme/host/ioctl.c
120
struct nvme_ns *ns = q->queuedata;
drivers/nvme/host/ioctl.c
140
ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
drivers/nvme/host/ioctl.c
162
static int nvme_submit_user_cmd(struct request_queue *q,
drivers/nvme/host/ioctl.c
167
struct nvme_ns *ns = q->queuedata;
drivers/nvme/host/ioctl.c
174
req = nvme_alloc_user_request(q, cmd, 0, 0);
drivers/nvme/host/ioctl.c
452
struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
drivers/nvme/host/ioctl.c
515
req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
drivers/nvme/host/multipath.c
139
struct nvme_ns *ns = req->q->queuedata;
drivers/nvme/host/multipath.c
182
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/host/multipath.c
203
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/host/nvme.h
1046
struct nvme_ns *ns = req->q->queuedata;
drivers/nvme/host/nvme.h
183
static inline char *nvme_quirk_name(enum nvme_quirks q)
drivers/nvme/host/nvme.h
185
switch (q) {
drivers/nvme/host/nvme.h
273
if (!req->q->queuedata)
drivers/nvme/host/nvme.h
802
if (unlikely(blk_should_fake_timeout(req->q)))
drivers/nvme/host/nvme.h
960
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
drivers/nvme/host/nvme.h
962
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
drivers/nvme/host/pci.c
1379
ret = nvme_setup_cmd(req->q->queuedata, req);
drivers/nvme/host/pci.c
3067
struct request_queue *q = nvmeq->dev->ctrl.admin_q;
drivers/nvme/host/pci.c
3074
req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
drivers/nvme/host/pci.c
34
#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
drivers/nvme/host/pci.c
35
#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
drivers/nvme/host/rdma.c
1413
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/host/trace.h
106
__assign_disk_name(__entry->disk, req->q->disk);
drivers/nvme/host/trace.h
153
__assign_disk_name(__entry->disk, req->q->disk);
drivers/nvme/host/trace.h
71
__assign_disk_name(__entry->disk, req->q->disk);
drivers/nvme/host/zns.c
122
struct request_queue *q = ns->disk->queue;
drivers/nvme/host/zns.c
135
queue_max_hw_sectors(q) << SECTOR_SHIFT);
drivers/nvme/host/zns.c
136
bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
drivers/nvme/target/fc.c
2132
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
drivers/nvme/target/fc.c
2137
sqtail = atomic_read(&q->sqtail) % q->sqsize;
drivers/nvme/target/fc.c
2139
used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
drivers/nvme/target/fc.c
2140
return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
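Note: the queue_90percent_full() lines above first compute occupancy with tail wraparound handled, then compare against 90% of the usable depth (sqsize - 1). A worked instance with values chosen to sit right at that boundary:

#include <stdio.h>

int main(void)
{
	unsigned int sqsize = 32, sqhd = 30, sqtail = 26;
	unsigned int used = (sqtail < sqhd) ? (sqtail + sqsize - sqhd)
					    : (sqtail - sqhd);

	/* used = 26 + 32 - 30 = 28; 28*10 = 280 >= 31*9 = 279 -> full */
	printf("used=%u full=%d\n", used, used * 10 >= (sqsize - 1) * 9);
	return 0;
}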
drivers/nvme/target/passthru.c
219
struct nvme_ns *ns = rq->q->queuedata;
drivers/nvme/target/passthru.c
300
struct request_queue *q = ctrl->admin_q;
drivers/nvme/target/passthru.c
318
q = ns->queue;
drivers/nvme/target/passthru.c
324
rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
drivers/nvme/target/rdma.c
1590
struct nvmet_rdma_queue *q;
drivers/nvme/target/rdma.c
1595
list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
drivers/nvme/target/rdma.c
1596
if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
drivers/nvme/target/rdma.c
1597
q->state == NVMET_RDMA_Q_DISCONNECTING)
drivers/nvme/target/tcp.c
2158
struct nvmet_tcp_queue *q;
drivers/nvme/target/tcp.c
2163
list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
drivers/nvme/target/tcp.c
2164
if (q->nvme_sq.ctrl == sq->ctrl &&
drivers/nvme/target/tcp.c
2165
q->state == NVMET_TCP_Q_DISCONNECTING)
drivers/of/fdt.c
945
const char *p, *q, *options = NULL;
drivers/of/fdt.c
963
q = strchrnul(p, ':');
drivers/of/fdt.c
964
if (*q != '\0')
drivers/of/fdt.c
965
options = q + 1;
drivers/of/fdt.c
966
l = q - p;
drivers/parport/probe.c
113
if (q)
drivers/parport/probe.c
114
p = q + 1;
drivers/parport/probe.c
56
char *p = txt, *q;
drivers/parport/probe.c
67
q = strchr(p, ';');
drivers/parport/probe.c
68
if (q) *q = 0;
drivers/pcmcia/cistpl.c
1008
if (p == q)
drivers/pcmcia/cistpl.c
1013
if (p == q)
drivers/pcmcia/cistpl.c
1022
static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
drivers/pcmcia/cistpl.c
1027
if (p == q)
drivers/pcmcia/cistpl.c
1034
if (++p == q)
drivers/pcmcia/cistpl.c
1040
if (p == q)
drivers/pcmcia/cistpl.c
1045
if (p == q)
drivers/pcmcia/cistpl.c
1051
if (p == q)
drivers/pcmcia/cistpl.c
1063
static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
drivers/pcmcia/cistpl.c
1065
if (p == q)
drivers/pcmcia/cistpl.c
1069
if (p+2 > q)
drivers/pcmcia/cistpl.c
1081
u_char *p, *q, features;
drivers/pcmcia/cistpl.c
1084
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
1090
if (++p == q)
drivers/pcmcia/cistpl.c
1105
if (++p == q)
drivers/pcmcia/cistpl.c
1111
p = parse_power(p, q, &entry->vcc);
drivers/pcmcia/cistpl.c
1117
p = parse_power(p, q, &entry->vpp1);
drivers/pcmcia/cistpl.c
1123
p = parse_power(p, q, &entry->vpp2);
drivers/pcmcia/cistpl.c
1131
p = parse_timing(p, q, &entry->timing);
drivers/pcmcia/cistpl.c
1142
p = parse_io(p, q, &entry->io);
drivers/pcmcia/cistpl.c
1150
p = parse_irq(p, q, &entry->irq);
drivers/pcmcia/cistpl.c
1166
if (p > q)
drivers/pcmcia/cistpl.c
1175
if (p > q)
drivers/pcmcia/cistpl.c
1179
p = parse_mem(p, q, &entry->mem);
drivers/pcmcia/cistpl.c
1187
if (p == q)
drivers/pcmcia/cistpl.c
1191
if (++p == q)
drivers/pcmcia/cistpl.c
1196
entry->subtuples = q-p;
drivers/pcmcia/cistpl.c
1204
u_char *p, *q;
drivers/pcmcia/cistpl.c
1208
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
1211
if (p > q-6)
drivers/pcmcia/cistpl.c
1228
u_char *p, *q;
drivers/pcmcia/cistpl.c
1234
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
1243
return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
drivers/pcmcia/cistpl.c
1249
u_char *p, *q;
drivers/pcmcia/cistpl.c
1253
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
1254
if (p == q)
drivers/pcmcia/cistpl.c
1257
if (++p == q)
drivers/pcmcia/cistpl.c
1263
if (++p == q)
drivers/pcmcia/cistpl.c
663
u_char *p, *q;
drivers/pcmcia/cistpl.c
666
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
692
if (++p == q)
drivers/pcmcia/cistpl.c
696
if (++p == q)
drivers/pcmcia/cistpl.c
703
if (++p == q)
drivers/pcmcia/cistpl.c
712
if (++p == q)
drivers/pcmcia/cistpl.c
761
static int parse_strings(u_char *p, u_char *q, int max,
drivers/pcmcia/cistpl.c
766
if (p == q)
drivers/pcmcia/cistpl.c
778
if (++p == q)
drivers/pcmcia/cistpl.c
781
if ((*p == 0xff) || (++p == q))
drivers/pcmcia/cistpl.c
795
u_char *p, *q;
drivers/pcmcia/cistpl.c
798
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
802
if (p >= q)
drivers/pcmcia/cistpl.c
805
return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
drivers/pcmcia/cistpl.c
812
u_char *p, *q;
drivers/pcmcia/cistpl.c
815
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
817
return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
drivers/pcmcia/cistpl.c
824
u_char *p, *q;
drivers/pcmcia/cistpl.c
828
q = p + tuple->TupleDataLen;
drivers/pcmcia/cistpl.c
831
if (p > q-2)
drivers/pcmcia/cistpl.c
906
static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
drivers/pcmcia/cistpl.c
911
if (p == q)
drivers/pcmcia/cistpl.c
918
if (p == q)
drivers/pcmcia/cistpl.c
923
if (++p == q)
drivers/pcmcia/cistpl.c
943
static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
drivers/pcmcia/cistpl.c
947
if (p == q)
drivers/pcmcia/cistpl.c
951
if (++p == q)
drivers/pcmcia/cistpl.c
959
if (++p == q)
drivers/pcmcia/cistpl.c
967
if (++p == q)
drivers/pcmcia/cistpl.c
978
static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
drivers/pcmcia/cistpl.c
982
if (p == q)
drivers/pcmcia/cistpl.c
993
if (++p == q)
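The cistpl.c entries above share one parsing convention: p is a cursor into the tuple body and q is one-past-the-end (q = p + tuple->TupleDataLen), so every parse_* helper takes both pointers and bails out before reading past q. A minimal sketch of that bounded-cursor idiom, with a hypothetical parse_u8 helper that is not part of the kernel:

    #include <stdint.h>

    /* Read one byte from the window [p, q); return the advanced cursor,
     * or NULL when the window is exhausted.  Callers chain these checks,
     * which is what the repeated "if (++p == q)" tests above implement. */
    static const uint8_t *parse_u8(const uint8_t *p, const uint8_t *q,
                                   uint8_t *out)
    {
            if (p == q)
                    return NULL;
            *out = *p++;
            return p;
    }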
drivers/pcmcia/rsrc_nonstatic.c
1043
struct resource_map *p, *q;
drivers/pcmcia/rsrc_nonstatic.c
1045
for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = q) {
drivers/pcmcia/rsrc_nonstatic.c
1046
q = p->next;
drivers/pcmcia/rsrc_nonstatic.c
1049
for (p = data->mem_db.next; p != &data->mem_db; p = q) {
drivers/pcmcia/rsrc_nonstatic.c
1050
q = p->next;
drivers/pcmcia/rsrc_nonstatic.c
1053
for (p = data->io_db.next; p != &data->io_db; p = q) {
drivers/pcmcia/rsrc_nonstatic.c
1054
q = p->next;
drivers/pcmcia/rsrc_nonstatic.c
110
struct resource_map *p, *q;
drivers/pcmcia/rsrc_nonstatic.c
120
q = kmalloc_obj(struct resource_map);
drivers/pcmcia/rsrc_nonstatic.c
121
if (!q) {
drivers/pcmcia/rsrc_nonstatic.c
125
q->base = base; q->num = num;
drivers/pcmcia/rsrc_nonstatic.c
126
q->next = p->next; p->next = q;
drivers/pcmcia/rsrc_nonstatic.c
134
struct resource_map *p, *q;
drivers/pcmcia/rsrc_nonstatic.c
136
for (p = map; ; p = q) {
drivers/pcmcia/rsrc_nonstatic.c
137
q = p->next;
drivers/pcmcia/rsrc_nonstatic.c
138
if (q == map)
drivers/pcmcia/rsrc_nonstatic.c
140
if ((q->base+q->num > base) && (base+num > q->base)) {
drivers/pcmcia/rsrc_nonstatic.c
141
if (q->base >= base) {
drivers/pcmcia/rsrc_nonstatic.c
142
if (q->base+q->num <= base+num) {
drivers/pcmcia/rsrc_nonstatic.c
144
p->next = q->next;
drivers/pcmcia/rsrc_nonstatic.c
145
kfree(q);
drivers/pcmcia/rsrc_nonstatic.c
147
q = p;
drivers/pcmcia/rsrc_nonstatic.c
150
q->num = q->base + q->num - base - num;
drivers/pcmcia/rsrc_nonstatic.c
151
q->base = base + num;
drivers/pcmcia/rsrc_nonstatic.c
153
} else if (q->base+q->num <= base+num) {
drivers/pcmcia/rsrc_nonstatic.c
155
q->num = base - q->base;
drivers/pcmcia/rsrc_nonstatic.c
164
p->num = q->base+q->num - p->base;
drivers/pcmcia/rsrc_nonstatic.c
165
q->num = base - q->base;
drivers/pcmcia/rsrc_nonstatic.c
166
p->next = q->next ; q->next = p;
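rsrc_nonstatic.c keeps each resource database as a circular singly linked list headed by a sentinel: the add path (lines 120-126 above) allocates a node and splices it after p, and the walk idiom "for (p = map; ; p = q) { q = p->next; if (q == map) break; ... }" terminates when the cursor comes back around to the head. A reduced sketch of the splice, assuming a simplified struct rather than the kernel's definition:

    #include <stdlib.h>

    struct resource_map {
            unsigned long base, num;        /* interval [base, base + num) */
            struct resource_map *next;      /* circular: tail points at head */
    };

    /* Splice a new interval node in after p; returns 0, or -1 on OOM. */
    static int splice_after(struct resource_map *p,
                            unsigned long base, unsigned long num)
    {
            struct resource_map *q = malloc(sizeof(*q));

            if (!q)
                    return -1;
            q->base = base;
            q->num = num;
            q->next = p->next;
            p->next = q;
            return 0;
    }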
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
1272
#define NPCM8XX_PINCFG(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) \
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
1274
.flag = q, \
drivers/pinctrl/renesas/pinctrl-rzt2h.c
111
RZT2H_PINCTRL_REG_ACCESS(q, u64)
drivers/platform/chrome/wilco_ec/event.c
107
struct ec_event_queue *q;
drivers/platform/chrome/wilco_ec/event.c
109
q = kzalloc_flex(*q, entries, capacity);
drivers/platform/chrome/wilco_ec/event.c
110
if (!q)
drivers/platform/chrome/wilco_ec/event.c
113
q->capacity = capacity;
drivers/platform/chrome/wilco_ec/event.c
115
return q;
drivers/platform/chrome/wilco_ec/event.c
118
static inline bool event_queue_empty(struct ec_event_queue *q)
drivers/platform/chrome/wilco_ec/event.c
121
return q->head == q->tail && !q->entries[q->head];
drivers/platform/chrome/wilco_ec/event.c
124
static inline bool event_queue_full(struct ec_event_queue *q)
drivers/platform/chrome/wilco_ec/event.c
127
return q->head == q->tail && q->entries[q->head];
drivers/platform/chrome/wilco_ec/event.c
130
static struct ec_event *event_queue_pop(struct ec_event_queue *q)
drivers/platform/chrome/wilco_ec/event.c
134
if (event_queue_empty(q))
drivers/platform/chrome/wilco_ec/event.c
137
ev = q->entries[q->tail];
drivers/platform/chrome/wilco_ec/event.c
138
q->entries[q->tail] = NULL;
drivers/platform/chrome/wilco_ec/event.c
139
q->tail = (q->tail + 1) % q->capacity;
drivers/platform/chrome/wilco_ec/event.c
148
static struct ec_event *event_queue_push(struct ec_event_queue *q,
drivers/platform/chrome/wilco_ec/event.c
153
if (event_queue_full(q))
drivers/platform/chrome/wilco_ec/event.c
154
popped = event_queue_pop(q);
drivers/platform/chrome/wilco_ec/event.c
155
q->entries[q->head] = ev;
drivers/platform/chrome/wilco_ec/event.c
156
q->head = (q->head + 1) % q->capacity;
drivers/platform/chrome/wilco_ec/event.c
161
static void event_queue_free(struct ec_event_queue *q)
drivers/platform/chrome/wilco_ec/event.c
165
while ((event = event_queue_pop(q)) != NULL)
drivers/platform/chrome/wilco_ec/event.c
168
kfree(q);
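wilco_ec's event ring disambiguates the head == tail case by occupancy rather than by wasting a slot: entries[] holds pointers, a NULL slot at head means empty, a non-NULL one means full, and event_queue_push() pops the oldest event when full. A standalone sketch of that empty/full convention (ec_event stands in for the real payload type):

    #include <stdbool.h>

    struct ec_event;                        /* opaque payload */

    struct ec_event_queue {
            int head, tail, capacity;
            struct ec_event *entries[];     /* NULL marks a free slot */
    };

    static bool queue_empty(const struct ec_event_queue *q)
    {
            return q->head == q->tail && !q->entries[q->head];
    }

    static bool queue_full(const struct ec_event_queue *q)
    {
            return q->head == q->tail && q->entries[q->head];
    }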
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
1304
struct list_head *q, *buf_head;
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
1325
list_for_each_safe(buf_head, q, &port->buffers) {
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
1354
struct list_head *q, *buf_head;
drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c
1370
list_for_each_safe(buf_head, q, &port->buffers) {
drivers/platform/surface/aggregator/controller.c
644
static void ssam_event_queue_push(struct ssam_event_queue *q,
drivers/platform/surface/aggregator/controller.c
647
spin_lock(&q->lock);
drivers/platform/surface/aggregator/controller.c
648
list_add_tail(&item->node, &q->head);
drivers/platform/surface/aggregator/controller.c
649
spin_unlock(&q->lock);
drivers/platform/surface/aggregator/controller.c
659
static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
drivers/platform/surface/aggregator/controller.c
663
spin_lock(&q->lock);
drivers/platform/surface/aggregator/controller.c
664
item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
drivers/platform/surface/aggregator/controller.c
667
spin_unlock(&q->lock);
drivers/platform/surface/aggregator/controller.c
676
static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
drivers/platform/surface/aggregator/controller.c
680
spin_lock(&q->lock);
drivers/platform/surface/aggregator/controller.c
681
empty = list_empty(&q->head);
drivers/platform/surface/aggregator/controller.c
682
spin_unlock(&q->lock);
drivers/platform/surface/aggregator/ssh_packet_layer.c
700
struct ssh_packet *q;
drivers/platform/surface/aggregator/ssh_packet_layer.c
719
q = list_entry(head, struct ssh_packet, queue_node);
drivers/platform/surface/aggregator/ssh_packet_layer.c
721
if (q->priority < p->priority)
drivers/platform/surface/aggregator/ssh_packet_layer.c
726
q = list_entry(head, struct ssh_packet, queue_node);
drivers/platform/surface/aggregator/ssh_packet_layer.c
728
if (q->priority >= p->priority) {
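ssh_packet_layer.c inserts packets into the transmit queue by priority, looking for the boundary between packets that outrank (or tie) the new one and packets of strictly lower priority, so equal priorities stay FIFO. A sketch of that insertion against the kernel list API, with simplified types:

    #include <linux/list.h>

    struct pkt {
            struct list_head node;
            int priority;                   /* larger value = more urgent */
    };

    /* Insert p before the first queued packet of strictly lower priority,
     * so packets of equal priority keep their submission order. */
    static void queue_by_priority(struct list_head *queue, struct pkt *p)
    {
            struct pkt *q;

            list_for_each_entry(q, queue, node) {
                    if (q->priority < p->priority) {
                            /* list_add_tail() on q's node links p before q */
                            list_add_tail(&p->node, &q->node);
                            return;
                    }
            }
            list_add_tail(&p->node, queue); /* lowest priority: append */
    }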
drivers/ptp/ptp_clock.c
42
static inline int queue_free(struct timestamp_event_queue *q)
drivers/ptp/ptp_clock.c
44
return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
drivers/ptp/ptp_private.h
100
int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);
drivers/ptp/ptp_private.h
94
static inline int queue_cnt(const struct timestamp_event_queue *q)
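ptp reserves one slot: queue_free() is PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1, so the producer can never catch up to the consumer and head == tail always means empty. queue_cnt() takes the READ_ONCE difference of tail and head; with wrapping indices that difference goes negative once tail has wrapped past the array end and is normalized by adding the ring size. A sketch of both helpers under those assumptions:

    #define RING_SIZE 128                   /* stands in for PTP_MAX_TIMESTAMPS */

    struct ts_ring {
            int head;                       /* consumer index */
            int tail;                       /* producer index */
    };

    /* Occupied entries: tail - head, normalized when tail has wrapped. */
    static inline int ring_cnt(const struct ts_ring *q)
    {
            int cnt = q->tail - q->head;    /* READ_ONCE() in the kernel */

            return cnt < 0 ? cnt + RING_SIZE : cnt;
    }

    /* One slot stays unused so that full and empty are distinguishable. */
    static inline int ring_free(const struct ts_ring *q)
    {
            return RING_SIZE - ring_cnt(q) - 1;
    }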
drivers/remoteproc/imx_dsp_rproc.c
808
int i, q, r;
drivers/remoteproc/imx_dsp_rproc.c
815
q = size / 4;
drivers/remoteproc/imx_dsp_rproc.c
819
for (i = 0; i < q; i++)
drivers/remoteproc/imx_dsp_rproc.c
830
tmp = readl(dest + q * 4);
drivers/remoteproc/imx_dsp_rproc.c
835
tmp |= (src_byte[q * 4 + i] << (8 * i));
drivers/remoteproc/imx_dsp_rproc.c
837
writel(tmp, dest + q * 4);
drivers/remoteproc/imx_dsp_rproc.c
854
int q, r;
drivers/remoteproc/imx_dsp_rproc.c
864
q = size / 4;
drivers/remoteproc/imx_dsp_rproc.c
867
while (q--)
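imx_dsp_rproc.c copies to DSP memory that only tolerates 32-bit accesses, so it splits the length into q = size / 4 whole words plus a byte remainder; the trailing bytes are merged into the last word with a read-modify-write (readl, OR in each byte shifted by 8 * i, writel). A host-side sketch of the same split, using plain stores in place of readl/writel; note the sketch masks each byte lane before the OR, whereas the quoted kernel lines OR into the word as read:

    #include <stddef.h>
    #include <stdint.h>

    /* Copy `size` bytes into a region that must be written one whole
     * 32-bit little-endian word at a time. */
    static void copy_32bit_only(uint32_t *dest, const uint8_t *src, size_t size)
    {
            size_t q = size / 4, r = size % 4, i;

            for (i = 0; i < q; i++)         /* the whole words */
                    dest[i] = (uint32_t)src[4 * i]           |
                              (uint32_t)src[4 * i + 1] << 8  |
                              (uint32_t)src[4 * i + 2] << 16 |
                              (uint32_t)src[4 * i + 3] << 24;

            if (r) {                        /* read-modify-write the tail word */
                    uint32_t tmp = dest[q];

                    for (i = 0; i < r; i++) {
                            tmp &= ~(UINT32_C(0xff) << (8 * i));
                            tmp |= (uint32_t)src[4 * q + i] << (8 * i);
                    }
                    dest[q] = tmp;
            }
    }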
drivers/s390/block/dasd.c
2727
blk_mq_run_hw_queues(req->q, true);
drivers/s390/block/dasd.c
2737
} else if (likely(!blk_should_fake_timeout(req->q))) {
drivers/s390/block/dasd.c
3114
struct dasd_block *block = req->q->queuedata;
drivers/s390/block/dasd.c
3203
blk_mq_run_hw_queues(req->q, true);
drivers/s390/block/scm_blk.c
257
if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
drivers/s390/cio/qdio.h
263
#define queue_type(q) q->irq_ptr->qib.qfmt
drivers/s390/cio/qdio.h
264
#define SCH_NO(q) (q->irq_ptr->schid.sch_no)
drivers/s390/cio/qdio.h
281
static inline void account_sbals_error(struct qdio_q *q, int count)
drivers/s390/cio/qdio.h
283
q->q_stats.nr_sbal_error += count;
drivers/s390/cio/qdio.h
284
q->q_stats.nr_sbal_total += count;
drivers/s390/cio/qdio.h
288
static inline int multicast_outbound(struct qdio_q *q)
drivers/s390/cio/qdio.h
290
return (q->irq_ptr->nr_output_qs > 1) &&
drivers/s390/cio/qdio.h
291
(q->nr == q->irq_ptr->nr_output_qs - 1);
drivers/s390/cio/qdio.h
303
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
drivers/s390/cio/qdio.h
309
#define for_each_input_queue(irq_ptr, q, i) \
drivers/s390/cio/qdio.h
311
({ q = irq_ptr->input_qs[i]; 1; }); i++)
drivers/s390/cio/qdio.h
312
#define for_each_output_queue(irq_ptr, q, i) \
drivers/s390/cio/qdio.h
314
({ q = irq_ptr->output_qs[i]; 1; }); i++)
drivers/s390/cio/qdio.h
346
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
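qdio.h's for_each_input_queue()/for_each_output_queue() refresh q on every pass by hiding the assignment inside a GNU statement expression that always evaluates to 1, so the real loop condition stays the bounds check. The same shape, reduced to a hypothetical array walker:

    /* GNU C statement expression: assign q each iteration, then yield 1
     * so the assignment never terminates the loop by itself. */
    #define for_each_slot(arr, n, q, i) \
            for ((i) = 0; (i) < (n) && ({ (q) = (arr)[(i)]; 1; }); (i)++)

    /* usage:
     *     struct qdio_q *q;
     *     int i;
     *     for_each_slot(irq_ptr->input_qs, irq_ptr->nr_input_qs, q, i)
     *             do_something(q);
     */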
drivers/s390/cio/qdio_debug.c
101
struct qdio_q *q = m->private;
drivers/s390/cio/qdio_debug.c
104
if (!q)
drivers/s390/cio/qdio_debug.c
107
seq_printf(m, "Timestamp: %llx\n", q->timestamp);
drivers/s390/cio/qdio_debug.c
109
q->irq_ptr->last_data_irq_time, last_ai_time);
drivers/s390/cio/qdio_debug.c
111
atomic_read(&q->nr_buf_used), q->first_to_check);
drivers/s390/cio/qdio_debug.c
112
if (q->is_input_q) {
drivers/s390/cio/qdio_debug.c
114
q->u.in.batch_start, q->u.in.batch_count);
drivers/s390/cio/qdio_debug.c
116
*(u8 *)q->irq_ptr->dsci,
drivers/s390/cio/qdio_debug.c
118
&q->irq_ptr->poll_state));
drivers/s390/cio/qdio_debug.c
124
debug_get_buf_state(q, i, &state);
drivers/s390/cio/qdio_debug.c
162
if (!q->irq_ptr->perf_stat_enabled) {
drivers/s390/cio/qdio_debug.c
169
for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
drivers/s390/cio/qdio_debug.c
170
seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
drivers/s390/cio/qdio_debug.c
172
q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
drivers/s390/cio/qdio_debug.c
173
q->q_stats.nr_sbal_total);
drivers/s390/cio/qdio_debug.c
241
struct qdio_q *q;
drivers/s390/cio/qdio_debug.c
256
for_each_input_queue(irq_ptr, q, i)
drivers/s390/cio/qdio_debug.c
257
memset(&q->q_stats, 0, sizeof(q->q_stats));
drivers/s390/cio/qdio_debug.c
258
for_each_output_queue(irq_ptr, q, i)
drivers/s390/cio/qdio_debug.c
259
memset(&q->q_stats, 0, sizeof(q->q_stats));
drivers/s390/cio/qdio_debug.c
283
static void setup_debugfs_entry(struct dentry *parent, struct qdio_q *q)
drivers/s390/cio/qdio_debug.c
288
q->is_input_q ? "input" : "output",
drivers/s390/cio/qdio_debug.c
289
q->nr);
drivers/s390/cio/qdio_debug.c
290
debugfs_create_file(name, 0444, parent, q, &qstat_fops);
drivers/s390/cio/qdio_debug.c
295
struct qdio_q *q;
drivers/s390/cio/qdio_debug.c
305
for_each_input_queue(irq_ptr, q, i)
drivers/s390/cio/qdio_debug.c
306
setup_debugfs_entry(irq_ptr->debugfs_dev, q);
drivers/s390/cio/qdio_debug.c
307
for_each_output_queue(irq_ptr, q, i)
drivers/s390/cio/qdio_debug.c
308
setup_debugfs_entry(irq_ptr->debugfs_dev, q);
drivers/s390/cio/qdio_main.c
116
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
drivers/s390/cio/qdio_main.c
119
int tmp_count = count, tmp_start = start, nr = q->nr;
drivers/s390/cio/qdio_main.c
1210
static int handle_inbound(struct qdio_q *q, int bufnr, int count)
drivers/s390/cio/qdio_main.c
1214
qperf_inc(q, inbound_call);
drivers/s390/cio/qdio_main.c
1217
overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
drivers/s390/cio/qdio_main.c
1218
q->u.in.batch_count);
drivers/s390/cio/qdio_main.c
122
qperf_inc(q, eqbs);
drivers/s390/cio/qdio_main.c
1220
q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
drivers/s390/cio/qdio_main.c
1221
q->u.in.batch_count -= overlap;
drivers/s390/cio/qdio_main.c
1224
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
drivers/s390/cio/qdio_main.c
1225
atomic_add(count, &q->nr_buf_used);
drivers/s390/cio/qdio_main.c
1227
if (qdio_need_siga_in(q->irq_ptr))
drivers/s390/cio/qdio_main.c
1228
return qdio_siga_input(q);
drivers/s390/cio/qdio_main.c
124
if (!q->is_input_q)
drivers/s390/cio/qdio_main.c
125
nr += q->irq_ptr->nr_input_qs;
drivers/s390/cio/qdio_main.c
1269
static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
drivers/s390/cio/qdio_main.c
127
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
drivers/s390/cio/qdio_main.c
1275
qperf_inc(q, outbound_call);
drivers/s390/cio/qdio_main.c
1277
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
drivers/s390/cio/qdio_main.c
1278
used = atomic_add_return(count, &q->nr_buf_used);
drivers/s390/cio/qdio_main.c
1281
qperf_inc(q, outbound_queue_full);
drivers/s390/cio/qdio_main.c
1283
if (queue_type(q) == QDIO_IQDIO_QFMT) {
drivers/s390/cio/qdio_main.c
1287
rc = qdio_kick_outbound_q(q, count, phys_aob);
drivers/s390/cio/qdio_main.c
1288
} else if (qdio_need_siga_sync(q->irq_ptr)) {
drivers/s390/cio/qdio_main.c
1289
rc = qdio_sync_output_queue(q);
drivers/s390/cio/qdio_main.c
1291
get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
drivers/s390/cio/qdio_main.c
1294
qperf_inc(q, fast_requeue);
drivers/s390/cio/qdio_main.c
1296
rc = qdio_kick_outbound_q(q, count, 0);
drivers/s390/cio/qdio_main.c
1343
struct qdio_q *q;
drivers/s390/cio/qdio_main.c
1350
for_each_input_queue(irq_ptr, q, i)
drivers/s390/cio/qdio_main.c
1351
qdio_stop_polling(q);
drivers/s390/cio/qdio_main.c
1362
for_each_input_queue(irq_ptr, q, i) {
drivers/s390/cio/qdio_main.c
1363
if (!qdio_inbound_q_done(q, q->first_to_check))
drivers/s390/cio/qdio_main.c
137
qperf_inc(q, eqbs_partial);
drivers/s390/cio/qdio_main.c
138
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
drivers/s390/cio/qdio_main.c
143
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
drivers/s390/cio/qdio_main.c
146
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
drivers/s390/cio/qdio_main.c
147
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
drivers/s390/cio/qdio_main.c
149
q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
drivers/s390/cio/qdio_main.c
150
q->first_to_check, count, q->irq_ptr->int_parm);
drivers/s390/cio/qdio_main.c
166
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
drivers/s390/cio/qdio_main.c
171
int nr = q->nr;
drivers/s390/cio/qdio_main.c
173
qperf_inc(q, sqbs);
drivers/s390/cio/qdio_main.c
175
if (!q->is_input_q)
drivers/s390/cio/qdio_main.c
176
nr += q->irq_ptr->nr_input_qs;
drivers/s390/cio/qdio_main.c
178
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
drivers/s390/cio/qdio_main.c
188
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
drivers/s390/cio/qdio_main.c
189
qperf_inc(q, sqbs_partial);
drivers/s390/cio/qdio_main.c
192
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
drivers/s390/cio/qdio_main.c
193
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
drivers/s390/cio/qdio_main.c
195
q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
drivers/s390/cio/qdio_main.c
196
q->first_to_check, count, q->irq_ptr->int_parm);
drivers/s390/cio/qdio_main.c
205
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
drivers/s390/cio/qdio_main.c
212
if (is_qebsm(q))
drivers/s390/cio/qdio_main.c
213
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
drivers/s390/cio/qdio_main.c
216
__state = q->slsb.val[bufnr];
drivers/s390/cio/qdio_main.c
226
if (q->slsb.val[bufnr] != __state)
drivers/s390/cio/qdio_main.c
235
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
drivers/s390/cio/qdio_main.c
238
return get_buf_states(q, bufnr, state, 1, auto_ack);
drivers/s390/cio/qdio_main.c
242
static inline int set_buf_states(struct qdio_q *q, int bufnr,
drivers/s390/cio/qdio_main.c
247
if (is_qebsm(q))
drivers/s390/cio/qdio_main.c
248
return qdio_do_sqbs(q, state, bufnr, count);
drivers/s390/cio/qdio_main.c
254
WRITE_ONCE(q->slsb.val[bufnr], state);
drivers/s390/cio/qdio_main.c
264
static inline int set_buf_state(struct qdio_q *q, int bufnr,
drivers/s390/cio/qdio_main.c
267
return set_buf_states(q, bufnr, state, 1);
drivers/s390/cio/qdio_main.c
273
struct qdio_q *q;
drivers/s390/cio/qdio_main.c
276
for_each_input_queue(irq_ptr, q, i)
drivers/s390/cio/qdio_main.c
277
set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
drivers/s390/cio/qdio_main.c
279
for_each_output_queue(irq_ptr, q, i)
drivers/s390/cio/qdio_main.c
280
set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
drivers/s390/cio/qdio_main.c
284
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
drivers/s390/cio/qdio_main.c
287
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
drivers/s390/cio/qdio_main.c
291
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
drivers/s390/cio/qdio_main.c
292
qperf_inc(q, siga_sync);
drivers/s390/cio/qdio_main.c
294
if (is_qebsm(q)) {
drivers/s390/cio/qdio_main.c
295
schid = q->irq_ptr->sch_token;
drivers/s390/cio/qdio_main.c
301
DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
drivers/s390/cio/qdio_main.c
305
static inline int qdio_sync_input_queue(struct qdio_q *q)
drivers/s390/cio/qdio_main.c
307
return qdio_siga_sync(q, 0, q->mask);
drivers/s390/cio/qdio_main.c
310
static inline int qdio_sync_output_queue(struct qdio_q *q)
drivers/s390/cio/qdio_main.c
312
return qdio_siga_sync(q, q->mask, 0);
drivers/s390/cio/qdio_main.c
315
static inline int qdio_siga_sync_q(struct qdio_q *q)
drivers/s390/cio/qdio_main.c
317
if (q->is_input_q)
drivers/s390/cio/qdio_main.c
318
return qdio_sync_input_queue(q);
drivers/s390/cio/qdio_main.c
320
return qdio_sync_output_queue(q);
drivers/s390/cio/qdio_main.c
323
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
drivers/s390/cio/qdio_main.c
326
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
drivers/s390/cio/qdio_main.c
331
if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
drivers/s390/cio/qdio_main.c
338
if (is_qebsm(q)) {
drivers/s390/cio/qdio_main.c
339
schid = q->irq_ptr->sch_token;
drivers/s390/cio/qdio_main.c
343
cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
drivers/s390/cio/qdio_main.c
357
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
drivers/s390/cio/qdio_main.c
358
"%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
drivers/s390/cio/qdio_main.c
359
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
drivers/s390/cio/qdio_main.c
364
static inline int qdio_siga_input(struct qdio_q *q)
drivers/s390/cio/qdio_main.c
366
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
drivers/s390/cio/qdio_main.c
370
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
drivers/s390/cio/qdio_main.c
371
qperf_inc(q, siga_read);
drivers/s390/cio/qdio_main.c
373
if (is_qebsm(q)) {
drivers/s390/cio/qdio_main.c
374
schid = q->irq_ptr->sch_token;
drivers/s390/cio/qdio_main.c
378
cc = do_siga_input(schid, q->mask, fc);
drivers/s390/cio/qdio_main.c
380
DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
drivers/s390/cio/qdio_main.c
384
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
drivers/s390/cio/qdio_main.c
387
if (qdio_need_siga_sync(q->irq_ptr))
drivers/s390/cio/qdio_main.c
388
qdio_siga_sync_q(q);
drivers/s390/cio/qdio_main.c
389
return get_buf_state(q, bufnr, state, 0);
drivers/s390/cio/qdio_main.c
392
static inline void qdio_stop_polling(struct qdio_q *q)
drivers/s390/cio/qdio_main.c
394
if (!q->u.in.batch_count)
drivers/s390/cio/qdio_main.c
397
qperf_inc(q, stop_polling);
drivers/s390/cio/qdio_main.c
400
set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
drivers/s390/cio/qdio_main.c
401
q->u.in.batch_count);
drivers/s390/cio/qdio_main.c
402
q->u.in.batch_count = 0;
drivers/s390/cio/qdio_main.c
405
static inline void account_sbals(struct qdio_q *q, unsigned int count)
drivers/s390/cio/qdio_main.c
407
q->q_stats.nr_sbal_total += count;
drivers/s390/cio/qdio_main.c
408
q->q_stats.nr_sbals[ilog2(count)]++;
drivers/s390/cio/qdio_main.c
411
static void process_buffer_error(struct qdio_q *q, unsigned int start,
drivers/s390/cio/qdio_main.c
415
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
drivers/s390/cio/qdio_main.c
416
q->sbal[start]->element[15].sflags == 0x10) {
drivers/s390/cio/qdio_main.c
417
qperf_inc(q, target_full);
drivers/s390/cio/qdio_main.c
418
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
drivers/s390/cio/qdio_main.c
422
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
drivers/s390/cio/qdio_main.c
423
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
drivers/s390/cio/qdio_main.c
426
q->sbal[start]->element[14].sflags,
drivers/s390/cio/qdio_main.c
427
q->sbal[start]->element[15].sflags);
drivers/s390/cio/qdio_main.c
430
static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
drivers/s390/cio/qdio_main.c
435
set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
drivers/s390/cio/qdio_main.c
437
if (!q->u.in.batch_count)
drivers/s390/cio/qdio_main.c
438
q->u.in.batch_start = start;
drivers/s390/cio/qdio_main.c
439
q->u.in.batch_count += count;
drivers/s390/cio/qdio_main.c
442
static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
drivers/s390/cio/qdio_main.c
448
q->timestamp = get_tod_clock_fast();
drivers/s390/cio/qdio_main.c
450
count = atomic_read(&q->nr_buf_used);
drivers/s390/cio/qdio_main.c
454
if (qdio_need_siga_sync(q->irq_ptr))
drivers/s390/cio/qdio_main.c
455
qdio_sync_input_queue(q);
drivers/s390/cio/qdio_main.c
457
count = get_buf_states(q, start, &state, count, 1);
drivers/s390/cio/qdio_main.c
463
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
drivers/s390/cio/qdio_main.c
466
inbound_handle_work(q, start, count, is_qebsm(q));
drivers/s390/cio/qdio_main.c
467
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
drivers/s390/cio/qdio_main.c
468
qperf_inc(q, inbound_queue_full);
drivers/s390/cio/qdio_main.c
469
if (q->irq_ptr->perf_stat_enabled)
drivers/s390/cio/qdio_main.c
470
account_sbals(q, count);
drivers/s390/cio/qdio_main.c
473
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
drivers/s390/cio/qdio_main.c
477
process_buffer_error(q, start, count);
drivers/s390/cio/qdio_main.c
478
inbound_handle_work(q, start, count, false);
drivers/s390/cio/qdio_main.c
479
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
drivers/s390/cio/qdio_main.c
480
qperf_inc(q, inbound_queue_full);
drivers/s390/cio/qdio_main.c
481
if (q->irq_ptr->perf_stat_enabled)
drivers/s390/cio/qdio_main.c
482
account_sbals_error(q, count);
drivers/s390/cio/qdio_main.c
485
if (q->irq_ptr->perf_stat_enabled)
drivers/s390/cio/qdio_main.c
486
q->q_stats.nr_sbal_nop++;
drivers/s390/cio/qdio_main.c
487
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
drivers/s390/cio/qdio_main.c
488
q->nr, start);
drivers/s390/cio/qdio_main.c
494
dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
drivers/s390/cio/qdio_main.c
496
state, start, q->nr);
drivers/s390/cio/qdio_main.c
506
struct qdio_q *q;
drivers/s390/cio/qdio_main.c
512
q = irq->input_qs[nr];
drivers/s390/cio/qdio_main.c
513
start = q->first_to_check;
drivers/s390/cio/qdio_main.c
516
count = get_inbound_buffer_frontier(q, start, error);
drivers/s390/cio/qdio_main.c
521
q->first_to_check = add_buf(start, count);
drivers/s390/cio/qdio_main.c
526
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
drivers/s390/cio/qdio_main.c
530
if (!atomic_read(&q->nr_buf_used))
drivers/s390/cio/qdio_main.c
533
if (qdio_need_siga_sync(q->irq_ptr))
drivers/s390/cio/qdio_main.c
534
qdio_sync_input_queue(q);
drivers/s390/cio/qdio_main.c
535
get_buf_state(q, start, &state, 0);
drivers/s390/cio/qdio_main.c
544
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
drivers/s390/cio/qdio_main.c
550
q->timestamp = get_tod_clock_fast();
drivers/s390/cio/qdio_main.c
552
count = atomic_read(&q->nr_buf_used);
drivers/s390/cio/qdio_main.c
556
if (qdio_need_siga_sync(q->irq_ptr))
drivers/s390/cio/qdio_main.c
557
qdio_sync_output_queue(q);
drivers/s390/cio/qdio_main.c
559
count = get_buf_states(q, start, &state, count, 0);
drivers/s390/cio/qdio_main.c
569
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
drivers/s390/cio/qdio_main.c
570
"out empty:%1d %02x", q->nr, count);
drivers/s390/cio/qdio_main.c
572
atomic_sub(count, &q->nr_buf_used);
drivers/s390/cio/qdio_main.c
573
if (q->irq_ptr->perf_stat_enabled)
drivers/s390/cio/qdio_main.c
574
account_sbals(q, count);
drivers/s390/cio/qdio_main.c
577
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
drivers/s390/cio/qdio_main.c
578
q->nr, count);
drivers/s390/cio/qdio_main.c
581
process_buffer_error(q, start, count);
drivers/s390/cio/qdio_main.c
582
atomic_sub(count, &q->nr_buf_used);
drivers/s390/cio/qdio_main.c
583
if (q->irq_ptr->perf_stat_enabled)
drivers/s390/cio/qdio_main.c
584
account_sbals_error(q, count);
drivers/s390/cio/qdio_main.c
588
if (q->irq_ptr->perf_stat_enabled)
drivers/s390/cio/qdio_main.c
589
q->q_stats.nr_sbal_nop++;
drivers/s390/cio/qdio_main.c
590
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
drivers/s390/cio/qdio_main.c
591
q->nr);
drivers/s390/cio/qdio_main.c
598
dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
drivers/s390/cio/qdio_main.c
600
state, start, q->nr);
drivers/s390/cio/qdio_main.c
610
struct qdio_q *q;
drivers/s390/cio/qdio_main.c
616
q = irq->output_qs[nr];
drivers/s390/cio/qdio_main.c
617
start = q->first_to_check;
drivers/s390/cio/qdio_main.c
620
count = get_outbound_buffer_frontier(q, start, error);
drivers/s390/cio/qdio_main.c
625
q->first_to_check = add_buf(start, count);
drivers/s390/cio/qdio_main.c
630
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
drivers/s390/cio/qdio_main.c
636
if (!qdio_need_siga_out(q->irq_ptr))
drivers/s390/cio/qdio_main.c
639
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
drivers/s390/cio/qdio_main.c
641
qperf_inc(q, siga_write);
drivers/s390/cio/qdio_main.c
643
cc = qdio_siga_output(q, count, &busy_bit, aob);
drivers/s390/cio/qdio_main.c
653
DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
drivers/s390/cio/qdio_main.c
656
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
drivers/s390/cio/qdio_main.c
662
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
drivers/s390/cio/qdio_main.c
667
DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
drivers/s390/cio/qdio_setup.c
102
struct qdio_q *q;
drivers/s390/cio/qdio_setup.c
106
q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
drivers/s390/cio/qdio_setup.c
107
if (!q) {
drivers/s390/cio/qdio_setup.c
112
q->sl_page = (void *)__get_free_page(GFP_KERNEL);
drivers/s390/cio/qdio_setup.c
113
if (!q->sl_page) {
drivers/s390/cio/qdio_setup.c
114
kmem_cache_free(qdio_q_cache, q);
drivers/s390/cio/qdio_setup.c
118
q->slib = q->sl_page;
drivers/s390/cio/qdio_setup.c
120
q->sl = (struct sl *)(q->slib + 1);
drivers/s390/cio/qdio_setup.c
122
irq_ptr_qs[i] = q;
drivers/s390/cio/qdio_setup.c
146
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
drivers/s390/cio/qdio_setup.c
149
struct slib *const slib = q->slib;
drivers/s390/cio/qdio_setup.c
150
void *const sl_page = q->sl_page;
drivers/s390/cio/qdio_setup.c
151
struct sl *const sl = q->sl;
drivers/s390/cio/qdio_setup.c
154
memset(q, 0, sizeof(*q));
drivers/s390/cio/qdio_setup.c
156
q->sl_page = sl_page;
drivers/s390/cio/qdio_setup.c
157
q->sl = sl;
drivers/s390/cio/qdio_setup.c
158
q->slib = slib;
drivers/s390/cio/qdio_setup.c
159
q->irq_ptr = irq_ptr;
drivers/s390/cio/qdio_setup.c
160
q->mask = 1 << (31 - i);
drivers/s390/cio/qdio_setup.c
161
q->nr = i;
drivers/s390/cio/qdio_setup.c
162
q->handler = handler;
drivers/s390/cio/qdio_setup.c
165
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
drivers/s390/cio/qdio_setup.c
171
DBF_HEX(&q, sizeof(void *));
drivers/s390/cio/qdio_setup.c
175
q->sbal[j] = *sbals_array++;
drivers/s390/cio/qdio_setup.c
179
prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
drivers/s390/cio/qdio_setup.c
181
prev->slib->nsliba = (unsigned long)q->slib;
drivers/s390/cio/qdio_setup.c
184
q->slib->sla = (unsigned long)q->sl;
drivers/s390/cio/qdio_setup.c
185
q->slib->slsba = (unsigned long)&q->slsb.val[0];
drivers/s390/cio/qdio_setup.c
189
q->sl->element[j].sbal = virt_to_dma64(q->sbal[j]);
drivers/s390/cio/qdio_setup.c
195
struct qdio_q *q;
drivers/s390/cio/qdio_setup.c
198
for_each_input_queue(irq_ptr, q, i) {
drivers/s390/cio/qdio_setup.c
200
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
drivers/s390/cio/qdio_setup.c
202
q->is_input_q = 1;
drivers/s390/cio/qdio_setup.c
204
setup_storage_lists(q, irq_ptr,
drivers/s390/cio/qdio_setup.c
208
for_each_output_queue(irq_ptr, q, i) {
drivers/s390/cio/qdio_setup.c
210
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
drivers/s390/cio/qdio_setup.c
212
q->is_input_q = 0;
drivers/s390/cio/qdio_setup.c
213
setup_storage_lists(q, irq_ptr,
drivers/s390/cio/qdio_setup.c
81
struct qdio_q *q;
drivers/s390/cio/qdio_setup.c
85
q = queues[i];
drivers/s390/cio/qdio_setup.c
86
free_page((unsigned long)q->sl_page);
drivers/s390/cio/qdio_setup.c
87
kmem_cache_free(qdio_q_cache, q);
drivers/s390/crypto/vfio_ap_ops.c
1002
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
1006
q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
drivers/s390/crypto/vfio_ap_ops.c
1007
if (q)
drivers/s390/crypto/vfio_ap_ops.c
1008
list_add_tail(&q->reset_qnode, qlist);
drivers/s390/crypto/vfio_ap_ops.c
1127
struct vfio_ap_queue *q = NULL;
drivers/s390/crypto/vfio_ap_ops.c
1129
q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
drivers/s390/crypto/vfio_ap_ops.c
1131
if (q)
drivers/s390/crypto/vfio_ap_ops.c
1132
vfio_ap_unlink_queue_fr_mdev(q);
drivers/s390/crypto/vfio_ap_ops.c
1134
return q;
drivers/s390/crypto/vfio_ap_ops.c
1151
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
1154
q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
drivers/s390/crypto/vfio_ap_ops.c
1156
if (q && qlist) {
drivers/s390/crypto/vfio_ap_ops.c
1159
list_add_tail(&q->reset_qnode, qlist);
drivers/s390/crypto/vfio_ap_ops.c
1167
struct vfio_ap_queue *q, *tmpq;
drivers/s390/crypto/vfio_ap_ops.c
1189
list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
drivers/s390/crypto/vfio_ap_ops.c
1190
vfio_ap_unlink_mdev_fr_queue(q);
drivers/s390/crypto/vfio_ap_ops.c
1191
list_del(&q->reset_qnode);
drivers/s390/crypto/vfio_ap_ops.c
1352
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
1355
q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
drivers/s390/crypto/vfio_ap_ops.c
1357
if (q && qlist) {
drivers/s390/crypto/vfio_ap_ops.c
1360
list_add_tail(&q->reset_qnode, qlist);
drivers/s390/crypto/vfio_ap_ops.c
1368
struct vfio_ap_queue *q, *tmpq;
drivers/s390/crypto/vfio_ap_ops.c
1390
list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
drivers/s390/crypto/vfio_ap_ops.c
1391
vfio_ap_unlink_mdev_fr_queue(q);
drivers/s390/crypto/vfio_ap_ops.c
1392
list_del(&q->reset_qnode);
drivers/s390/crypto/vfio_ap_ops.c
1853
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
1856
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
drivers/s390/crypto/vfio_ap_ops.c
1857
if (q->saved_iova >= iova && q->saved_iova < iova + length)
drivers/s390/crypto/vfio_ap_ops.c
1858
vfio_ap_irq_disable(q);
drivers/s390/crypto/vfio_ap_ops.c
189
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
1904
struct vfio_ap_queue *q = NULL;
drivers/s390/crypto/vfio_ap_ops.c
1911
q = dev_get_drvdata(&queue->ap_dev.device);
drivers/s390/crypto/vfio_ap_ops.c
1915
return q;
drivers/s390/crypto/vfio_ap_ops.c
192
if (q->matrix_mdev && q->matrix_mdev->kvm)
drivers/s390/crypto/vfio_ap_ops.c
193
mutex_lock(&q->matrix_mdev->kvm->lock);
drivers/s390/crypto/vfio_ap_ops.c
1953
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
1955
q = container_of(reset_work, struct vfio_ap_queue, reset_work);
drivers/s390/crypto/vfio_ap_ops.c
1956
memcpy(&status, &q->reset_status, sizeof(status));
drivers/s390/crypto/vfio_ap_ops.c
1960
status = ap_tapq(q->apqn, NULL);
drivers/s390/crypto/vfio_ap_ops.c
1961
ret = apq_status_check(q->apqn, &status);
drivers/s390/crypto/vfio_ap_ops.c
1966
AP_QID_CARD(q->apqn),
drivers/s390/crypto/vfio_ap_ops.c
1967
AP_QID_QUEUE(q->apqn),
drivers/s390/crypto/vfio_ap_ops.c
1972
if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
drivers/s390/crypto/vfio_ap_ops.c
1973
q->reset_status.response_code == AP_RESPONSE_BUSY ||
drivers/s390/crypto/vfio_ap_ops.c
1974
q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
drivers/s390/crypto/vfio_ap_ops.c
1976
status = ap_zapq(q->apqn, 0);
drivers/s390/crypto/vfio_ap_ops.c
1977
memcpy(&q->reset_status, &status, sizeof(status));
drivers/s390/crypto/vfio_ap_ops.c
1980
if (q->saved_isc != VFIO_AP_ISC_INVALID)
drivers/s390/crypto/vfio_ap_ops.c
1981
vfio_ap_free_aqic_resources(q);
drivers/s390/crypto/vfio_ap_ops.c
1987
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
1991
if (!q)
drivers/s390/crypto/vfio_ap_ops.c
1993
status = ap_zapq(q->apqn, 0);
drivers/s390/crypto/vfio_ap_ops.c
1994
memcpy(&q->reset_status, &status, sizeof(status));
drivers/s390/crypto/vfio_ap_ops.c
2003
queue_work(system_long_wq, &q->reset_work);
drivers/s390/crypto/vfio_ap_ops.c
2007
vfio_ap_free_aqic_resources(q);
drivers/s390/crypto/vfio_ap_ops.c
2012
AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
drivers/s390/crypto/vfio_ap_ops.c
2020
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
2022
hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
drivers/s390/crypto/vfio_ap_ops.c
2023
vfio_ap_mdev_reset_queue(q);
drivers/s390/crypto/vfio_ap_ops.c
2025
hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
drivers/s390/crypto/vfio_ap_ops.c
2026
flush_work(&q->reset_work);
drivers/s390/crypto/vfio_ap_ops.c
2028
if (q->reset_status.response_code)
drivers/s390/crypto/vfio_ap_ops.c
2038
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
2040
list_for_each_entry(q, qlist, reset_qnode)
drivers/s390/crypto/vfio_ap_ops.c
2041
vfio_ap_mdev_reset_queue(q);
drivers/s390/crypto/vfio_ap_ops.c
2043
list_for_each_entry(q, qlist, reset_qnode) {
drivers/s390/crypto/vfio_ap_ops.c
2044
flush_work(&q->reset_work);
drivers/s390/crypto/vfio_ap_ops.c
2046
if (q->reset_status.response_code)
drivers/s390/crypto/vfio_ap_ops.c
210
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
212
hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
drivers/s390/crypto/vfio_ap_ops.c
214
if (q && q->apqn == apqn)
drivers/s390/crypto/vfio_ap_ops.c
215
return q;
drivers/s390/crypto/vfio_ap_ops.c
2294
static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
2297
unsigned long apid = AP_QID_CARD(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
2298
unsigned long apqi = AP_QID_QUEUE(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
2314
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
2320
q = dev_get_drvdata(&apdev->device);
drivers/s390/crypto/vfio_ap_ops.c
2321
matrix_mdev = vfio_ap_mdev_for_queue(q);
drivers/s390/crypto/vfio_ap_ops.c
2328
apid = AP_QID_CARD(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
2329
apqi = AP_QID_QUEUE(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
2419
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
2427
q = kzalloc_obj(*q);
drivers/s390/crypto/vfio_ap_ops.c
2428
if (!q) {
drivers/s390/crypto/vfio_ap_ops.c
2433
q->apqn = to_ap_queue(&apdev->device)->qid;
drivers/s390/crypto/vfio_ap_ops.c
2434
q->saved_isc = VFIO_AP_ISC_INVALID;
drivers/s390/crypto/vfio_ap_ops.c
2435
memset(&q->reset_status, 0, sizeof(q->reset_status));
drivers/s390/crypto/vfio_ap_ops.c
2436
INIT_WORK(&q->reset_work, apq_reset_check);
drivers/s390/crypto/vfio_ap_ops.c
2437
matrix_mdev = get_update_locks_by_apqn(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
2440
vfio_ap_mdev_link_queue(matrix_mdev, q);
drivers/s390/crypto/vfio_ap_ops.c
2460
dev_set_drvdata(&apdev->device, q);
drivers/s390/crypto/vfio_ap_ops.c
2473
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
2477
q = dev_get_drvdata(&apdev->device);
drivers/s390/crypto/vfio_ap_ops.c
2478
get_update_locks_for_queue(q);
drivers/s390/crypto/vfio_ap_ops.c
2479
matrix_mdev = q->matrix_mdev;
drivers/s390/crypto/vfio_ap_ops.c
2480
apid = AP_QID_CARD(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
2481
apqi = AP_QID_QUEUE(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
2506
vfio_ap_mdev_reset_queue(q);
drivers/s390/crypto/vfio_ap_ops.c
2507
flush_work(&q->reset_work);
drivers/s390/crypto/vfio_ap_ops.c
2512
vfio_ap_unlink_queue_fr_mdev(q);
drivers/s390/crypto/vfio_ap_ops.c
2515
kfree(q);
drivers/s390/crypto/vfio_ap_ops.c
269
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
271
if (!q)
drivers/s390/crypto/vfio_ap_ops.c
273
if (q->saved_isc != VFIO_AP_ISC_INVALID &&
drivers/s390/crypto/vfio_ap_ops.c
274
!WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
drivers/s390/crypto/vfio_ap_ops.c
275
kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
drivers/s390/crypto/vfio_ap_ops.c
276
q->saved_isc = VFIO_AP_ISC_INVALID;
drivers/s390/crypto/vfio_ap_ops.c
278
if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
drivers/s390/crypto/vfio_ap_ops.c
279
vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
drivers/s390/crypto/vfio_ap_ops.c
280
q->saved_iova = 0;
drivers/s390/crypto/vfio_ap_ops.c
302
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
309
status = ap_aqic(q->apqn, aqic_gisa, 0);
drivers/s390/crypto/vfio_ap_ops.c
313
vfio_ap_wait_for_irqclear(q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
334
vfio_ap_free_aqic_resources(q);
drivers/s390/crypto/vfio_ap_ops.c
39
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
drivers/s390/crypto/vfio_ap_ops.c
416
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
drivers/s390/crypto/vfio_ap_ops.c
433
__func__, &nib, q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
440
ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
drivers/s390/crypto/vfio_ap_ops.c
448
__func__, ret, &nib, q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
454
kvm = q->matrix_mdev->kvm;
drivers/s390/crypto/vfio_ap_ops.c
463
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
drivers/s390/crypto/vfio_ap_ops.c
471
__func__, nisc, isc, q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
473
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
drivers/s390/crypto/vfio_ap_ops.c
482
status = ap_aqic(q->apqn, aqic_gisa, h_nib);
drivers/s390/crypto/vfio_ap_ops.c
486
vfio_ap_free_aqic_resources(q);
drivers/s390/crypto/vfio_ap_ops.c
487
q->saved_iova = nib;
drivers/s390/crypto/vfio_ap_ops.c
488
q->saved_isc = isc;
drivers/s390/crypto/vfio_ap_ops.c
495
__func__, ret, isc, q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
496
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
drivers/s390/crypto/vfio_ap_ops.c
499
pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
drivers/s390/crypto/vfio_ap_ops.c
501
vfio_ap_irq_disable(q);
drivers/s390/crypto/vfio_ap_ops.c
512
q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
586
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
622
q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
drivers/s390/crypto/vfio_ap_ops.c
623
if (!q) {
drivers/s390/crypto/vfio_ap_ops.c
634
qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
drivers/s390/crypto/vfio_ap_ops.c
636
qstatus = vfio_ap_irq_disable(q);
drivers/s390/crypto/vfio_ap_ops.c
683
static bool _queue_passable(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
685
if (!q)
drivers/s390/crypto/vfio_ap_ops.c
688
switch (q->reset_status.response_code) {
drivers/s390/crypto/vfio_ap_ops.c
817
struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
819
if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn))
drivers/s390/crypto/vfio_ap_ops.c
822
q->matrix_mdev = matrix_mdev;
drivers/s390/crypto/vfio_ap_ops.c
823
hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
drivers/s390/crypto/vfio_ap_ops.c
828
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
830
q = vfio_ap_find_queue(apqn);
drivers/s390/crypto/vfio_ap_ops.c
831
vfio_ap_mdev_link_queue(matrix_mdev, q);
drivers/s390/crypto/vfio_ap_ops.c
834
static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
836
hash_del(&q->mdev_qnode);
drivers/s390/crypto/vfio_ap_ops.c
839
static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
drivers/s390/crypto/vfio_ap_ops.c
841
q->matrix_mdev = NULL;
drivers/s390/crypto/vfio_ap_ops.c
846
struct vfio_ap_queue *q;
drivers/s390/crypto/vfio_ap_ops.c
852
q = vfio_ap_mdev_get_queue(matrix_mdev,
drivers/s390/crypto/vfio_ap_ops.c
854
if (q)
drivers/s390/crypto/vfio_ap_ops.c
855
q->matrix_mdev = NULL;
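vfio_ap_ops.c resets queues in two passes (lines 2038-2046 above): the first loop only schedules every queue's reset_work on system_long_wq, and a separate second loop flush_work()s them, so the per-queue resets run concurrently instead of back to back. The shape against the real workqueue API, with a simplified container type:

    #include <linux/list.h>
    #include <linux/workqueue.h>

    struct resettable {
            struct list_head node;
            struct work_struct reset_work;  /* INIT_WORK()ed elsewhere */
    };

    /* Phase 1: fan out all resets.  Phase 2: wait for each in turn. */
    static void reset_all(struct list_head *qlist)
    {
            struct resettable *r;

            list_for_each_entry(r, qlist, node)
                    queue_work(system_long_wq, &r->reset_work);

            list_for_each_entry(r, qlist, node)
                    flush_work(&r->reset_work);
    }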
drivers/s390/crypto/zcrypt_msgtype50.c
102
unsigned char q[64];
drivers/s390/crypto/zcrypt_msgtype50.c
115
unsigned char q[128];
drivers/s390/crypto/zcrypt_msgtype50.c
128
unsigned char q[256];
drivers/s390/crypto/zcrypt_msgtype50.c
262
unsigned char *p, *q, *dp, *dq, *u, *inp;
drivers/s390/crypto/zcrypt_msgtype50.c
282
q = crb1->q + sizeof(crb1->q) - short_len;
drivers/s390/crypto/zcrypt_msgtype50.c
296
q = crb2->q + sizeof(crb2->q) - short_len;
drivers/s390/crypto/zcrypt_msgtype50.c
311
q = crb3->q + sizeof(crb3->q) - short_len;
drivers/s390/crypto/zcrypt_msgtype50.c
325
copy_from_user(q, crt->nq_prime, short_len) ||
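zcrypt_msgtype50.c right-justifies each RSA-CRT component inside a fixed-width field: the destination is computed as crbN->q + sizeof(crbN->q) - short_len, so a shorter big-endian integer lands flush against the end of the field. A sketch of that placement, on the assumption that the leading bytes must read as zero padding:

    #include <string.h>

    /* Place a short_len-byte big-endian integer at the tail of a
     * field_len-byte field, zeroing the leading pad bytes. */
    static void put_right_justified(unsigned char *field, size_t field_len,
                                    const unsigned char *src, size_t short_len)
    {
            memset(field, 0, field_len - short_len);
            memcpy(field + field_len - short_len, src, short_len);
    }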
drivers/s390/net/ctcm_fsms.c
213
void ctcm_purge_skb_queue(struct sk_buff_head *q)
drivers/s390/net/ctcm_fsms.c
219
while ((skb = skb_dequeue(q))) {
drivers/s390/net/ctcm_fsms.h
161
void ctcm_purge_skb_queue(struct sk_buff_head *q);
drivers/s390/net/qeth_core.h
523
#define qeth_for_each_output_queue(card, q, i) \
drivers/s390/net/qeth_core.h
525
(q = card->qdio.out_qs[i]); i++)
drivers/s390/net/qeth_core_main.c
1327
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
drivers/s390/net/qeth_core_main.c
1336
QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
drivers/s390/net/qeth_core_main.c
1337
QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
drivers/s390/net/qeth_core_main.c
1456
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
drivers/s390/net/qeth_core_main.c
1460
qeth_tx_complete_pending_bufs(q->card, q, true, 0);
drivers/s390/net/qeth_core_main.c
1463
if (!q->bufs[j])
drivers/s390/net/qeth_core_main.c
1466
qeth_clear_output_buffer(q, q->bufs[j], true, 0);
drivers/s390/net/qeth_core_main.c
1468
qeth_free_out_buf(q->bufs[j]);
drivers/s390/net/qeth_core_main.c
1469
q->bufs[j] = NULL;
drivers/s390/net/qeth_core_main.c
2565
static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
drivers/s390/net/qeth_core_main.c
2574
newbuf->buffer = q->qdio_bufs[bidx];
drivers/s390/net/qeth_core_main.c
2578
q->bufs[bidx] = newbuf;
drivers/s390/net/qeth_core_main.c
2582
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
drivers/s390/net/qeth_core_main.c
2584
if (!q)
drivers/s390/net/qeth_core_main.c
2587
qeth_drain_output_queue(q, true);
drivers/s390/net/qeth_core_main.c
2588
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
drivers/s390/net/qeth_core_main.c
2589
kfree(q);
drivers/s390/net/qeth_core_main.c
2594
struct qeth_qdio_out_q *q = kzalloc_obj(*q);
drivers/s390/net/qeth_core_main.c
2597
if (!q)
drivers/s390/net/qeth_core_main.c
2600
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
drivers/s390/net/qeth_core_main.c
2604
if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
drivers/s390/net/qeth_core_main.c
2608
return q;
drivers/s390/net/qeth_core_main.c
2612
qeth_free_out_buf(q->bufs[--i]);
drivers/s390/net/qeth_core_main.c
2613
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
drivers/s390/net/qeth_core_main.c
2615
kfree(q);
drivers/s390/net/qeth_core_main.c
316
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
drivers/s390/net/qeth_core_main.c
318
if (!q)
drivers/s390/net/qeth_core_main.c
321
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
drivers/s390/net/qeth_core_main.c
322
kfree(q);
drivers/s390/net/qeth_core_main.c
327
struct qeth_qdio_q *q = kzalloc_obj(*q);
drivers/s390/net/qeth_core_main.c
330
if (!q)
drivers/s390/net/qeth_core_main.c
333
if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
drivers/s390/net/qeth_core_main.c
334
kfree(q);
drivers/s390/net/qeth_core_main.c
339
q->bufs[i].buffer = q->qdio_bufs[i];
drivers/s390/net/qeth_core_main.c
341
QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
drivers/s390/net/qeth_core_main.c
342
return q;
drivers/scsi/aacraid/aacraid.h
2707
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
drivers/scsi/aacraid/aacraid.h
2708
void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
drivers/scsi/aacraid/aacraid.h
2732
unsigned int aac_response_normal(struct aac_queue * q);
drivers/scsi/aacraid/aacraid.h
2733
unsigned int aac_command_normal(struct aac_queue * q);
drivers/scsi/aacraid/comminit.c
259
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
drivers/scsi/aacraid/comminit.c
261
atomic_set(&q->numpending, 0);
drivers/scsi/aacraid/comminit.c
262
q->dev = dev;
drivers/scsi/aacraid/comminit.c
263
init_waitqueue_head(&q->cmdready);
drivers/scsi/aacraid/comminit.c
264
INIT_LIST_HEAD(&q->cmdq);
drivers/scsi/aacraid/comminit.c
265
init_waitqueue_head(&q->qfull);
drivers/scsi/aacraid/comminit.c
266
spin_lock_init(&q->lockdata);
drivers/scsi/aacraid/comminit.c
267
q->lock = &q->lockdata;
drivers/scsi/aacraid/comminit.c
268
q->headers.producer = (__le32 *)mem;
drivers/scsi/aacraid/comminit.c
269
q->headers.consumer = (__le32 *)(mem+1);
drivers/scsi/aacraid/comminit.c
270
*(q->headers.producer) = cpu_to_le32(qsize);
drivers/scsi/aacraid/comminit.c
271
*(q->headers.consumer) = cpu_to_le32(qsize);
drivers/scsi/aacraid/comminit.c
272
q->entries = qsize;
drivers/scsi/aacraid/commsup.c
353
struct aac_queue * q;
drivers/scsi/aacraid/commsup.c
363
q = &dev->queues->queue[qid];
drivers/scsi/aacraid/commsup.c
365
idx = *index = le32_to_cpu(*(q->headers.producer));
drivers/scsi/aacraid/commsup.c
367
if (idx != le32_to_cpu(*(q->headers.consumer))) {
drivers/scsi/aacraid/commsup.c
374
if (idx != le32_to_cpu(*(q->headers.consumer)))
drivers/scsi/aacraid/commsup.c
387
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
drivers/scsi/aacraid/commsup.c
389
qid, atomic_read(&q->numpending));
drivers/scsi/aacraid/commsup.c
392
*entry = q->base + *index;
drivers/scsi/aacraid/commsup.c
644
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
drivers/scsi/aacraid/commsup.c
645
atomic_dec(&q->numpending);
drivers/scsi/aacraid/commsup.c
792
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
drivers/scsi/aacraid/commsup.c
796
if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
drivers/scsi/aacraid/commsup.c
804
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
drivers/scsi/aacraid/commsup.c
807
index = le32_to_cpu(*q->headers.consumer);
drivers/scsi/aacraid/commsup.c
808
*entry = q->base + index;
drivers/scsi/aacraid/commsup.c
824
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
drivers/scsi/aacraid/commsup.c
829
if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
drivers/scsi/aacraid/commsup.c
832
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
drivers/scsi/aacraid/commsup.c
833
*q->headers.consumer = cpu_to_le32(1);
drivers/scsi/aacraid/commsup.c
835
le32_add_cpu(q->headers.consumer, 1);
drivers/scsi/aacraid/commsup.c
867
struct aac_queue * q;
drivers/scsi/aacraid/commsup.c
912
q = &dev->queues->queue[AdapNormRespQueue];
drivers/scsi/aacraid/commsup.c
913
spin_lock_irqsave(q->lock, qflags);
drivers/scsi/aacraid/commsup.c
915
*(q->headers.producer) = cpu_to_le32(index + 1);
drivers/scsi/aacraid/commsup.c
916
spin_unlock_irqrestore(q->lock, qflags);
drivers/scsi/aacraid/dpcsup.c
135
spin_lock_irqsave(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
143
spin_unlock_irqrestore(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
158
unsigned int aac_command_normal(struct aac_queue *q)
drivers/scsi/aacraid/dpcsup.c
160
struct aac_dev * dev = q->dev;
drivers/scsi/aacraid/dpcsup.c
164
spin_lock_irqsave(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
171
while(aac_consumer_get(dev, q, &entry))
drivers/scsi/aacraid/dpcsup.c
200
list_add_tail(&fib->fiblink, &q->cmdq);
drivers/scsi/aacraid/dpcsup.c
201
aac_consumer_free(dev, q, HostNormCmdQueue);
drivers/scsi/aacraid/dpcsup.c
202
wake_up_interruptible(&q->cmdready);
drivers/scsi/aacraid/dpcsup.c
204
aac_consumer_free(dev, q, HostNormCmdQueue);
drivers/scsi/aacraid/dpcsup.c
205
spin_unlock_irqrestore(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
211
spin_lock_irqsave(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
214
spin_unlock_irqrestore(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
278
struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
drivers/scsi/aacraid/dpcsup.c
309
spin_lock_irqsave(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
310
list_add_tail(&fib->fiblink, &q->cmdq);
drivers/scsi/aacraid/dpcsup.c
311
wake_up_interruptible(&q->cmdready);
drivers/scsi/aacraid/dpcsup.c
312
spin_unlock_irqrestore(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
39
unsigned int aac_response_normal(struct aac_queue * q)
drivers/scsi/aacraid/dpcsup.c
41
struct aac_dev * dev = q->dev;
drivers/scsi/aacraid/dpcsup.c
48
spin_lock_irqsave(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
55
while(aac_consumer_get(dev, q, &entry))
drivers/scsi/aacraid/dpcsup.c
63
aac_consumer_free(dev, q, HostNormRespQueue);
drivers/scsi/aacraid/dpcsup.c
75
spin_unlock_irqrestore(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
78
spin_lock_irqsave(q->lock, flags);
drivers/scsi/aacraid/dpcsup.c
81
spin_unlock_irqrestore(q->lock, flags);
drivers/scsi/aacraid/rx.c
401
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
drivers/scsi/aacraid/rx.c
407
atomic_inc(&q->numpending);
drivers/scsi/aacraid/rx.c
408
*(q->headers.producer) = cpu_to_le32(Index + 1);
drivers/scsi/aacraid/rx.c
424
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
drivers/scsi/aacraid/rx.c
430
atomic_inc(&q->numpending);
drivers/scsi/aacraid/rx.c
438
atomic_dec(&q->numpending);
drivers/scsi/aacraid/src.c
486
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
drivers/scsi/aacraid/src.c
497
atomic_inc(&q->numpending);
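aacraid keeps both ring indices in the shared DMA block itself: aac_queue_init() points headers.producer at mem and headers.consumer at mem + 1, and each side signals work by advancing its own index, always through le32 accessors since the firmware is little-endian. aac_consumer_free() wraps an index past q->entries back to 1 rather than 0. A consumer-side sketch under that layout, with plain uint32_t standing in for __le32:

    #include <stdint.h>

    struct shared_q {
            volatile uint32_t *producer;    /* advanced by the adapter */
            volatile uint32_t *consumer;    /* advanced by the host */
            uint32_t entries;               /* ring size */
    };

    /* Empty when the indices match; otherwise take the current slot and
     * advance, wrapping back to 1 as aac_consumer_free() does. */
    static int shared_q_consume(struct shared_q *q, uint32_t *index)
    {
            if (*q->producer == *q->consumer)
                    return 0;

            *index = *q->consumer;
            if (*q->consumer >= q->entries)
                    *q->consumer = 1;
            else
                    *q->consumer += 1;
            return 1;
    }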
drivers/scsi/advansys.c
2484
static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q)
drivers/scsi/advansys.c
2489
printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q);
drivers/scsi/advansys.c
2493
q->q2.target_ix, q->q1.target_lun, q->q2.srb_tag,
drivers/scsi/advansys.c
2494
q->q2.tag_code);
drivers/scsi/advansys.c
2498
(ulong)le32_to_cpu(q->q1.data_addr),
drivers/scsi/advansys.c
2499
(ulong)le32_to_cpu(q->q1.data_cnt),
drivers/scsi/advansys.c
2500
(ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len);
drivers/scsi/advansys.c
2503
(ulong)q->cdbptr, q->q2.cdb_len,
drivers/scsi/advansys.c
2504
(ulong)q->sg_head, q->q1.sg_queue_cnt);
drivers/scsi/advansys.c
2506
if (q->sg_head) {
drivers/scsi/advansys.c
2507
sgp = q->sg_head;
drivers/scsi/advansys.c
2523
static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q)
drivers/scsi/advansys.c
2525
printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q);
drivers/scsi/advansys.c
2527
q->d2.srb_tag, q->d2.target_ix, q->d2.cdb_len,
drivers/scsi/advansys.c
2528
q->d2.tag_code);
drivers/scsi/advansys.c
2531
q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg);
drivers/scsi/advansys.c
2562
static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q)
drivers/scsi/advansys.c
2568
printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q);
drivers/scsi/advansys.c
2571
q->target_id, q->target_lun, q->srb_tag);
drivers/scsi/advansys.c
2574
q->cntl, (ulong)le32_to_cpu(q->data_addr));
drivers/scsi/advansys.c
2577
(ulong)le32_to_cpu(q->data_cnt),
drivers/scsi/advansys.c
2578
(ulong)le32_to_cpu(q->sense_addr), q->sense_len);
drivers/scsi/advansys.c
2582
q->cdb_len, q->done_status, q->host_status, q->scsi_status);
drivers/scsi/advansys.c
2585
q->sg_working_ix, q->target_cmd);
drivers/scsi/advansys.c
2588
(ulong)le32_to_cpu(q->scsiq_rptr),
drivers/scsi/advansys.c
2589
(ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr);
drivers/scsi/advansys.c
2592
if (q->sg_list_ptr != NULL) {
drivers/scsi/advansys.c
2593
sgblkp = container_of(q->sg_list_ptr, adv_sgblk_t, sg_block);
drivers/scsi/arm/queue.c
111
QE_t *q;
drivers/scsi/arm/queue.c
121
q = list_entry(l, QE_t, list);
drivers/scsi/arm/queue.c
122
BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE));
drivers/scsi/arm/queue.c
124
SET_MAGIC(q, QUEUE_MAGIC_USED);
drivers/scsi/arm/queue.c
125
q->SCpnt = SCpnt;
drivers/scsi/arm/queue.c
140
QE_t *q;
drivers/scsi/arm/queue.c
146
q = list_entry(ent, QE_t, list);
drivers/scsi/arm/queue.c
147
BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED));
drivers/scsi/arm/queue.c
149
SET_MAGIC(q, QUEUE_MAGIC_FREE);
drivers/scsi/arm/queue.c
152
return q->SCpnt;
drivers/scsi/arm/queue.c
170
QE_t *q = list_entry(l, QE_t, list);
drivers/scsi/arm/queue.c
171
if (!test_bit(q->SCpnt->device->id * 8 +
drivers/scsi/arm/queue.c
172
(u8)(q->SCpnt->device->lun & 0x7), exclude)) {
drivers/scsi/arm/queue.c
219
QE_t *q = list_entry(l, QE_t, list);
drivers/scsi/arm/queue.c
220
if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
drivers/scsi/arm/queue.c
221
scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
drivers/scsi/arm/queue.c
245
QE_t *q = list_entry(l, QE_t, list);
drivers/scsi/arm/queue.c
246
if (q->SCpnt->device->id == target)
drivers/scsi/arm/queue.c
269
QE_t *q = list_entry(l, QE_t, list);
drivers/scsi/arm/queue.c
270
if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun) {
drivers/scsi/arm/queue.c
295
QE_t *q = list_entry(l, QE_t, list);
drivers/scsi/arm/queue.c
296
if (q->SCpnt == SCpnt) {
drivers/scsi/arm/queue.c
43
#define SET_MAGIC(q,m) ((q)->magic = (m))
drivers/scsi/arm/queue.c
44
#define BAD_MAGIC(q,m) ((q)->magic != (m))
drivers/scsi/arm/queue.c
46
#define SET_MAGIC(q,m) do { } while (0)
drivers/scsi/arm/queue.c
47
#define BAD_MAGIC(q,m) (0)
drivers/scsi/arm/queue.c
62
QE_t *q;
drivers/scsi/arm/queue.c
74
queue->alloc = q = kmalloc_objs(QE_t, nqueues);
drivers/scsi/arm/queue.c
75
if (q) {
drivers/scsi/arm/queue.c
76
for (; nqueues; q++, nqueues--) {
drivers/scsi/arm/queue.c
77
SET_MAGIC(q, QUEUE_MAGIC_FREE);
drivers/scsi/arm/queue.c
78
q->SCpnt = NULL;
drivers/scsi/arm/queue.c
79
list_add(&q->list, &queue->free);
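arm/queue.c pre-allocates every queue entry into a free list and, when debugging is compiled in, tags each entry with a magic word that flips between FREE and USED; BAD_MAGIC() then turns a double free or use of a stale entry into an immediate BUG_ON, while the no-op variants of SET_MAGIC/BAD_MAGIC make the checks free in production builds. A reduced userspace analogue (the magic values here are arbitrary):

    #include <assert.h>

    #define MAGIC_FREE 0x51e2f00du          /* arbitrary, distinct values */
    #define MAGIC_USED 0x51e2beefu

    struct qe {
            unsigned int magic;
            void *payload;
    };

    static void qe_take(struct qe *q, void *payload)
    {
            assert(q->magic == MAGIC_FREE); /* catches double allocation */
            q->magic = MAGIC_USED;
            q->payload = payload;
    }

    static void *qe_put(struct qe *q)
    {
            assert(q->magic == MAGIC_USED); /* catches double free */
            q->magic = MAGIC_FREE;
            return q->payload;
    }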
drivers/scsi/be2iscsi/be.h
51
static inline void *queue_head_node(struct be_queue_info *q)
drivers/scsi/be2iscsi/be.h
53
return q->dma_mem.va + q->head * q->entry_size;
drivers/scsi/be2iscsi/be.h
56
static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
drivers/scsi/be2iscsi/be.h
58
return q->dma_mem.va + wrb_num * q->entry_size;
drivers/scsi/be2iscsi/be.h
61
static inline void *queue_tail_node(struct be_queue_info *q)
drivers/scsi/be2iscsi/be.h
63
return q->dma_mem.va + q->tail * q->entry_size;
drivers/scsi/be2iscsi/be.h
66
static inline void queue_head_inc(struct be_queue_info *q)
drivers/scsi/be2iscsi/be.h
68
index_inc(&q->head, q->len);
drivers/scsi/be2iscsi/be.h
71
static inline void queue_tail_inc(struct be_queue_info *q)
drivers/scsi/be2iscsi/be.h
73
index_inc(&q->tail, q->len);
drivers/scsi/be2iscsi/be.h
89
struct be_queue_info q;
drivers/scsi/be2iscsi/be.h
97
struct be_queue_info q;
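be2iscsi addresses ring entries arithmetically: each queue is one contiguous DMA allocation and entry i lives at dma_mem.va + i * entry_size, with head and tail advanced through index_inc() over q->len. A sketch of that addressing with simplified types (index_inc's wrap-at-limit behavior is inferred from its call sites above):

    #include <stddef.h>
    #include <stdint.h>

    struct be_ring {
            void *va;                       /* base of the DMA buffer */
            uint32_t head, tail;            /* entry indices */
            uint32_t len;                   /* number of entries */
            uint32_t entry_size;            /* bytes per entry */
    };

    static inline void index_inc(uint32_t *index, uint32_t limit)
    {
            *index = (*index + 1) % limit;  /* wrap at the ring length */
    }

    static inline void *ring_head_node(struct be_ring *q)
    {
            return (uint8_t *)q->va + (size_t)q->head * q->entry_size;
    }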
drivers/scsi/be2iscsi/be_cmds.c
143
struct be_queue_info *mccq = &ctrl->mcc_obj.q;
drivers/scsi/be2iscsi/be_cmds.c
173
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
drivers/scsi/be2iscsi/be_cmds.c
563
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
drivers/scsi/be2iscsi/be_cmds.c
900
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
drivers/scsi/be2iscsi/be_cmds.c
91
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
drivers/scsi/be2iscsi/be_cmds.c
948
req->id = cpu_to_le16(q->id);
drivers/scsi/be2iscsi/be_cmds.h
810
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
drivers/scsi/be2iscsi/be_main.c
1855
hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
drivers/scsi/be2iscsi/be_main.c
2064
eq = &pbe_eq->q;
drivers/scsi/be2iscsi/be_main.c
2082
pbe_eq->q.id, ret);
drivers/scsi/be2iscsi/be_main.c
2084
hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
drivers/scsi/be2iscsi/be_main.c
2987
static int be_fill_queue(struct be_queue_info *q,
drivers/scsi/be2iscsi/be_main.c
2990
struct be_dma_mem *mem = &q->dma_mem;
drivers/scsi/be2iscsi/be_main.c
2992
memset(q, 0, sizeof(*q));
drivers/scsi/be2iscsi/be_main.c
2993
q->len = len;
drivers/scsi/be2iscsi/be_main.c
2994
q->entry_size = entry_size;
drivers/scsi/be2iscsi/be_main.c
3021
eq = &phwi_context->be_eq[i].q;
drivers/scsi/be2iscsi/be_main.c
3052
phwi_context->be_eq[i].q.id);
drivers/scsi/be2iscsi/be_main.c
3058
eq = &phwi_context->be_eq[i].q;
drivers/scsi/be2iscsi/be_main.c
3084
eq = &phwi_context->be_eq[i].q;
drivers/scsi/be2iscsi/be_main.c
3312
static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
drivers/scsi/be2iscsi/be_main.c
3314
struct be_dma_mem *mem = &q->dma_mem;
drivers/scsi/be2iscsi/be_main.c
3322
static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
drivers/scsi/be2iscsi/be_main.c
3325
struct be_dma_mem *mem = &q->dma_mem;
drivers/scsi/be2iscsi/be_main.c
3327
memset(q, 0, sizeof(*q));
drivers/scsi/be2iscsi/be_main.c
3328
q->len = len;
drivers/scsi/be2iscsi/be_main.c
3329
q->entry_size = entry_size;
drivers/scsi/be2iscsi/be_main.c
3453
struct be_queue_info *q;
drivers/scsi/be2iscsi/be_main.c
3456
q = &phba->ctrl.mcc_obj.q;
drivers/scsi/be2iscsi/be_main.c
3498
if (q->created) {
drivers/scsi/be2iscsi/be_main.c
3499
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
drivers/scsi/be2iscsi/be_main.c
3500
be_queue_free(phba, q);
drivers/scsi/be2iscsi/be_main.c
3503
q = &phba->ctrl.mcc_obj.cq;
drivers/scsi/be2iscsi/be_main.c
3504
if (q->created) {
drivers/scsi/be2iscsi/be_main.c
3505
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
drivers/scsi/be2iscsi/be_main.c
3506
be_queue_free(phba, q);
drivers/scsi/be2iscsi/be_main.c
3513
struct be_queue_info *q, *cq;
drivers/scsi/be2iscsi/be_main.c
3524
&phwi_context->be_eq[phba->num_cpus].q,
drivers/scsi/be2iscsi/be_main.c
3528
if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
drivers/scsi/be2iscsi/be_main.c
3534
q = &phba->ctrl.mcc_obj.q;
drivers/scsi/be2iscsi/be_main.c
3535
if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
drivers/scsi/be2iscsi/be_main.c
3539
if (beiscsi_cmd_mccq_create(phba, q, cq))
drivers/scsi/be2iscsi/be_main.c
3545
be_queue_free(phba, q);
drivers/scsi/be2iscsi/be_main.c
3605
eq = &phwi_context->be_eq[i].q;
drivers/scsi/be2iscsi/be_main.c
3623
struct be_queue_info *q;
drivers/scsi/be2iscsi/be_main.c
3646
q = &phwi_context->be_wrbq[i];
drivers/scsi/be2iscsi/be_main.c
3647
if (q->created)
drivers/scsi/be2iscsi/be_main.c
3648
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
drivers/scsi/be2iscsi/be_main.c
3656
q = &phwi_context->be_def_hdrq[ulp_num];
drivers/scsi/be2iscsi/be_main.c
3657
if (q->created)
drivers/scsi/be2iscsi/be_main.c
3658
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
drivers/scsi/be2iscsi/be_main.c
3660
q = &phwi_context->be_def_dataq[ulp_num];
drivers/scsi/be2iscsi/be_main.c
3661
if (q->created)
drivers/scsi/be2iscsi/be_main.c
3662
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
drivers/scsi/be2iscsi/be_main.c
3669
q = &phwi_context->be_cq[i];
drivers/scsi/be2iscsi/be_main.c
3670
if (q->created) {
drivers/scsi/be2iscsi/be_main.c
3671
be_queue_free(phba, q);
drivers/scsi/be2iscsi/be_main.c
3672
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
drivers/scsi/be2iscsi/be_main.c
3682
q = &phwi_context->be_eq[i].q;
drivers/scsi/be2iscsi/be_main.c
3683
if (q->created) {
drivers/scsi/be2iscsi/be_main.c
3684
be_queue_free(phba, q);
drivers/scsi/be2iscsi/be_main.c
3685
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
drivers/scsi/be2iscsi/be_main.c
4100
eq = &phwi_context->be_eq[0].q;
drivers/scsi/be2iscsi/be_main.c
4107
eq = &phwi_context->be_eq[i].q;
drivers/scsi/be2iscsi/be_main.c
5213
set_eqd[num].eq_id = pbe_eq->q.id;
drivers/scsi/be2iscsi/be_main.c
693
eq = &pbe_eq->q;
drivers/scsi/be2iscsi/be_main.c
730
eq = &pbe_eq->q;
drivers/scsi/be2iscsi/be_main.c
769
eq = &phwi_context->be_eq[0].q;
drivers/scsi/bfa/bfa_core.c
1317
int q;
drivers/scsi/bfa/bfa_core.c
1319
for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
drivers/scsi/bfa/bfa_core.c
1320
bfa_reqq_ci(bfa, q) = 0;
drivers/scsi/bfa/bfa_core.c
1321
bfa_reqq_pi(bfa, q) = 0;
drivers/scsi/bfa/bfa_core.c
1322
bfa_rspq_ci(bfa, q) = 0;
drivers/scsi/bfa/bfa_core.c
1323
bfa_rspq_pi(bfa, q) = 0;
drivers/scsi/bfa/bfa_core.c
1473
int q, per_reqq_sz, per_rspq_sz;
drivers/scsi/bfa/bfa_core.c
1489
for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
drivers/scsi/bfa/bfa_core.c
1490
bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
drivers/scsi/bfa/bfa_core.c
1492
bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
drivers/scsi/bfa/bfa_core.c
1497
for (q = 0; q < cfg->fwcfg.num_cqs; q++)
drivers/scsi/bfa/bfa_cs.h
157
bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
drivers/scsi/bfa/bfa_cs.h
161
tqe = bfa_q_next(q);
drivers/scsi/bfa/bfa_cs.h
162
while (tqe != q) {
drivers/scsi/csiostor/csio_isr.c
428
struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
drivers/scsi/csiostor/csio_isr.c
431
entryp[k].desc, q);
drivers/scsi/csiostor/csio_isr.c
439
entryp[k].dev_id = q;
drivers/scsi/csiostor/csio_scsi.c
1159
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
drivers/scsi/csiostor/csio_scsi.c
1167
list_for_each_safe(tmp, next, q) {
drivers/scsi/csiostor/csio_scsi.c
1233
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
drivers/scsi/csiostor/csio_scsi.c
1240
if (list_empty(q))
drivers/scsi/csiostor/csio_scsi.c
1246
list_for_each_safe(tmp, next, q) {
drivers/scsi/csiostor/csio_scsi.c
1252
while (!list_empty(q) && count--) {
drivers/scsi/csiostor/csio_scsi.c
1259
if (list_empty(q))
drivers/scsi/csiostor/csio_wr.c
1001
if (q->pidx > q->cidx)
drivers/scsi/csiostor/csio_wr.c
1002
return q->pidx - q->cidx;
drivers/scsi/csiostor/csio_wr.c
1003
else if (q->cidx > q->pidx)
drivers/scsi/csiostor/csio_wr.c
1004
return q->credits - (q->cidx - q->pidx);
drivers/scsi/csiostor/csio_wr.c
1041
csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
drivers/scsi/csiostor/csio_wr.c
1053
struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
drivers/scsi/csiostor/csio_wr.c
1091
iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
drivers/scsi/csiostor/csio_wr.c
1110
csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
drivers/scsi/csiostor/csio_wr.c
1112
return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
drivers/scsi/csiostor/csio_wr.c
1127
csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
drivers/scsi/csiostor/csio_wr.c
1134
void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
drivers/scsi/csiostor/csio_wr.c
1138
struct csio_q *flq = csio_iq_has_fl(q) ?
drivers/scsi/csiostor/csio_wr.c
1139
wrm->q_arr[q->un.iq.flq_idx] : NULL;
drivers/scsi/csiostor/csio_wr.c
1144
(q->wr_sz - sizeof(*ftr)));
drivers/scsi/csiostor/csio_wr.c
1150
while (csio_is_new_iqwr(q, ftr)) {
drivers/scsi/csiostor/csio_wr.c
1152
CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
drivers/scsi/csiostor/csio_wr.c
1153
(uintptr_t)q->vwrap);
drivers/scsi/csiostor/csio_wr.c
1160
iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
drivers/scsi/csiostor/csio_wr.c
1163
csio_wr_process_fl(hw, q, wr,
drivers/scsi/csiostor/csio_wr.c
1193
CSIO_INC_STATS(q, n_rsp_unknown);
drivers/scsi/csiostor/csio_wr.c
1202
if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
drivers/scsi/csiostor/csio_wr.c
1205
q->cidx = 0;
drivers/scsi/csiostor/csio_wr.c
1206
wr = q->vstart;
drivers/scsi/csiostor/csio_wr.c
1209
q->un.iq.genbit ^= 0x1;
drivers/scsi/csiostor/csio_wr.c
1211
CSIO_INC_STATS(q, n_qwrap);
drivers/scsi/csiostor/csio_wr.c
1213
q->cidx++;
drivers/scsi/csiostor/csio_wr.c
1214
wr = (void *)((uintptr_t)(q->vstart) +
drivers/scsi/csiostor/csio_wr.c
1215
(q->cidx * q->wr_sz));
drivers/scsi/csiostor/csio_wr.c
1219
(q->wr_sz - sizeof(*ftr)));
drivers/scsi/csiostor/csio_wr.c
1220
q->inc_idx++;
drivers/scsi/csiostor/csio_wr.c
1228
if (unlikely(!q->inc_idx)) {
drivers/scsi/csiostor/csio_wr.c
1229
CSIO_INC_STATS(q, n_stray_comp);
drivers/scsi/csiostor/csio_wr.c
1249
csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) |
drivers/scsi/csiostor/csio_wr.c
1250
INGRESSQID_V(q->un.iq.physiqid) |
drivers/scsi/csiostor/csio_wr.c
1253
q->stats.n_tot_rsps += q->inc_idx;
drivers/scsi/csiostor/csio_wr.c
1255
q->inc_idx = 0;
drivers/scsi/csiostor/csio_wr.c
1689
struct csio_q *q;
drivers/scsi/csiostor/csio_wr.c
1693
q = wrm->q_arr[i];
drivers/scsi/csiostor/csio_wr.c
1696
if (q->type == CSIO_FREELIST) {
drivers/scsi/csiostor/csio_wr.c
1697
if (!q->un.fl.bufs)
drivers/scsi/csiostor/csio_wr.c
1699
for (j = 0; j < q->credits; j++) {
drivers/scsi/csiostor/csio_wr.c
1700
buf = &q->un.fl.bufs[j];
drivers/scsi/csiostor/csio_wr.c
1707
kfree(q->un.fl.bufs);
drivers/scsi/csiostor/csio_wr.c
1709
dma_free_coherent(&hw->pdev->dev, q->size,
drivers/scsi/csiostor/csio_wr.c
1710
q->vstart, q->pstart);
drivers/scsi/csiostor/csio_wr.c
1712
kfree(q);
drivers/scsi/csiostor/csio_wr.c
191
struct csio_q *q, *flq;
drivers/scsi/csiostor/csio_wr.c
234
q = wrm->q_arr[free_idx];
drivers/scsi/csiostor/csio_wr.c
236
q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
drivers/scsi/csiostor/csio_wr.c
238
if (!q->vstart) {
drivers/scsi/csiostor/csio_wr.c
245
q->type = type;
drivers/scsi/csiostor/csio_wr.c
246
q->owner = owner;
drivers/scsi/csiostor/csio_wr.c
247
q->pidx = q->cidx = q->inc_idx = 0;
drivers/scsi/csiostor/csio_wr.c
248
q->size = qsz;
drivers/scsi/csiostor/csio_wr.c
249
q->wr_sz = wrsize; /* If using fixed size WRs */
drivers/scsi/csiostor/csio_wr.c
255
q->un.iq.genbit = 1;
drivers/scsi/csiostor/csio_wr.c
261
q->credits = (qsz - q->wr_sz) / q->wr_sz;
drivers/scsi/csiostor/csio_wr.c
262
q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
drivers/scsi/csiostor/csio_wr.c
263
- q->wr_sz);
drivers/scsi/csiostor/csio_wr.c
278
q->un.iq.flq_idx = flq_idx;
drivers/scsi/csiostor/csio_wr.c
280
flq = wrm->q_arr[q->un.iq.flq_idx];
drivers/scsi/csiostor/csio_wr.c
305
q->un.iq.flq_idx = -1;
drivers/scsi/csiostor/csio_wr.c
309
q->un.iq.iq_intx_handler = iq_intx_handler;
drivers/scsi/csiostor/csio_wr.c
314
q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
drivers/scsi/csiostor/csio_wr.c
315
q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
drivers/scsi/csiostor/csio_wr.c
319
q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
drivers/scsi/csiostor/csio_wr.c
320
q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
drivers/scsi/csiostor/csio_wr.c
746
struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
drivers/scsi/csiostor/csio_wr.c
747
struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
drivers/scsi/csiostor/csio_wr.c
764
struct csio_q *q = wrm->q_arr[qidx];
drivers/scsi/csiostor/csio_wr.c
770
q->un.iq.genbit = 1;
drivers/scsi/csiostor/csio_wr.c
772
for (i = 0; i < q->credits; i++) {
drivers/scsi/csiostor/csio_wr.c
774
wr = (void *)((uintptr_t)q->vstart +
drivers/scsi/csiostor/csio_wr.c
775
(i * q->wr_sz));
drivers/scsi/csiostor/csio_wr.c
778
(q->wr_sz - sizeof(*ftr)));
drivers/scsi/csiostor/csio_wr.c
788
struct csio_q *q;
drivers/scsi/csiostor/csio_wr.c
793
q = wrm->q_arr[i];
drivers/scsi/csiostor/csio_wr.c
795
switch (q->type) {
drivers/scsi/csiostor/csio_wr.c
866
struct csio_q *q = wrm->q_arr[qidx];
drivers/scsi/csiostor/csio_wr.c
867
void *cwr = (void *)((uintptr_t)(q->vstart) +
drivers/scsi/csiostor/csio_wr.c
868
(q->pidx * CSIO_QCREDIT_SZ));
drivers/scsi/csiostor/csio_wr.c
869
struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
drivers/scsi/csiostor/csio_wr.c
870
uint16_t cidx = q->cidx = ntohs(stp->cidx);
drivers/scsi/csiostor/csio_wr.c
871
uint16_t pidx = q->pidx;
drivers/scsi/csiostor/csio_wr.c
876
CSIO_DB_ASSERT(q->owner != NULL);
drivers/scsi/csiostor/csio_wr.c
878
CSIO_DB_ASSERT(cidx <= q->credits);
drivers/scsi/csiostor/csio_wr.c
882
credits = q->credits - (pidx - cidx) - 1;
drivers/scsi/csiostor/csio_wr.c
887
credits = q->credits;
drivers/scsi/csiostor/csio_wr.c
888
CSIO_INC_STATS(q, n_qempty);
drivers/scsi/csiostor/csio_wr.c
896
CSIO_INC_STATS(q, n_qfull);
drivers/scsi/csiostor/csio_wr.c
908
if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
drivers/scsi/csiostor/csio_wr.c
910
wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
drivers/scsi/csiostor/csio_wr.c
911
wrp->addr2 = q->vstart;
drivers/scsi/csiostor/csio_wr.c
913
q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
drivers/scsi/csiostor/csio_wr.c
915
CSIO_INC_STATS(q, n_qwrap);
drivers/scsi/csiostor/csio_wr.c
916
CSIO_INC_STATS(q, n_eq_wr_split);
drivers/scsi/csiostor/csio_wr.c
922
q->pidx += (uint16_t)req_credits;
drivers/scsi/csiostor/csio_wr.c
925
if (unlikely(q->pidx == q->credits)) {
drivers/scsi/csiostor/csio_wr.c
926
q->pidx = 0;
drivers/scsi/csiostor/csio_wr.c
927
CSIO_INC_STATS(q, n_qwrap);
drivers/scsi/csiostor/csio_wr.c
931
q->inc_idx = (uint16_t)req_credits;
drivers/scsi/csiostor/csio_wr.c
933
CSIO_INC_STATS(q, n_tot_reqs);
drivers/scsi/csiostor/csio_wr.c
984
struct csio_q *q = wrm->q_arr[qidx];
drivers/scsi/csiostor/csio_wr.c
990
csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) |
drivers/scsi/csiostor/csio_wr.c
991
PIDX_T5_V(q->inc_idx) | DBTYPE_F,
drivers/scsi/csiostor/csio_wr.c
993
q->inc_idx = 0;
drivers/scsi/csiostor/csio_wr.c
999
csio_wr_avail_qcredits(struct csio_q *q)
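
The csio_wr.c lines 1001-1004 quoted above compute how many work-request credits sit between the producer (pidx) and consumer (cidx) indices, covering the case where the consumer index has wrapped past the producer. The same arithmetic as a standalone sketch:

#include <linux/types.h>

/* occupied credits in a ring of 'credits' slots; when cidx > pidx
 * the occupied region wraps around the end of the ring */
static inline int demo_avail_qcredits(u16 pidx, u16 cidx, u16 credits)
{
        if (pidx > cidx)
                return pidx - cidx;
        else if (cidx > pidx)
                return credits - (cidx - pidx);
        return 0;               /* pidx == cidx: queue is empty */
}
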
drivers/scsi/elx/efct/efct_hw_queues.c
406
struct hw_q *q;
drivers/scsi/elx/efct/efct_hw_queues.c
412
list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
drivers/scsi/elx/efct/efct_hw_queues.c
413
switch (q->type) {
drivers/scsi/elx/efct/efct_hw_queues.c
415
efct_hw_del_mq((struct hw_mq *)q);
drivers/scsi/elx/efct/efct_hw_queues.c
418
efct_hw_del_wq((struct hw_wq *)q);
drivers/scsi/elx/efct/efct_hw_queues.c
421
efct_hw_del_rq((struct hw_rq *)q);
drivers/scsi/elx/libefc_sli/sli4.c
1002
if (!q) {
drivers/scsi/elx/libefc_sli/sli4.c
1003
efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q);
drivers/scsi/elx/libefc_sli/sli4.c
1010
switch (q->type) {
drivers/scsi/elx/libefc_sli/sli4.c
1032
efc_log_info(sli4, "bad queue type %d\n", q->type);
drivers/scsi/elx/libefc_sli/sli4.c
1037
rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id);
drivers/scsi/elx/libefc_sli/sli4.c
1053
SLI4_QNAME[q->type], res->status,
drivers/scsi/elx/libefc_sli/sli4.c
1061
__sli_queue_destroy(sli4, q);
drivers/scsi/elx/libefc_sli/sli4.c
1067
sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
drivers/scsi/elx/libefc_sli/sli4.c
1073
spin_lock_irqsave(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1075
val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
drivers/scsi/elx/libefc_sli/sli4.c
1077
val = sli_format_eq_db_data(q->n_posted, q->id, a);
drivers/scsi/elx/libefc_sli/sli4.c
1079
writel(val, q->db_regaddr);
drivers/scsi/elx/libefc_sli/sli4.c
1080
q->n_posted = 0;
drivers/scsi/elx/libefc_sli/sli4.c
1081
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1087
sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
drivers/scsi/elx/libefc_sli/sli4.c
1093
spin_lock_irqsave(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1095
switch (q->type) {
drivers/scsi/elx/libefc_sli/sli4.c
1098
val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
drivers/scsi/elx/libefc_sli/sli4.c
1100
val = sli_format_eq_db_data(q->n_posted, q->id, a);
drivers/scsi/elx/libefc_sli/sli4.c
1102
writel(val, q->db_regaddr);
drivers/scsi/elx/libefc_sli/sli4.c
1103
q->n_posted = 0;
drivers/scsi/elx/libefc_sli/sli4.c
1107
val = sli_format_if6_cq_db_data(q->n_posted, q->id, a);
drivers/scsi/elx/libefc_sli/sli4.c
1109
val = sli_format_cq_db_data(q->n_posted, q->id, a);
drivers/scsi/elx/libefc_sli/sli4.c
1111
writel(val, q->db_regaddr);
drivers/scsi/elx/libefc_sli/sli4.c
1112
q->n_posted = 0;
drivers/scsi/elx/libefc_sli/sli4.c
1116
SLI4_QNAME[q->type]);
drivers/scsi/elx/libefc_sli/sli4.c
1119
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1125
sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
drivers/scsi/elx/libefc_sli/sli4.c
1127
u8 *qe = q->dma.virt;
drivers/scsi/elx/libefc_sli/sli4.c
1131
qindex = q->index;
drivers/scsi/elx/libefc_sli/sli4.c
1132
qe += q->index * q->size;
drivers/scsi/elx/libefc_sli/sli4.c
1135
sli_set_wq_id_association(entry, q->id);
drivers/scsi/elx/libefc_sli/sli4.c
1137
memcpy(qe, entry, q->size);
drivers/scsi/elx/libefc_sli/sli4.c
1138
val = sli_format_wq_db_data(q->id);
drivers/scsi/elx/libefc_sli/sli4.c
1140
writel(val, q->db_regaddr);
drivers/scsi/elx/libefc_sli/sli4.c
1141
q->index = (q->index + 1) & (q->length - 1);
drivers/scsi/elx/libefc_sli/sli4.c
1147
sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
drivers/scsi/elx/libefc_sli/sli4.c
1149
u8 *qe = q->dma.virt;
drivers/scsi/elx/libefc_sli/sli4.c
1154
spin_lock_irqsave(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1155
qindex = q->index;
drivers/scsi/elx/libefc_sli/sli4.c
1156
qe += q->index * q->size;
drivers/scsi/elx/libefc_sli/sli4.c
1158
memcpy(qe, entry, q->size);
drivers/scsi/elx/libefc_sli/sli4.c
1159
val = sli_format_mq_db_data(q->id);
drivers/scsi/elx/libefc_sli/sli4.c
1160
writel(val, q->db_regaddr);
drivers/scsi/elx/libefc_sli/sli4.c
1161
q->index = (q->index + 1) & (q->length - 1);
drivers/scsi/elx/libefc_sli/sli4.c
1162
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1168
sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
drivers/scsi/elx/libefc_sli/sli4.c
1170
u8 *qe = q->dma.virt;
drivers/scsi/elx/libefc_sli/sli4.c
1174
qindex = q->index;
drivers/scsi/elx/libefc_sli/sli4.c
1175
qe += q->index * q->size;
drivers/scsi/elx/libefc_sli/sli4.c
1177
memcpy(qe, entry, q->size);
drivers/scsi/elx/libefc_sli/sli4.c
1185
if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR))
drivers/scsi/elx/libefc_sli/sli4.c
1188
val = sli_format_rq_db_data(q->id);
drivers/scsi/elx/libefc_sli/sli4.c
1189
writel(val, q->db_regaddr);
drivers/scsi/elx/libefc_sli/sli4.c
1191
q->index = (q->index + 1) & (q->length - 1);
drivers/scsi/elx/libefc_sli/sli4.c
1197
sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
drivers/scsi/elx/libefc_sli/sli4.c
1199
u8 *qe = q->dma.virt;
drivers/scsi/elx/libefc_sli/sli4.c
1203
spin_lock_irqsave(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1205
qe += q->index * q->size;
drivers/scsi/elx/libefc_sli/sli4.c
1210
if ((wflags & SLI4_EQE_VALID) != q->phase) {
drivers/scsi/elx/libefc_sli/sli4.c
1211
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1220
memcpy(entry, qe, q->size);
drivers/scsi/elx/libefc_sli/sli4.c
1221
q->index = (q->index + 1) & (q->length - 1);
drivers/scsi/elx/libefc_sli/sli4.c
1222
q->n_posted++;
drivers/scsi/elx/libefc_sli/sli4.c
1230
if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
drivers/scsi/elx/libefc_sli/sli4.c
1231
q->phase ^= (u16)0x1;
drivers/scsi/elx/libefc_sli/sli4.c
1233
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1239
sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
drivers/scsi/elx/libefc_sli/sli4.c
1241
u8 *qe = q->dma.virt;
drivers/scsi/elx/libefc_sli/sli4.c
1246
spin_lock_irqsave(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1248
qe += q->index * q->size;
drivers/scsi/elx/libefc_sli/sli4.c
1254
if (valid_bit_set != q->phase) {
drivers/scsi/elx/libefc_sli/sli4.c
1255
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1264
memcpy(entry, qe, q->size);
drivers/scsi/elx/libefc_sli/sli4.c
1265
q->index = (q->index + 1) & (q->length - 1);
drivers/scsi/elx/libefc_sli/sli4.c
1266
q->n_posted++;
drivers/scsi/elx/libefc_sli/sli4.c
1274
if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
drivers/scsi/elx/libefc_sli/sli4.c
1275
q->phase ^= (u16)0x1;
drivers/scsi/elx/libefc_sli/sli4.c
1277
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1283
sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
drivers/scsi/elx/libefc_sli/sli4.c
1285
u8 *qe = q->dma.virt;
drivers/scsi/elx/libefc_sli/sli4.c
1288
spin_lock_irqsave(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1290
qe += q->u.r_idx * q->size;
drivers/scsi/elx/libefc_sli/sli4.c
1293
if (q->index == q->u.r_idx) {
drivers/scsi/elx/libefc_sli/sli4.c
1294
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
1298
memcpy(entry, qe, q->size);
drivers/scsi/elx/libefc_sli/sli4.c
1299
q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1);
drivers/scsi/elx/libefc_sli/sli4.c
1301
spin_unlock_irqrestore(&q->lock, flags);
drivers/scsi/elx/libefc_sli/sli4.c
3628
struct sli4_queue *q, int num_q, u32 shift,
drivers/scsi/elx/libefc_sli/sli4.c
3645
req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id);
drivers/scsi/elx/libefc_sli/sli4.c
4117
enum sli4_qtype q;
drivers/scsi/elx/libefc_sli/sli4.c
4120
for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
drivers/scsi/elx/libefc_sli/sli4.c
4121
sli4->qinfo.max_qentries[q] =
drivers/scsi/elx/libefc_sli/sli4.c
4122
sli_convert_mask_to_count(sli4->qinfo.count_method[q],
drivers/scsi/elx/libefc_sli/sli4.c
4123
sli4->qinfo.count_mask[q]);
drivers/scsi/elx/libefc_sli/sli4.c
4130
for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
drivers/scsi/elx/libefc_sli/sli4.c
4131
qentries = sli4->qinfo.max_qentries[q];
drivers/scsi/elx/libefc_sli/sli4.c
4134
SLI4_QNAME[q],
drivers/scsi/elx/libefc_sli/sli4.c
4135
sli4->qinfo.max_qentries[q], qentries);
drivers/scsi/elx/libefc_sli/sli4.c
4136
sli4->qinfo.max_qentries[q] = qentries;
drivers/scsi/elx/libefc_sli/sli4.c
488
__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
drivers/scsi/elx/libefc_sli/sli4.c
490
if (!q->dma.size)
drivers/scsi/elx/libefc_sli/sli4.c
493
dma_free_coherent(&sli4->pci->dev, q->dma.size,
drivers/scsi/elx/libefc_sli/sli4.c
494
q->dma.virt, q->dma.phys);
drivers/scsi/elx/libefc_sli/sli4.c
495
memset(&q->dma, 0, sizeof(struct efc_dma));
drivers/scsi/elx/libefc_sli/sli4.c
499
__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
drivers/scsi/elx/libefc_sli/sli4.c
502
if (q->dma.virt) {
drivers/scsi/elx/libefc_sli/sli4.c
507
memset(q, 0, sizeof(struct sli4_queue));
drivers/scsi/elx/libefc_sli/sli4.c
509
q->dma.size = size * n_entries;
drivers/scsi/elx/libefc_sli/sli4.c
510
q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
drivers/scsi/elx/libefc_sli/sli4.c
511
&q->dma.phys, GFP_KERNEL);
drivers/scsi/elx/libefc_sli/sli4.c
512
if (!q->dma.virt) {
drivers/scsi/elx/libefc_sli/sli4.c
513
memset(&q->dma, 0, sizeof(struct efc_dma));
drivers/scsi/elx/libefc_sli/sli4.c
518
memset(q->dma.virt, 0, size * n_entries);
drivers/scsi/elx/libefc_sli/sli4.c
520
spin_lock_init(&q->lock);
drivers/scsi/elx/libefc_sli/sli4.c
522
q->type = qtype;
drivers/scsi/elx/libefc_sli/sli4.c
523
q->size = size;
drivers/scsi/elx/libefc_sli/sli4.c
524
q->length = n_entries;
drivers/scsi/elx/libefc_sli/sli4.c
526
if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
drivers/scsi/elx/libefc_sli/sli4.c
530
q->phase = 1;
drivers/scsi/elx/libefc_sli/sli4.c
534
q->proc_limit = n_entries / 2;
drivers/scsi/elx/libefc_sli/sli4.c
536
if (q->type == SLI4_QTYPE_EQ)
drivers/scsi/elx/libefc_sli/sli4.c
537
q->posted_limit = q->length / 2;
drivers/scsi/elx/libefc_sli/sli4.c
539
q->posted_limit = 64;
drivers/scsi/elx/libefc_sli/sli4.c
545
sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
drivers/scsi/elx/libefc_sli/sli4.c
549
if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
drivers/scsi/elx/libefc_sli/sli4.c
553
if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
drivers/scsi/elx/libefc_sli/sli4.c
557
if (__sli_create_queue(sli4, q))
drivers/scsi/elx/libefc_sli/sli4.c
560
if (is_hdr && q->id & 1) {
drivers/scsi/elx/libefc_sli/sli4.c
561
efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
drivers/scsi/elx/libefc_sli/sli4.c
563
} else if (!is_hdr && (q->id & 1) == 0) {
drivers/scsi/elx/libefc_sli/sli4.c
564
efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
drivers/scsi/elx/libefc_sli/sli4.c
569
q->u.flag |= SLI4_QUEUE_FLAG_HDR;
drivers/scsi/elx/libefc_sli/sli4.c
571
q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
drivers/scsi/elx/libefc_sli/sli4.c
576
__sli_queue_destroy(sli4, q);
drivers/scsi/elx/libefc_sli/sli4.c
672
__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
drivers/scsi/elx/libefc_sli/sli4.c
678
SLI4_QNAME[q->type]);
drivers/scsi/elx/libefc_sli/sli4.c
683
SLI4_QNAME[q->type]);
drivers/scsi/elx/libefc_sli/sli4.c
691
SLI4_QNAME[q->type], res_q->hdr.status,
drivers/scsi/elx/libefc_sli/sli4.c
695
q->id = le16_to_cpu(res_q->q_id);
drivers/scsi/elx/libefc_sli/sli4.c
696
switch (q->type) {
drivers/scsi/elx/libefc_sli/sli4.c
699
q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
701
q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
705
q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
707
q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
711
q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
713
q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
717
q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
719
q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
723
q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
725
q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG;
drivers/scsi/elx/libefc_sli/sli4.c
764
struct sli4_queue *q, u32 n_entries,
drivers/scsi/elx/libefc_sli/sli4.c
776
if (__sli_queue_init(sli4, q, qtype, size, n_entries, align))
drivers/scsi/elx/libefc_sli/sli4.c
781
if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
drivers/scsi/elx/libefc_sli/sli4.c
782
!__sli_create_queue(sli4, q))
drivers/scsi/elx/libefc_sli/sli4.c
787
if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
drivers/scsi/elx/libefc_sli/sli4.c
789
!__sli_create_queue(sli4, q))
drivers/scsi/elx/libefc_sli/sli4.c
796
&q->dma, assoc->id) &&
drivers/scsi/elx/libefc_sli/sli4.c
797
!__sli_create_queue(sli4, q))
drivers/scsi/elx/libefc_sli/sli4.c
802
if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
drivers/scsi/elx/libefc_sli/sli4.c
804
!__sli_create_queue(sli4, q))
drivers/scsi/elx/libefc_sli/sli4.c
812
__sli_queue_destroy(sli4, q);
drivers/scsi/elx/libefc_sli/sli4.c
995
sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
drivers/scsi/elx/libefc_sli/sli4.h
3981
__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
drivers/scsi/elx/libefc_sli/sli4.h
3984
__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q);
drivers/scsi/elx/libefc_sli/sli4.h
3989
sli_queue_alloc(struct sli4 *sli4, u32 qtype, struct sli4_queue *q,
drivers/scsi/elx/libefc_sli/sli4.h
3997
sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, u32 destroy_queues,
drivers/scsi/elx/libefc_sli/sli4.h
4000
sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
drivers/scsi/elx/libefc_sli/sli4.h
4002
sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
drivers/scsi/elx/libefc_sli/sli4.h
4005
sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
drivers/scsi/elx/libefc_sli/sli4.h
4007
sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
drivers/scsi/elx/libefc_sli/sli4.h
4009
sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
drivers/scsi/elx/libefc_sli/sli4.h
4011
sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
drivers/scsi/elx/libefc_sli/sli4.h
4013
sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
drivers/scsi/elx/libefc_sli/sli4.h
4015
sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
drivers/scsi/elx/libefc_sli/sli4.h
4056
sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q, u32 n_entries,
drivers/scsi/elx/libefc_sli/sli4.h
4059
sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs, struct sli4_queue *q[],
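
The sli_eq_read()/sli_cq_read() lines above consume an entry only while its valid bit matches q->phase, and flip the phase whenever the index wraps to zero (the quoted code does the flip on SLI-4 interface type 6 hardware). A self-contained sketch of that consumption step, with illustrative demo_* names:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_cq {
        u8 *virt;               /* queue memory */
        u32 index;              /* next entry to consume */
        u32 length;             /* entry count, power of two */
        u32 size;               /* bytes per entry */
        u16 phase;              /* valid-bit value meaning "new" */
};

static int demo_cq_read(struct demo_cq *q, u8 *entry, u16 valid_bit)
{
        u8 *qe = q->virt + q->index * q->size;

        if (valid_bit != q->phase)
                return -ENODATA;        /* hardware has not written it yet */

        memcpy(entry, qe, q->size);
        q->index = (q->index + 1) & (q->length - 1);
        if (q->index == 0)
                q->phase ^= 0x1;        /* wrapped: expected bit flips */
        return 0;
}
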
drivers/scsi/esas2r/esas2r_flash.c
331
u8 *p, *q;
drivers/scsi/esas2r/esas2r_flash.c
347
q = (u8 *)fi /* start of the whole gob */
drivers/scsi/esas2r/esas2r_flash.c
357
if (*p++ != *q++)
drivers/scsi/gvp11.c
242
unsigned char q, qq;
drivers/scsi/gvp11.c
261
q = *sasr_3393; /* read it */
drivers/scsi/gvp11.c
262
if (q & 0x08) /* bit 3 should always be clear */
drivers/scsi/gvp11.c
269
if (*sasr_3393 != q) { /* should still read the same */
drivers/scsi/gvp11.c
273
if (*scmd_3393 != q) /* and so should the image at 0x1f */
drivers/scsi/gvp11.c
283
q = *scmd_3393;
drivers/scsi/gvp11.c
285
*scmd_3393 = ~q;
drivers/scsi/gvp11.c
289
*scmd_3393 = q;
drivers/scsi/gvp11.c
290
if (qq != q) /* should be read only */
drivers/scsi/gvp11.c
293
q = *scmd_3393;
drivers/scsi/gvp11.c
295
*scmd_3393 = ~q;
drivers/scsi/gvp11.c
299
*scmd_3393 = q;
drivers/scsi/gvp11.c
300
if (qq != q || qq != 0xff) /* should be read only, all 1's */
drivers/scsi/gvp11.c
303
q = *scmd_3393;
drivers/scsi/gvp11.c
305
*scmd_3393 = ~q;
drivers/scsi/gvp11.c
309
*scmd_3393 = q;
drivers/scsi/gvp11.c
310
if (qq != (~q & 0xff)) /* should be read/write */
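
The gvp11.c lines above probe the WD33C93 register file by reading a register, writing its complement, reading it back, and restoring it; whether the write sticks tells read-only apart from read/write locations. A condensed sketch of one such test, assuming a byte-wide register that is safe to restore:

#include <linux/types.h>

/* true if the register accepts writes (all bits flipped and stuck) */
static bool demo_reg_is_rw(volatile unsigned char *reg)
{
        unsigned char q = *reg, qq;

        *reg = ~q;              /* try to flip every bit */
        qq = *reg;
        *reg = q;               /* restore the original value */
        return qq == (unsigned char)~q;
}
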
drivers/scsi/hpsa.c
302
static inline u32 next_command(struct ctlr_info *h, u8 q);
drivers/scsi/hpsa.c
6920
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
drivers/scsi/hpsa.c
6922
return h->access.command_completed(h, q);
drivers/scsi/hpsa.c
6996
return container_of((queue - *queue), struct ctlr_info, q[0]);
drivers/scsi/hpsa.c
7002
u8 q = *(u8 *) queue;
drivers/scsi/hpsa.c
7012
raw_tag = get_next_completion(h, q);
drivers/scsi/hpsa.c
7014
raw_tag = next_command(h, q);
drivers/scsi/hpsa.c
7023
u8 q = *(u8 *) queue;
drivers/scsi/hpsa.c
7029
raw_tag = get_next_completion(h, q);
drivers/scsi/hpsa.c
7031
raw_tag = next_command(h, q);
drivers/scsi/hpsa.c
7039
u8 q = *(u8 *) queue;
drivers/scsi/hpsa.c
7045
raw_tag = get_next_completion(h, q);
drivers/scsi/hpsa.c
7048
raw_tag = next_command(h, q);
drivers/scsi/hpsa.c
7058
u8 q = *(u8 *) queue;
drivers/scsi/hpsa.c
7061
raw_tag = get_next_completion(h, q);
drivers/scsi/hpsa.c
7064
raw_tag = next_command(h, q);
drivers/scsi/hpsa.c
8069
&h->q[h->intr_mode]);
drivers/scsi/hpsa.c
8070
h->q[h->intr_mode] = 0;
drivers/scsi/hpsa.c
8075
free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
drivers/scsi/hpsa.c
8076
h->q[i] = 0;
drivers/scsi/hpsa.c
8079
h->q[i] = 0;
drivers/scsi/hpsa.c
8098
h->q[i] = (u8) i;
drivers/scsi/hpsa.c
8106
&h->q[i]);
drivers/scsi/hpsa.c
8114
free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
drivers/scsi/hpsa.c
8115
h->q[j] = 0;
drivers/scsi/hpsa.c
8118
h->q[j] = 0;
drivers/scsi/hpsa.c
8130
&h->q[h->intr_mode]);
drivers/scsi/hpsa.c
8137
&h->q[h->intr_mode]);
drivers/scsi/hpsa.c
987
static inline u32 next_command(struct ctlr_info *h, u8 q)
drivers/scsi/hpsa.c
990
struct reply_queue_buffer *rq = &h->reply_queue[q];
drivers/scsi/hpsa.c
993
return h->access.command_completed(h, q);
drivers/scsi/hpsa.c
996
return h->access.command_completed(h, q);
drivers/scsi/hpsa.h
257
u8 q[MAX_REPLY_QUEUES];
drivers/scsi/hpsa.h
35
unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
drivers/scsi/hpsa.h
489
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
drivers/scsi/hpsa.h
491
struct reply_queue_buffer *rq = &h->reply_queue[q];
drivers/scsi/hpsa.h
527
__attribute__((unused)) u8 q)
drivers/scsi/hpsa.h
590
static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
drivers/scsi/hpsa.h
593
struct reply_queue_buffer *rq = &h->reply_queue[q];
drivers/scsi/hpsa.h
595
BUG_ON(q >= h->nreply_queues);
drivers/scsi/hpsa.h
609
writel((q << 24) | rq->current_entry, h->vaddr +
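
The hpsa lines above show how a per-vector cookie is built: h->q[i] is set to i (hpsa.c 8098), &h->q[i] is registered as the irq dev_id, and the handler reads the queue number back with *(u8 *)queue. Line 6996 then recovers the controller by stepping the cookie pointer back to q[0] and applying container_of(). A sketch of that recovery, with illustrative names:

#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_MAX_QUEUES 8

struct demo_ctlr {
        u8 q[DEMO_MAX_QUEUES];  /* q[i] == i after setup */
};

/* the irq cookie is &h->q[i]; since q[i] stores i, subtracting
 * *queue lands on q[0], and container_of() yields the controller */
static struct demo_ctlr *demo_queue_to_ctlr(u8 *queue)
{
        return container_of(queue - *queue, struct demo_ctlr, q[0]);
}
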
drivers/scsi/ibmvscsi_tgt/libsrp.c
24
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
drivers/scsi/ibmvscsi_tgt/libsrp.c
30
q->pool = kzalloc_objs(struct iu_entry *, max);
drivers/scsi/ibmvscsi_tgt/libsrp.c
31
if (!q->pool)
drivers/scsi/ibmvscsi_tgt/libsrp.c
33
q->items = kzalloc_objs(struct iu_entry, max);
drivers/scsi/ibmvscsi_tgt/libsrp.c
34
if (!q->items)
drivers/scsi/ibmvscsi_tgt/libsrp.c
37
spin_lock_init(&q->lock);
drivers/scsi/ibmvscsi_tgt/libsrp.c
38
kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
drivers/scsi/ibmvscsi_tgt/libsrp.c
40
for (i = 0, iue = q->items; i < max; i++) {
drivers/scsi/ibmvscsi_tgt/libsrp.c
41
kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
drivers/scsi/ibmvscsi_tgt/libsrp.c
48
kfree(q->pool);
drivers/scsi/ibmvscsi_tgt/libsrp.c
52
static void srp_iu_pool_free(struct srp_queue *q)
drivers/scsi/ibmvscsi_tgt/libsrp.c
54
kfree(q->items);
drivers/scsi/ibmvscsi_tgt/libsrp.c
55
kfree(q->pool);
drivers/scsi/ips.c
2513
struct scsi_cmnd *q;
drivers/scsi/ips.c
2624
q = p;
drivers/scsi/ips.c
2625
SC = ips_removeq_wait(&ha->scb_waitlist, q);
drivers/scsi/libiscsi.c
2771
iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
drivers/scsi/libiscsi.c
2775
memset(q, 0, sizeof(*q));
drivers/scsi/libiscsi.c
2777
q->max = max;
drivers/scsi/libiscsi.c
2783
q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL);
drivers/scsi/libiscsi.c
2784
if (q->pool == NULL)
drivers/scsi/libiscsi.c
2787
kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
drivers/scsi/libiscsi.c
2790
q->pool[i] = kzalloc(item_size, GFP_KERNEL);
drivers/scsi/libiscsi.c
2791
if (q->pool[i] == NULL) {
drivers/scsi/libiscsi.c
2792
q->max = i;
drivers/scsi/libiscsi.c
2795
kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
drivers/scsi/libiscsi.c
2799
*items = q->pool + max;
drivers/scsi/libiscsi.c
2800
memcpy(*items, q->pool, max * sizeof(void *));
drivers/scsi/libiscsi.c
2806
iscsi_pool_free(q);
drivers/scsi/libiscsi.c
2811
void iscsi_pool_free(struct iscsi_pool *q)
drivers/scsi/libiscsi.c
2815
for (i = 0; i < q->max; i++)
drivers/scsi/libiscsi.c
2816
kfree(q->pool[i]);
drivers/scsi/libiscsi.c
2817
kvfree(q->pool);
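
iscsi_pool_init() above builds an object pool from a kfifo of pointers: the items are preallocated once, their addresses pushed into the fifo, and allocation/free become pointer pop/push. A trimmed sketch of the setup under the same assumptions as the quoted code (pointer-sized records, caller-supplied backing array); error unwinding is omitted here:

#include <linux/kfifo.h>
#include <linux/slab.h>

struct demo_pool {
        struct kfifo queue;     /* holds item pointers */
        void **pool;            /* backing store for the kfifo */
        int max;
};

static int demo_pool_init(struct demo_pool *q, int max, size_t item_size)
{
        int i;

        q->max = max;
        q->pool = kvcalloc(max, sizeof(void *), GFP_KERNEL);
        if (!q->pool)
                return -ENOMEM;

        kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

        for (i = 0; i < max; i++) {
                void *item = kzalloc(item_size, GFP_KERNEL);

                if (!item)
                        break;  /* the real code frees and bails out */
                kfifo_in(&q->queue, (void *)&item, sizeof(void *));
        }
        return 0;
}
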
drivers/scsi/lpfc/lpfc_attr.c
1471
lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
drivers/scsi/lpfc/lpfc_attr.c
1476
while (!list_empty(q)) {
drivers/scsi/lpfc/lpfc_crtn.h
257
void lpfc_sli4_start_polling(struct lpfc_queue *q);
drivers/scsi/lpfc/lpfc_crtn.h
258
void lpfc_sli4_stop_polling(struct lpfc_queue *q);
drivers/scsi/lpfc/lpfc_debugfs.c
4210
lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int count)
drivers/scsi/lpfc/lpfc_debugfs.c
4215
if (index > q->entry_count - 1)
drivers/scsi/lpfc/lpfc_debugfs.h
355
lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
drivers/scsi/lpfc/lpfc_debugfs.h
362
if (!q)
drivers/scsi/lpfc/lpfc_debugfs.h
364
if (idx >= q->entry_count)
drivers/scsi/lpfc/lpfc_debugfs.h
367
esize = q->entry_size;
drivers/scsi/lpfc/lpfc_debugfs.h
369
pword = lpfc_sli4_qe(q, idx);
drivers/scsi/lpfc/lpfc_debugfs.h
403
lpfc_debug_dump_q(struct lpfc_queue *q)
drivers/scsi/lpfc/lpfc_debugfs.h
408
if (!q)
drivers/scsi/lpfc/lpfc_debugfs.h
411
dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev,
drivers/scsi/lpfc/lpfc_debugfs.h
415
(q->phba)->brd_no,
drivers/scsi/lpfc/lpfc_debugfs.h
416
q->queue_id, q->type, q->subtype,
drivers/scsi/lpfc/lpfc_debugfs.h
417
q->entry_size, q->entry_count,
drivers/scsi/lpfc/lpfc_debugfs.h
418
q->host_index, q->hba_index);
drivers/scsi/lpfc/lpfc_debugfs.h
419
entry_count = q->entry_count;
drivers/scsi/lpfc/lpfc_debugfs.h
421
lpfc_debug_dump_qe(q, idx);
drivers/scsi/lpfc/lpfc_sli.c
266
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
drivers/scsi/lpfc/lpfc_sli.c
277
if (unlikely(!q))
drivers/scsi/lpfc/lpfc_sli.c
280
temp_wqe = lpfc_sli4_qe(q, q->host_index);
drivers/scsi/lpfc/lpfc_sli.c
283
idx = ((q->host_index + 1) % q->entry_count);
drivers/scsi/lpfc/lpfc_sli.c
284
if (idx == q->hba_index) {
drivers/scsi/lpfc/lpfc_sli.c
285
q->WQ_overflow++;
drivers/scsi/lpfc/lpfc_sli.c
288
q->WQ_posted++;
drivers/scsi/lpfc/lpfc_sli.c
290
if (!((q->host_index + 1) % q->notify_interval))
drivers/scsi/lpfc/lpfc_sli.c
294
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
drivers/scsi/lpfc/lpfc_sli.c
295
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
296
lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
drivers/scsi/lpfc/lpfc_sli.c
297
if (q->dpp_enable && q->phba->cfg_enable_dpp) {
drivers/scsi/lpfc/lpfc_sli.c
301
for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
drivers/scsi/lpfc/lpfc_sli.c
303
q->dpp_regaddr + i);
drivers/scsi/lpfc/lpfc_sli.c
305
for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
drivers/scsi/lpfc/lpfc_sli.c
307
q->dpp_regaddr + i);
drivers/scsi/lpfc/lpfc_sli.c
314
host_index = q->host_index;
drivers/scsi/lpfc/lpfc_sli.c
316
q->host_index = idx;
drivers/scsi/lpfc/lpfc_sli.c
320
if (q->db_format == LPFC_DB_LIST_FORMAT) {
drivers/scsi/lpfc/lpfc_sli.c
321
if (q->dpp_enable && q->phba->cfg_enable_dpp) {
drivers/scsi/lpfc/lpfc_sli.c
325
q->dpp_id);
drivers/scsi/lpfc/lpfc_sli.c
327
q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
330
bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
334
&q->phba->sli4_hba.sli_intf);
drivers/scsi/lpfc/lpfc_sli.c
339
} else if (q->db_format == LPFC_DB_RING_FORMAT) {
drivers/scsi/lpfc/lpfc_sli.c
341
bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
345
writel(doorbell.word0, q->db_regaddr);
drivers/scsi/lpfc/lpfc_sli.c
361
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
drivers/scsi/lpfc/lpfc_sli.c
364
if (unlikely(!q))
drivers/scsi/lpfc/lpfc_sli.c
367
q->hba_index = index;
drivers/scsi/lpfc/lpfc_sli.c
383
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
drivers/scsi/lpfc/lpfc_sli.c
389
if (unlikely(!q))
drivers/scsi/lpfc/lpfc_sli.c
391
temp_mqe = lpfc_sli4_qe(q, q->host_index);
drivers/scsi/lpfc/lpfc_sli.c
394
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
drivers/scsi/lpfc/lpfc_sli.c
396
lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
drivers/scsi/lpfc/lpfc_sli.c
398
q->phba->mbox = (MAILBOX_t *)temp_mqe;
drivers/scsi/lpfc/lpfc_sli.c
401
q->host_index = ((q->host_index + 1) % q->entry_count);
drivers/scsi/lpfc/lpfc_sli.c
406
bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
407
writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
422
lpfc_sli4_mq_release(struct lpfc_queue *q)
drivers/scsi/lpfc/lpfc_sli.c
425
if (unlikely(!q))
drivers/scsi/lpfc/lpfc_sli.c
429
q->phba->mbox = NULL;
drivers/scsi/lpfc/lpfc_sli.c
430
q->hba_index = ((q->hba_index + 1) % q->entry_count);
drivers/scsi/lpfc/lpfc_sli.c
444
lpfc_sli4_eq_get(struct lpfc_queue *q)
drivers/scsi/lpfc/lpfc_sli.c
449
if (unlikely(!q))
drivers/scsi/lpfc/lpfc_sli.c
451
eqe = lpfc_sli4_qe(q, q->host_index);
drivers/scsi/lpfc/lpfc_sli.c
454
if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
drivers/scsi/lpfc/lpfc_sli.c
476
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
drivers/scsi/lpfc/lpfc_sli.c
484
(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
drivers/scsi/lpfc/lpfc_sli.c
485
bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
486
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
495
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
drivers/scsi/lpfc/lpfc_sli.c
500
bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
501
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
516
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli.c
522
if (unlikely(!q || (count == 0 && !arm)))
drivers/scsi/lpfc/lpfc_sli.c
534
(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
drivers/scsi/lpfc/lpfc_sli.c
535
bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
536
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
538
if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
drivers/scsi/lpfc/lpfc_sli.c
539
readl(q->phba->sli4_hba.EQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
554
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli.c
560
if (unlikely(!q || (count == 0 && !arm)))
drivers/scsi/lpfc/lpfc_sli.c
568
bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
569
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
571
if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
drivers/scsi/lpfc/lpfc_sli.c
572
readl(q->phba->sli4_hba.EQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
685
lpfc_sli4_cq_get(struct lpfc_queue *q)
drivers/scsi/lpfc/lpfc_sli.c
690
if (unlikely(!q))
drivers/scsi/lpfc/lpfc_sli.c
692
cqe = lpfc_sli4_qe(q, q->host_index);
drivers/scsi/lpfc/lpfc_sli.c
695
if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
drivers/scsi/lpfc/lpfc_sli.c
736
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli.c
742
if (unlikely(!q || (count == 0 && !arm)))
drivers/scsi/lpfc/lpfc_sli.c
752
(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
drivers/scsi/lpfc/lpfc_sli.c
753
bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
754
writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
769
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli.c
775
if (unlikely(!q || (count == 0 && !arm)))
drivers/scsi/lpfc/lpfc_sli.c
783
bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
drivers/scsi/lpfc/lpfc_sli.c
784
writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
drivers/scsi/lpfc/lpfc_sli.c
91
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
drivers/scsi/lpfc/lpfc_sli4.h
1152
void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli4.h
1154
void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli4.h
1156
void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q);
drivers/scsi/lpfc/lpfc_sli4.h
1157
void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli4.h
1159
void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
drivers/scsi/lpfc/lpfc_sli4.h
1184
static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
drivers/scsi/lpfc/lpfc_sli4.h
1186
return q->q_pgs[idx / q->entry_cnt_per_pg] +
drivers/scsi/lpfc/lpfc_sli4.h
1187
(q->entry_size * (idx % q->entry_cnt_per_pg));
drivers/scsi/lpfc/lpfc_sli4.h
845
void (*sli4_eq_clr_intr)(struct lpfc_queue *q);
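
lpfc_sli4_wq_put() and lpfc_sli4_mq_put() above declare the queue full when advancing the driver-owned host_index would land on the hardware-owned hba_index, so one slot always stays empty to tell full from empty. The check in isolation:

#include <linux/types.h>

/* full when the producer would catch up with the consumer */
static inline bool demo_ring_full(u32 host_index, u32 hba_index,
                                  u32 entry_count)
{
        return ((host_index + 1) % entry_count) == hba_index;
}
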
drivers/scsi/mpi3mr/mpi3mr_app.c
3039
struct request_queue *q;
drivers/scsi/mpi3mr/mpi3mr_app.c
3055
q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), &lim,
drivers/scsi/mpi3mr/mpi3mr_app.c
3057
if (IS_ERR(q)) {
drivers/scsi/mpi3mr/mpi3mr_app.c
3065
mrioc->bsg_queue = q;
drivers/scsi/mpt3sas/mpt3sas_base.c
5541
sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
drivers/scsi/mpt3sas/mpt3sas_base.c
5590
bias.q = sas_addr.q;
drivers/scsi/mpt3sas/mpt3sas_base.c
5594
temp.q = sas_addr.q;
drivers/scsi/mpt3sas/mpt3sas_base.c
5596
bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
drivers/scsi/mpt3sas/mpt3sas_base.c
5597
bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
drivers/scsi/mpt3sas/mpt3sas_base.h
1679
U64 q;
drivers/scsi/ncr53c8xx.c
274
m_link_s *q;
drivers/scsi/ncr53c8xx.c
300
q = &h[i];
drivers/scsi/ncr53c8xx.c
301
while (q->next && q->next != (m_link_s *) b) {
drivers/scsi/ncr53c8xx.c
302
q = q->next;
drivers/scsi/ncr53c8xx.c
304
if (!q->next) {
drivers/scsi/ncr53c8xx.c
309
q->next = q->next->next;
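
The ncr53c8xx.c lines above (the same walk reappears in sym_malloc.c near the end of this section) unlink a block from a singly linked free list: walk until q->next is the target, then splice it out. As a standalone sketch:

#include <linux/types.h>

struct demo_link {
        struct demo_link *next;
};

/* unlink b from the list headed at h; returns false if absent */
static bool demo_unlink(struct demo_link *h, struct demo_link *b)
{
        struct demo_link *q = h;

        while (q->next && q->next != b)
                q = q->next;
        if (!q->next)
                return false;
        q->next = q->next->next;        /* splice b out */
        return true;
}
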
drivers/scsi/qedf/qedf_io.c
1163
if (!scsi_cmd_to_rq(sc_cmd)->q) {
drivers/scsi/qedi/qedi_fw.c
612
if (!scsi_cmd_to_rq(sc_cmd)->q) {
drivers/scsi/qedi/qedi_iscsi.h
236
#define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \
drivers/scsi/qedi/qedi_iscsi.h
237
(q)->state == EP_STATE_OFLDCONN_COMPL)
drivers/scsi/qla2xxx/qla_dbg.c
557
struct qla2xxx_mqueue_chain *q;
drivers/scsi/qla2xxx/qla_dbg.c
576
q = ptr;
drivers/scsi/qla2xxx/qla_dbg.c
577
*last_chain = &q->type;
drivers/scsi/qla2xxx/qla_dbg.c
578
q->type = htonl(DUMP_CHAIN_QUEUE);
drivers/scsi/qla2xxx/qla_dbg.c
579
q->chain_size = htonl(
drivers/scsi/qla2xxx/qla_dbg.c
604
struct qla2xxx_mqueue_chain *q;
drivers/scsi/qla2xxx/qla_dbg.c
620
q = ptr;
drivers/scsi/qla2xxx/qla_dbg.c
621
*last_chain = &q->type;
drivers/scsi/qla2xxx/qla_dbg.c
622
q->type = htonl(DUMP_CHAIN_QUEUE);
drivers/scsi/qla2xxx/qla_dbg.c
623
q->chain_size = htonl(
drivers/scsi/qla2xxx/qla_dbg.c
648
q = ptr;
drivers/scsi/qla2xxx/qla_dbg.c
649
*last_chain = &q->type;
drivers/scsi/qla2xxx/qla_dbg.c
650
q->type = htonl(DUMP_CHAIN_QUEUE);
drivers/scsi/qla2xxx/qla_dbg.c
651
q->chain_size = htonl(
drivers/scsi/qla2xxx/qla_edif.c
1887
struct enode *node, *q;
drivers/scsi/qla2xxx/qla_edif.c
1902
list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
drivers/scsi/qla2xxx/qla_edif.c
1990
struct enode *list_node, *q;
drivers/scsi/qla2xxx/qla_edif.c
1998
list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
drivers/scsi/qla2xxx/qla_edif.c
2160
struct edb_node *node, *q;
drivers/scsi/qla2xxx/qla_edif.c
2174
list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
drivers/scsi/qla2xxx/qla_init.c
5658
__be32 *q;
drivers/scsi/qla2xxx/qla_init.c
5669
q = (__be32 *)&ha->plogi_els_payld.fl_csp;
drivers/scsi/qla2xxx/qla_init.c
5672
cpu_to_be32_array(q, bp, sz / 4);
drivers/scsi/qla2xxx/qla_os.c
5151
bool q = false;
drivers/scsi/qla2xxx/qla_os.c
5157
q = true;
drivers/scsi/qla2xxx/qla_os.c
5161
if (q)
drivers/scsi/qla2xxx/qla_os.c
7528
bool q = false;
drivers/scsi/qla2xxx/qla_os.c
7532
q = true;
drivers/scsi/qla2xxx/qla_os.c
7534
if (q)
drivers/scsi/scsi_bsg.c
12
static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
drivers/scsi/scsi_bsg.c
28
rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
drivers/scsi/scsi_bsg.c
50
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
drivers/scsi/scsi_bsg.c
53
ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
drivers/scsi/scsi_dh.c
251
int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
drivers/scsi/scsi_dh.c
256
sdev = scsi_device_from_queue(q);
drivers/scsi/scsi_dh.c
298
int scsi_dh_set_params(struct request_queue *q, const char *params)
drivers/scsi/scsi_dh.c
303
sdev = scsi_device_from_queue(q);
drivers/scsi/scsi_dh.c
320
int scsi_dh_attach(struct request_queue *q, const char *name)
drivers/scsi/scsi_dh.c
326
sdev = scsi_device_from_queue(q);
drivers/scsi/scsi_dh.c
360
const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
drivers/scsi/scsi_dh.c
365
sdev = scsi_device_from_queue(q);
drivers/scsi/scsi_ioctl.c
228
static int sg_emulated_host(struct request_queue *q, int __user *p)
drivers/scsi/scsi_ioctl.c
520
static int sg_scsi_ioctl(struct request_queue *q, bool open_for_write,
drivers/scsi/scsi_ioctl.c
552
rq = scsi_alloc_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
drivers/scsi/scsi_ioctl.c
884
struct request_queue *q = sdev->request_queue;
drivers/scsi/scsi_ioctl.c
915
return sg_emulated_host(q, arg);
drivers/scsi/scsi_ioctl.c
919
return sg_scsi_ioctl(q, open_for_write, arg);
drivers/scsi/scsi_lib.c
1163
if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
drivers/scsi/scsi_lib.c
1165
(rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
drivers/scsi/scsi_lib.c
1247
struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
drivers/scsi/scsi_lib.c
1252
rq = blk_mq_alloc_request(q, opf, flags);
drivers/scsi/scsi_lib.c
127
blk_mq_delay_kick_requeue_list(rq->q, msecs);
drivers/scsi/scsi_lib.c
1368
static inline int scsi_dev_queue_ready(struct request_queue *q,
drivers/scsi/scsi_lib.c
1458
static inline int scsi_host_queue_ready(struct request_queue *q,
drivers/scsi/scsi_lib.c
1515
static bool scsi_mq_lld_busy(struct request_queue *q)
drivers/scsi/scsi_lib.c
1517
struct scsi_device *sdev = q->queuedata;
drivers/scsi/scsi_lib.c
1520
if (blk_queue_dying(q))
drivers/scsi/scsi_lib.c
1675
struct scsi_device *sdev = req->q->queuedata;
drivers/scsi/scsi_lib.c
1745
if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q)))
drivers/scsi/scsi_lib.c
1769
static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
drivers/scsi/scsi_lib.c
1771
struct scsi_device *sdev = q->queuedata;
drivers/scsi/scsi_lib.c
1784
static int scsi_mq_get_budget(struct request_queue *q)
drivers/scsi/scsi_lib.c
1786
struct scsi_device *sdev = q->queuedata;
drivers/scsi/scsi_lib.c
1787
int token = scsi_dev_queue_ready(q, sdev);
drivers/scsi/scsi_lib.c
1833
struct request_queue *q = req->q;
drivers/scsi/scsi_lib.c
1834
struct scsi_device *sdev = q->queuedata;
drivers/scsi/scsi_lib.c
1865
if (!scsi_host_queue_ready(q, shost, sdev, cmd))
drivers/scsi/scsi_lib.c
1919
scsi_mq_put_budget(q, cmd->budget_token);
drivers/scsi/scsi_lib.c
2191
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
drivers/scsi/scsi_lib.c
2195
if (q->mq_ops == &scsi_mq_ops_no_commit ||
drivers/scsi/scsi_lib.c
2196
q->mq_ops == &scsi_mq_ops)
drivers/scsi/scsi_lib.c
2197
sdev = q->queuedata;
drivers/scsi/scsi_lib.c
2818
struct request_queue *q = sdev->request_queue;
drivers/scsi/scsi_lib.c
2832
blk_set_pm_only(q);
drivers/scsi/scsi_lib.c
2834
memflags = blk_mq_freeze_queue(q);
drivers/scsi/scsi_lib.c
2842
blk_mq_unfreeze_queue(q, memflags);
drivers/scsi/scsi_lib.c
2849
blk_clear_pm_only(q);
drivers/scsi/scsi_lib.c
552
static void scsi_run_queue(struct request_queue *q)
drivers/scsi/scsi_lib.c
554
struct scsi_device *sdev = q->queuedata;
drivers/scsi/scsi_lib.c
562
blk_mq_kick_requeue_list(q);
drivers/scsi/scsi_lib.c
568
struct request_queue *q;
drivers/scsi/scsi_lib.c
571
q = sdev->request_queue;
drivers/scsi/scsi_lib.c
572
scsi_run_queue(q);
drivers/scsi/scsi_lib.c
645
struct request_queue *q = sdev->request_queue;
drivers/scsi/scsi_lib.c
650
if (q->limits.features & BLK_FEAT_ADD_RANDOM)
drivers/scsi/scsi_lib.c
651
add_disk_randomness(req->q->disk);
drivers/scsi/scsi_lib.c
678
percpu_ref_get(&q->q_usage_counter);
drivers/scsi/scsi_lib.c
684
percpu_ref_put(&q->q_usage_counter);
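
scsi_device_from_queue() above (scsi_lib.c 2191-2197) trusts q->queuedata only after checking that the queue's mq_ops are SCSI's own tables, so a request_queue belonging to another driver yields NULL rather than a bogus scsi_device. The shape of that ownership guard, with demo_* stand-ins:

#include <linux/blk-mq.h>

struct demo_device;                             /* stands in for scsi_device */
static const struct blk_mq_ops demo_mq_ops;     /* stands in for scsi_mq_ops */

static struct demo_device *demo_device_from_queue(struct request_queue *q)
{
        struct demo_device *sdev = NULL;

        if (q->mq_ops == &demo_mq_ops)          /* queue really is ours */
                sdev = q->queuedata;
        return sdev;
}
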
drivers/scsi/scsi_logging.c
33
if (!rq->q || !rq->q->disk)
drivers/scsi/scsi_logging.c
35
return rq->q->disk->disk_name;
drivers/scsi/scsi_scan.c
285
struct request_queue *q;
drivers/scsi/scsi_scan.c
339
q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
drivers/scsi/scsi_scan.c
340
if (IS_ERR(q)) {
drivers/scsi/scsi_scan.c
348
sdev->request_queue = q;
drivers/scsi/scsi_transport_fc.c
4216
struct request_queue *q = rport->rqst_q;
drivers/scsi/scsi_transport_fc.c
4218
if (q)
drivers/scsi/scsi_transport_fc.c
4219
blk_mq_run_hw_queues(q, true);
drivers/scsi/scsi_transport_fc.c
4330
struct request_queue *q;
drivers/scsi/scsi_transport_fc.c
4342
q = bsg_setup_queue(dev, bsg_name, &lim, fc_bsg_dispatch,
drivers/scsi/scsi_transport_fc.c
4344
if (IS_ERR(q)) {
drivers/scsi/scsi_transport_fc.c
4348
return PTR_ERR(q);
drivers/scsi/scsi_transport_fc.c
4350
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
drivers/scsi/scsi_transport_fc.c
4351
fc_host->rqst_q = q;
drivers/scsi/scsi_transport_fc.c
4366
struct request_queue *q;
drivers/scsi/scsi_transport_fc.c
4375
q = bsg_setup_queue(dev, dev_name(dev), &lim, fc_bsg_dispatch_prep,
drivers/scsi/scsi_transport_fc.c
4377
if (IS_ERR(q)) {
drivers/scsi/scsi_transport_fc.c
4379
return PTR_ERR(q);
drivers/scsi/scsi_transport_fc.c
4381
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
drivers/scsi/scsi_transport_fc.c
4382
rport->rqst_q = q;
drivers/scsi/scsi_transport_fc.c
4397
fc_bsg_remove(struct request_queue *q)
drivers/scsi/scsi_transport_fc.c
4399
bsg_remove_queue(q);
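
The fc transport lines above (and the iscsi and sas groups that follow) share one setup pattern: bsg_setup_queue() returns an ERR_PTR() on failure, so the caller tests with IS_ERR() and propagates PTR_ERR() before storing the queue anywhere. A hedged sketch of that error-handling shape; demo_host, its rqst_q field, and demo_bsg_setup_queue() are illustrative stand-ins, not the transport API:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/blkdev.h>

struct demo_host {
        struct request_queue *rqst_q;
};

struct request_queue *demo_bsg_setup_queue(struct device *dev);

static int demo_setup(struct demo_host *host, struct device *dev)
{
        struct request_queue *q;

        q = demo_bsg_setup_queue(dev);  /* may return ERR_PTR(-E...) */
        if (IS_ERR(q))
                return PTR_ERR(q);      /* never store an ERR_PTR */

        host->rqst_q = q;
        return 0;
}
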
drivers/scsi/scsi_transport_iscsi.c
1539
struct request_queue *q;
drivers/scsi/scsi_transport_iscsi.c
1547
q = bsg_setup_queue(dev, bsg_name, &lim, iscsi_bsg_host_dispatch, NULL,
drivers/scsi/scsi_transport_iscsi.c
1549
if (IS_ERR(q)) {
drivers/scsi/scsi_transport_iscsi.c
1552
return PTR_ERR(q);
drivers/scsi/scsi_transport_iscsi.c
1555
ihost->bsg_q = q;
drivers/scsi/scsi_transport_sas.c
1668
bsg_remove_queue(rphy->q);
drivers/scsi/scsi_transport_sas.c
194
struct request_queue *q;
drivers/scsi/scsi_transport_sas.c
202
q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), NULL,
drivers/scsi/scsi_transport_sas.c
204
if (IS_ERR(q))
drivers/scsi/scsi_transport_sas.c
205
return PTR_ERR(q);
drivers/scsi/scsi_transport_sas.c
206
rphy->q = q;
drivers/scsi/scsi_transport_sas.c
211
q = bsg_setup_queue(&shost->shost_gendev, name, NULL,
drivers/scsi/scsi_transport_sas.c
213
if (IS_ERR(q))
drivers/scsi/scsi_transport_sas.c
214
return PTR_ERR(q);
drivers/scsi/scsi_transport_sas.c
215
to_sas_host_attrs(shost)->q = q;
drivers/scsi/scsi_transport_sas.c
254
struct request_queue *q = to_sas_host_attrs(shost)->q;
drivers/scsi/scsi_transport_sas.c
256
bsg_remove_queue(q);
drivers/scsi/scsi_transport_sas.c
48
struct request_queue *q;
drivers/scsi/sd.c
1038
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1065
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1091
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1190
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1205
rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
drivers/scsi/sd.c
1225
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1360
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd.c
1382
if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
drivers/scsi/sd.c
1476
switch (scsi_disk(rq->q->disk)->provisioning_mode) {
drivers/scsi/sd.c
2201
struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
drivers/scsi/sd.c
2221
struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
drivers/scsi/sd.c
2318
struct scsi_disk *sdkp = scsi_disk(req->q->disk);
drivers/scsi/sd.c
948
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd_zbc.c
165
struct request_queue *q = sdkp->disk->queue;
drivers/scsi/sd_zbc.c
184
queue_max_hw_sectors(q) << SECTOR_SHIFT);
drivers/scsi/sd_zbc.c
185
max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q));
drivers/scsi/sd_zbc.c
297
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd_zbc.c
329
struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
drivers/scsi/sd_zbc.c
544
struct request_queue *q = disk->queue;
drivers/scsi/sd_zbc.c
554
if (!blk_queue_is_zoned(q))
drivers/scsi/sg.c
1433
struct request_queue *q = scsidp->request_queue;
drivers/scsi/sg.c
1474
sdp->sg_tablesize = queue_max_segments(q);
drivers/scsi/sg.c
1569
struct request_queue *q = sdp->device->request_queue;
drivers/scsi/sg.c
1577
blk_trace_remove(q);
drivers/scsi/sg.c
1578
blk_put_queue(q);
drivers/scsi/sg.c
1733
struct request_queue *q = sfp->parentdp->device->request_queue;
drivers/scsi/sg.c
1753
rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
drivers/scsi/sg.c
1776
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
drivers/scsi/sg.c
288
struct request_queue *q;
drivers/scsi/sg.c
346
q = device->request_queue;
drivers/scsi/sg.c
347
sdp->sg_tablesize = queue_max_segments(q);
drivers/scsi/sg.c
845
static int max_sectors_bytes(struct request_queue *q)
drivers/scsi/sg.c
847
unsigned int max_sectors = queue_max_sectors(q);
drivers/scsi/sr.c
302
struct scsi_cd *cd = scsi_cd(rq->q->disk);
drivers/scsi/sr.c
369
cd = scsi_cd(rq->q->disk);
drivers/scsi/sr.c
478
struct request_queue *q = cd->device->request_queue;
drivers/scsi/sr.c
489
lim = queue_limits_start_update(q);
drivers/scsi/sr.c
492
return queue_limits_commit_update_frozen(q, &lim);
drivers/scsi/st.c
573
err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
drivers/scsi/sym53c8xx_2/sym_malloc.c
126
q = &h[i];
drivers/scsi/sym53c8xx_2/sym_malloc.c
127
while (q->next && q->next != (m_link_p) b) {
drivers/scsi/sym53c8xx_2/sym_malloc.c
128
q = q->next;
drivers/scsi/sym53c8xx_2/sym_malloc.c
130
if (!q->next) {
drivers/scsi/sym53c8xx_2/sym_malloc.c
135
q->next = q->next->next;
drivers/scsi/sym53c8xx_2/sym_malloc.c
97
m_link_p q;
drivers/scsi/virtio_scsi.c
549
bi = blk_get_integrity(rq->q->disk);
drivers/soc/fsl/qbman/qman_priv.h
105
struct __qm_mcr_querycongestion q;
drivers/soc/fsl/qbman/qman_priv.h
120
return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
drivers/soc/fsl/qbman/qman_priv.h
133
u32 *_d = dest->q.state;
drivers/soc/fsl/qbman/qman_priv.h
134
const u32 *_a = a->q.state;
drivers/soc/fsl/qbman/qman_priv.h
135
const u32 *_b = b->q.state;
drivers/soc/fsl/qbman/qman_priv.h
145
u32 *_d = dest->q.state;
drivers/soc/fsl/qbman/qman_priv.h
146
const u32 *_a = a->q.state;
drivers/soc/fsl/qbman/qman_priv.h
147
const u32 *_b = b->q.state;
drivers/soc/fsl/qbman/qman_priv.h
82
static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
drivers/soc/fsl/qbman/qman_priv.h
84
return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
drivers/soc/fsl/qbman/qman_priv.h
86
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
drivers/soc/fsl/qbman/qman_priv.h
88
return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
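
The qman_priv.h accessors above reassemble a byte counter that the hardware reports as a high part plus a big-endian 32-bit low word. The same composition as a sketch; the field widths here are assumed for illustration:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_querycgr {
        u8 bcnt_hi;             /* upper bits of the counter (width assumed) */
        __be32 bcnt_lo;         /* lower 32 bits, big-endian */
};

static inline u64 demo_bcnt_get64(const struct demo_querycgr *q)
{
        return ((u64)q->bcnt_hi << 32) | be32_to_cpu(q->bcnt_lo);
}
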
drivers/soc/qcom/rpmh.c
27
#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
drivers/soc/qcom/rpmh.c
36
.completion = q, \
drivers/soc/ti/knav_qmss_acc.c
20
#define knav_range_offset_to_inst(kdev, range, q) \
drivers/soc/ti/knav_qmss_acc.c
21
(range->queue_base_inst + (q << kdev->inst_shift))
drivers/spi/spi-fsl-qspi.c
1002
struct fsl_qspi *q = dev_get_drvdata(dev);
drivers/spi/spi-fsl-qspi.c
1004
fsl_qspi_default_setup(q);
drivers/spi/spi-fsl-qspi.c
295
static bool needs_swap_endian(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
297
return !!(q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN);
drivers/spi/spi-fsl-qspi.c
300
static bool needs_4x_clock(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
302
return !!(q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK);
drivers/spi/spi-fsl-qspi.c
305
static bool needs_fill_txfifo(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
307
return !!(q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890);
drivers/spi/spi-fsl-qspi.c
310
static bool needs_wakeup_wait_mode(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
312
return !!(q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618);
drivers/spi/spi-fsl-qspi.c
315
static bool needs_amba_base_offset(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
317
return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
drivers/spi/spi-fsl-qspi.c
320
static bool needs_tdh_setting(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
322
return !!(q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING);
drivers/spi/spi-fsl-qspi.c
325
static bool needs_clk_disable(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
327
return !(q->devtype_data->quirks & QUADSPI_QUIRK_SKIP_CLK_DISABLE);
drivers/spi/spi-fsl-qspi.c
334
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
drivers/spi/spi-fsl-qspi.c
336
return needs_swap_endian(q) ? __swab32(a) : a;
drivers/spi/spi-fsl-qspi.c
346
static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
drivers/spi/spi-fsl-qspi.c
348
if (q->devtype_data->little_endian)
drivers/spi/spi-fsl-qspi.c
354
static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
drivers/spi/spi-fsl-qspi.c
356
if (q->devtype_data->little_endian)
drivers/spi/spi-fsl-qspi.c
364
struct fsl_qspi *q = dev_id;
drivers/spi/spi-fsl-qspi.c
368
reg = qspi_readl(q, q->iobase + QUADSPI_FR);
drivers/spi/spi-fsl-qspi.c
369
qspi_writel(q, reg, q->iobase + QUADSPI_FR);
drivers/spi/spi-fsl-qspi.c
372
complete(&q->c);
drivers/spi/spi-fsl-qspi.c
374
dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
drivers/spi/spi-fsl-qspi.c
378
static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
drivers/spi/spi-fsl-qspi.c
393
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
drivers/spi/spi-fsl-qspi.c
396
ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
drivers/spi/spi-fsl-qspi.c
399
ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
drivers/spi/spi-fsl-qspi.c
402
ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
drivers/spi/spi-fsl-qspi.c
405
ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
drivers/spi/spi-fsl-qspi.c
426
(op->data.nbytes > q->devtype_data->ahb_buf_size ||
drivers/spi/spi-fsl-qspi.c
427
(op->data.nbytes > q->devtype_data->rxfifo - 4 &&
drivers/spi/spi-fsl-qspi.c
432
op->data.nbytes > q->devtype_data->txfifo)
drivers/spi/spi-fsl-qspi.c
438
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
drivers/spi/spi-fsl-qspi.c
441
void __iomem *base = q->iobase;
drivers/spi/spi-fsl-qspi.c
482
qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
drivers/spi/spi-fsl-qspi.c
483
qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
drivers/spi/spi-fsl-qspi.c
487
qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
drivers/spi/spi-fsl-qspi.c
490
qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
drivers/spi/spi-fsl-qspi.c
491
qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
drivers/spi/spi-fsl-qspi.c
494
static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
498
ret = clk_prepare_enable(q->clk_en);
drivers/spi/spi-fsl-qspi.c
502
ret = clk_prepare_enable(q->clk);
drivers/spi/spi-fsl-qspi.c
504
clk_disable_unprepare(q->clk_en);
drivers/spi/spi-fsl-qspi.c
508
if (needs_wakeup_wait_mode(q))
drivers/spi/spi-fsl-qspi.c
509
cpu_latency_qos_add_request(&q->pm_qos_req, 0);
drivers/spi/spi-fsl-qspi.c
514
static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
516
if (needs_wakeup_wait_mode(q))
drivers/spi/spi-fsl-qspi.c
517
cpu_latency_qos_remove_request(&q->pm_qos_req);
drivers/spi/spi-fsl-qspi.c
519
clk_disable_unprepare(q->clk);
drivers/spi/spi-fsl-qspi.c
520
clk_disable_unprepare(q->clk_en);
drivers/spi/spi-fsl-qspi.c
530
static void fsl_qspi_invalidate(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
534
reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
drivers/spi/spi-fsl-qspi.c
536
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
drivers/spi/spi-fsl-qspi.c
545
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
drivers/spi/spi-fsl-qspi.c
548
static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi,
drivers/spi/spi-fsl-qspi.c
554
if (q->selected == spi_get_chipselect(spi, 0))
drivers/spi/spi-fsl-qspi.c
557
if (needs_4x_clock(q))
drivers/spi/spi-fsl-qspi.c
560
if (needs_clk_disable(q))
drivers/spi/spi-fsl-qspi.c
561
fsl_qspi_clk_disable_unprep(q);
drivers/spi/spi-fsl-qspi.c
563
ret = clk_set_rate(q->clk, rate);
drivers/spi/spi-fsl-qspi.c
567
if (needs_clk_disable(q)) {
drivers/spi/spi-fsl-qspi.c
568
ret = fsl_qspi_clk_prep_enable(q);
drivers/spi/spi-fsl-qspi.c
573
q->selected = spi_get_chipselect(spi, 0);
drivers/spi/spi-fsl-qspi.c
575
fsl_qspi_invalidate(q);
drivers/spi/spi-fsl-qspi.c
578
static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
drivers/spi/spi-fsl-qspi.c
581
q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
drivers/spi/spi-fsl-qspi.c
585
static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
drivers/spi/spi-fsl-qspi.c
588
void __iomem *base = q->iobase;
drivers/spi/spi-fsl-qspi.c
594
val = fsl_qspi_endian_xchg(q, val);
drivers/spi/spi-fsl-qspi.c
595
qspi_writel(q, val, base + QUADSPI_TBDR);
drivers/spi/spi-fsl-qspi.c
600
val = fsl_qspi_endian_xchg(q, val);
drivers/spi/spi-fsl-qspi.c
601
qspi_writel(q, val, base + QUADSPI_TBDR);
drivers/spi/spi-fsl-qspi.c
604
if (needs_fill_txfifo(q)) {
drivers/spi/spi-fsl-qspi.c
606
qspi_writel(q, 0, base + QUADSPI_TBDR);
drivers/spi/spi-fsl-qspi.c
610
static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
drivers/spi/spi-fsl-qspi.c
613
void __iomem *base = q->iobase;
drivers/spi/spi-fsl-qspi.c
619
val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
drivers/spi/spi-fsl-qspi.c
620
val = fsl_qspi_endian_xchg(q, val);
drivers/spi/spi-fsl-qspi.c
625
val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
drivers/spi/spi-fsl-qspi.c
626
val = fsl_qspi_endian_xchg(q, val);
drivers/spi/spi-fsl-qspi.c
631
static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
drivers/spi/spi-fsl-qspi.c
633
void __iomem *base = q->iobase;
drivers/spi/spi-fsl-qspi.c
636
init_completion(&q->c);
drivers/spi/spi-fsl-qspi.c
643
qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
drivers/spi/spi-fsl-qspi.c
647
if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
drivers/spi/spi-fsl-qspi.c
651
fsl_qspi_read_rxfifo(q, op);
drivers/spi/spi-fsl-qspi.c
656
static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
drivers/spi/spi-fsl-qspi.c
661
if (!q->devtype_data->little_endian)
drivers/spi/spi-fsl-qspi.c
670
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
drivers/spi/spi-fsl-qspi.c
671
void __iomem *base = q->iobase;
drivers/spi/spi-fsl-qspi.c
674
int invalid_mstrid = q->devtype_data->invalid_mstrid;
drivers/spi/spi-fsl-qspi.c
676
mutex_lock(&q->lock);
drivers/spi/spi-fsl-qspi.c
679
fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
drivers/spi/spi-fsl-qspi.c
682
fsl_qspi_select_mem(q, mem->spi, op);
drivers/spi/spi-fsl-qspi.c
684
if (needs_amba_base_offset(q))
drivers/spi/spi-fsl-qspi.c
685
addr_offset = q->memmap_phy;
drivers/spi/spi-fsl-qspi.c
687
qspi_writel(q,
drivers/spi/spi-fsl-qspi.c
688
q->selected * q->devtype_data->ahb_buf_size + addr_offset,
drivers/spi/spi-fsl-qspi.c
691
qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
drivers/spi/spi-fsl-qspi.c
695
qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
drivers/spi/spi-fsl-qspi.c
698
qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF0CR);
drivers/spi/spi-fsl-qspi.c
699
qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF1CR);
drivers/spi/spi-fsl-qspi.c
700
qspi_writel(q, invalid_mstrid, base + QUADSPI_BUF2CR);
drivers/spi/spi-fsl-qspi.c
702
fsl_qspi_prepare_lut(q, op);
drivers/spi/spi-fsl-qspi.c
709
if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
drivers/spi/spi-fsl-qspi.c
711
fsl_qspi_read_ahb(q, op);
drivers/spi/spi-fsl-qspi.c
713
qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
drivers/spi/spi-fsl-qspi.c
717
fsl_qspi_fill_txfifo(q, op);
drivers/spi/spi-fsl-qspi.c
719
err = fsl_qspi_do_op(q, op);
drivers/spi/spi-fsl-qspi.c
723
fsl_qspi_invalidate(q);
drivers/spi/spi-fsl-qspi.c
725
mutex_unlock(&q->lock);
drivers/spi/spi-fsl-qspi.c
732
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
drivers/spi/spi-fsl-qspi.c
735
if (op->data.nbytes > q->devtype_data->txfifo)
drivers/spi/spi-fsl-qspi.c
736
op->data.nbytes = q->devtype_data->txfifo;
drivers/spi/spi-fsl-qspi.c
738
if (op->data.nbytes > q->devtype_data->ahb_buf_size)
drivers/spi/spi-fsl-qspi.c
739
op->data.nbytes = q->devtype_data->ahb_buf_size;
drivers/spi/spi-fsl-qspi.c
740
else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
drivers/spi/spi-fsl-qspi.c
747
static int fsl_qspi_default_setup(struct fsl_qspi *q)
drivers/spi/spi-fsl-qspi.c
749
void __iomem *base = q->iobase;
drivers/spi/spi-fsl-qspi.c
755
fsl_qspi_clk_disable_unprep(q);
drivers/spi/spi-fsl-qspi.c
758
ret = clk_set_rate(q->clk, 66000000);
drivers/spi/spi-fsl-qspi.c
762
ret = fsl_qspi_clk_prep_enable(q);
drivers/spi/spi-fsl-qspi.c
767
qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
drivers/spi/spi-fsl-qspi.c
772
qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
drivers/spi/spi-fsl-qspi.c
780
if (needs_tdh_setting(q))
drivers/spi/spi-fsl-qspi.c
781
qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
drivers/spi/spi-fsl-qspi.c
785
reg = qspi_readl(q, base + QUADSPI_SMPR);
drivers/spi/spi-fsl-qspi.c
786
qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
drivers/spi/spi-fsl-qspi.c
792
qspi_writel(q, 0, base + QUADSPI_BUF0IND);
drivers/spi/spi-fsl-qspi.c
793
qspi_writel(q, 0, base + QUADSPI_BUF1IND);
drivers/spi/spi-fsl-qspi.c
794
qspi_writel(q, 0, base + QUADSPI_BUF2IND);
drivers/spi/spi-fsl-qspi.c
796
qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
drivers/spi/spi-fsl-qspi.c
797
q->iobase + QUADSPI_BFGENCR);
drivers/spi/spi-fsl-qspi.c
798
qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
drivers/spi/spi-fsl-qspi.c
799
qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
drivers/spi/spi-fsl-qspi.c
800
QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
drivers/spi/spi-fsl-qspi.c
803
if (needs_amba_base_offset(q))
drivers/spi/spi-fsl-qspi.c
804
addr_offset = q->memmap_phy;
drivers/spi/spi-fsl-qspi.c
816
sfa_size = q->devtype_data->sfa_size ? : q->devtype_data->ahb_buf_size;
drivers/spi/spi-fsl-qspi.c
817
qspi_writel(q, addr_offset + 1 * sfa_size, base + QUADSPI_SFA1AD);
drivers/spi/spi-fsl-qspi.c
818
qspi_writel(q, addr_offset + 2 * sfa_size, base + QUADSPI_SFA2AD);
drivers/spi/spi-fsl-qspi.c
819
qspi_writel(q, addr_offset + 3 * sfa_size, base + QUADSPI_SFB1AD);
drivers/spi/spi-fsl-qspi.c
820
qspi_writel(q, addr_offset + 4 * sfa_size, base + QUADSPI_SFB2AD);
drivers/spi/spi-fsl-qspi.c
822
q->selected = -1;
drivers/spi/spi-fsl-qspi.c
825
qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
drivers/spi/spi-fsl-qspi.c
829
qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
drivers/spi/spi-fsl-qspi.c
832
qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
drivers/spi/spi-fsl-qspi.c
839
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
drivers/spi/spi-fsl-qspi.c
848
if (of_get_available_child_count(q->dev->of_node) == 1)
drivers/spi/spi-fsl-qspi.c
849
return dev_name(q->dev);
drivers/spi/spi-fsl-qspi.c
852
"%s-%d", dev_name(q->dev),
drivers/spi/spi-fsl-qspi.c
876
struct fsl_qspi *q = data;
drivers/spi/spi-fsl-qspi.c
879
qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
drivers/spi/spi-fsl-qspi.c
880
qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
drivers/spi/spi-fsl-qspi.c
885
struct fsl_qspi *q = data;
drivers/spi/spi-fsl-qspi.c
887
reset_control_assert(q->resets);
drivers/spi/spi-fsl-qspi.c
889
fsl_qspi_clk_disable_unprep(q);
drivers/spi/spi-fsl-qspi.c
891
mutex_destroy(&q->lock);
drivers/spi/spi-fsl-qspi.c
900
struct fsl_qspi *q;
drivers/spi/spi-fsl-qspi.c
903
ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*q));
drivers/spi/spi-fsl-qspi.c
910
q = spi_controller_get_devdata(ctlr);
drivers/spi/spi-fsl-qspi.c
911
q->dev = dev;
drivers/spi/spi-fsl-qspi.c
912
q->devtype_data = of_device_get_match_data(dev);
drivers/spi/spi-fsl-qspi.c
913
if (!q->devtype_data)
drivers/spi/spi-fsl-qspi.c
916
platform_set_drvdata(pdev, q);
drivers/spi/spi-fsl-qspi.c
919
q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
drivers/spi/spi-fsl-qspi.c
920
if (IS_ERR(q->iobase))
drivers/spi/spi-fsl-qspi.c
921
return PTR_ERR(q->iobase);
drivers/spi/spi-fsl-qspi.c
927
q->memmap_phy = res->start;
drivers/spi/spi-fsl-qspi.c
929
q->ahb_addr = devm_ioremap(dev, q->memmap_phy,
drivers/spi/spi-fsl-qspi.c
930
(q->devtype_data->ahb_buf_size * 4));
drivers/spi/spi-fsl-qspi.c
931
if (!q->ahb_addr)
drivers/spi/spi-fsl-qspi.c
934
q->resets = devm_reset_control_array_get_optional_exclusive(dev);
drivers/spi/spi-fsl-qspi.c
935
if (IS_ERR(q->resets))
drivers/spi/spi-fsl-qspi.c
936
return PTR_ERR(q->resets);
drivers/spi/spi-fsl-qspi.c
939
q->clk_en = devm_clk_get(dev, "qspi_en");
drivers/spi/spi-fsl-qspi.c
940
if (IS_ERR(q->clk_en))
drivers/spi/spi-fsl-qspi.c
941
return PTR_ERR(q->clk_en);
drivers/spi/spi-fsl-qspi.c
943
q->clk = devm_clk_get(dev, "qspi");
drivers/spi/spi-fsl-qspi.c
944
if (IS_ERR(q->clk))
drivers/spi/spi-fsl-qspi.c
945
return PTR_ERR(q->clk);
drivers/spi/spi-fsl-qspi.c
947
mutex_init(&q->lock);
drivers/spi/spi-fsl-qspi.c
949
ret = fsl_qspi_clk_prep_enable(q);
drivers/spi/spi-fsl-qspi.c
955
ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
drivers/spi/spi-fsl-qspi.c
959
ret = reset_control_deassert(q->resets);
drivers/spi/spi-fsl-qspi.c
969
fsl_qspi_irq_handler, 0, pdev->name, q);
drivers/spi/spi-fsl-qspi.c
980
fsl_qspi_default_setup(q);
drivers/spi/spi-fsl-qspi.c
984
ret = devm_add_action_or_reset(dev, fsl_qspi_disable, q);
drivers/spi/spi-pxa2xx.c
808
unsigned long q, q1, q2;
drivers/spi/spi-pxa2xx.c
854
q = q1;
drivers/spi/spi-pxa2xx.c
858
q = q2;
drivers/spi/spi-pxa2xx.c
879
q = 1;
drivers/spi/spi-pxa2xx.c
885
return q - 1;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
203
ia_css_queue_t *q = NULL;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
210
q = &css_queues.host2sp_buffer_queue_handles[thread][id];
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
215
q = &css_queues.sp2host_buffer_queue_handles[id];
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
218
q = &css_queues.host2sp_psys_event_queue_handle;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
221
q = &css_queues.sp2host_psys_event_queue_handle;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
224
q = &css_queues.host2sp_isys_event_queue_handle;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
227
q = &css_queues.sp2host_isys_event_queue_handle;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
230
q = &css_queues.host2sp_tag_cmd_queue_handle;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
236
return q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
322
ia_css_queue_t *q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
331
q = bufq_get_qhandle(sh_css_host2sp_buffer_queue,
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
334
if (q) {
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
335
error = ia_css_queue_enqueue(q, item);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
350
ia_css_queue_t *q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
359
q = bufq_get_qhandle(sh_css_sp2host_buffer_queue,
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
362
if (q) {
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
363
error = ia_css_queue_dequeue(q, item);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
380
ia_css_queue_t *q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
383
q = bufq_get_qhandle(sh_css_host2sp_psys_event_queue, -1, -1);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
384
if (!q) {
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
389
error = ia_css_eventq_send(q,
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
400
ia_css_queue_t *q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
408
q = bufq_get_qhandle(sh_css_sp2host_psys_event_queue, -1, -1);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
409
if (!q) {
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
413
error = ia_css_eventq_recv(q, item);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
422
ia_css_queue_t *q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
430
q = bufq_get_qhandle(sh_css_sp2host_isys_event_queue, -1, -1);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
431
if (!q) {
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
435
error = ia_css_eventq_recv(q, item);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
442
ia_css_queue_t *q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
445
q = bufq_get_qhandle(sh_css_host2sp_isys_event_queue, -1, -1);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
446
if (!q) {
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
451
error = ia_css_eventq_send(q, evt_id, 0, 0, 0);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
461
ia_css_queue_t *q;
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
464
q = bufq_get_qhandle(sh_css_host2sp_tag_cmd_queue, -1, -1);
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
465
if (!q) {
drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c
469
error = ia_css_queue_enqueue(q, item);
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1845
struct vb2_queue *q = &isc->vb2_vidq;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1864
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1865
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1866
q->drv_priv = isc;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1867
q->buf_struct_size = sizeof(struct isc_buffer);
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1868
q->ops = &isc_vb2_ops;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1869
q->mem_ops = &vb2_dma_contig_memops;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1870
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1871
q->lock = &isc->lock;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1872
q->min_queued_buffers = 1;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1873
q->dev = isc->dev;
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1875
ret = vb2_queue_init(q);
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
1913
vdev->queue = q;
drivers/staging/media/imx/imx-media-capture.c
1016
vq = &priv->q;
drivers/staging/media/imx/imx-media-capture.c
195
if (vb2_is_busy(&priv->q)) {
drivers/staging/media/imx/imx-media-capture.c
42
struct vb2_queue q; /* The videobuf2 queue */
drivers/staging/media/imx/imx-media-capture.c
456
if (vb2_is_busy(&priv->q)) {
drivers/staging/media/imx/imx-media-capture.c
496
if (vb2_is_busy(&priv->q))
drivers/staging/media/imx/imx-media-capture.c
807
struct vb2_queue *vq = &priv->q;
drivers/staging/media/imx/imx-media-capture.c
861
struct vb2_queue *vq = &priv->q;
drivers/staging/media/imx/imx-media-capture.c
999
vfd->queue = &priv->q;
drivers/staging/media/imx/imx-media-csc-scaler.c
504
static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
drivers/staging/media/imx/imx-media-csc-scaler.c
508
struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
drivers/staging/media/imx/imx-media-csc-scaler.c
516
(q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
drivers/staging/media/imx/imx-media-csc-scaler.c
553
static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
drivers/staging/media/imx/imx-media-csc-scaler.c
555
struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
drivers/staging/media/imx/imx-media-csc-scaler.c
565
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
drivers/staging/media/ipu3/ipu3-css.c
1063
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
drivers/staging/media/ipu3/ipu3-css.c
1066
return queue >= 0 ? readb(&q->host2sp_bufq_info[thread][queue].end) :
drivers/staging/media/ipu3/ipu3-css.c
1067
readb(&q->host2sp_evtq_info.end);
drivers/staging/media/ipu3/ipu3-css.c
1077
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
drivers/staging/media/ipu3/ipu3-css.c
1082
size = readb(&q->host2sp_bufq_info[thread][queue].size);
drivers/staging/media/ipu3/ipu3-css.c
1083
start = readb(&q->host2sp_bufq_info[thread][queue].start);
drivers/staging/media/ipu3/ipu3-css.c
1084
end = readb(&q->host2sp_bufq_info[thread][queue].end);
drivers/staging/media/ipu3/ipu3-css.c
1086
size = readb(&q->host2sp_evtq_info.size);
drivers/staging/media/ipu3/ipu3-css.c
1087
start = readb(&q->host2sp_evtq_info.start);
drivers/staging/media/ipu3/ipu3-css.c
1088
end = readb(&q->host2sp_evtq_info.end);
drivers/staging/media/ipu3/ipu3-css.c
1099
writel(data, &q->host2sp_bufq[thread][queue][end]);
drivers/staging/media/ipu3/ipu3-css.c
1100
writeb(end2, &q->host2sp_bufq_info[thread][queue].end);
drivers/staging/media/ipu3/ipu3-css.c
1102
writel(data, &q->host2sp_evtq[end]);
drivers/staging/media/ipu3/ipu3-css.c
1103
writeb(end2, &q->host2sp_evtq_info.end);
drivers/staging/media/ipu3/ipu3-css.c
1115
struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
drivers/staging/media/ipu3/ipu3-css.c
1120
size = readb(&q->sp2host_bufq_info[queue].size);
drivers/staging/media/ipu3/ipu3-css.c
1121
start = readb(&q->sp2host_bufq_info[queue].start);
drivers/staging/media/ipu3/ipu3-css.c
1122
end = readb(&q->sp2host_bufq_info[queue].end);
drivers/staging/media/ipu3/ipu3-css.c
1124
size = readb(&q->sp2host_evtq_info.size);
drivers/staging/media/ipu3/ipu3-css.c
1125
start = readb(&q->sp2host_evtq_info.start);
drivers/staging/media/ipu3/ipu3-css.c
1126
end = readb(&q->sp2host_evtq_info.end);
drivers/staging/media/ipu3/ipu3-css.c
1138
*data = readl(&q->sp2host_bufq[queue][start]);
drivers/staging/media/ipu3/ipu3-css.c
1139
writeb(start2, &q->sp2host_bufq_info[queue].start);
drivers/staging/media/ipu3/ipu3-css.c
1143
*data = readl(&q->sp2host_evtq[start]);
drivers/staging/media/ipu3/ipu3-css.c
1144
writeb(start2, &q->sp2host_evtq_info.start);
drivers/staging/media/ipu3/ipu3-css.c
1358
int q, r, pipe;
drivers/staging/media/ipu3/ipu3-css.c
1380
for (q = 0; q < IPU3_CSS_QUEUES; q++)
drivers/staging/media/ipu3/ipu3-css.c
1382
&css_pipe->queue[q].bufs,
drivers/staging/media/ipu3/ipu3-css.c
1395
int q;
drivers/staging/media/ipu3/ipu3-css.c
1399
for (q = 0; q < IPU3_CSS_QUEUES; q++)
drivers/staging/media/ipu3/ipu3-css.c
1400
if (!list_empty(&css_pipe->queue[q].bufs))
drivers/staging/media/ipu3/ipu3-css.c
1403
return (q == IPU3_CSS_QUEUES);
drivers/staging/media/ipu3/ipu3-css.c
1426
unsigned int p, q, i;
drivers/staging/media/ipu3/ipu3-css.c
1446
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
drivers/staging/media/ipu3/ipu3-css.c
1447
unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
drivers/staging/media/ipu3/ipu3-css.c
1451
&css_pipe->abi_buffers[q][i],
drivers/staging/media/ipu3/ipu3-css.c
1468
unsigned int p, q, i, abi_buf_num;
drivers/staging/media/ipu3/ipu3-css.c
1472
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
drivers/staging/media/ipu3/ipu3-css.c
1473
abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
drivers/staging/media/ipu3/ipu3-css.c
1475
imgu_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
drivers/staging/media/ipu3/ipu3-css.c
1505
int r, q, pipe;
drivers/staging/media/ipu3/ipu3-css.c
1519
for (q = 0; q < IPU3_CSS_QUEUES; q++) {
drivers/staging/media/ipu3/ipu3-css.c
1520
r = imgu_css_queue_init(&css_pipe->queue[q], NULL, 0);
drivers/staging/media/ipu3/ipu3-css.c
169
static bool imgu_css_queue_enabled(struct imgu_css_queue *q)
drivers/staging/media/ipu3/ipu3-css.c
1701
struct imgu_css_queue *q;
drivers/staging/media/ipu3/ipu3-css.c
1705
q = kzalloc_objs(struct imgu_css_queue, IPU3_CSS_QUEUES);
drivers/staging/media/ipu3/ipu3-css.c
1706
if (!q)
drivers/staging/media/ipu3/ipu3-css.c
1709
in = &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
drivers/staging/media/ipu3/ipu3-css.c
171
return q->css_fmt;
drivers/staging/media/ipu3/ipu3-css.c
1710
out = &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
drivers/staging/media/ipu3/ipu3-css.c
1711
vf = &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
drivers/staging/media/ipu3/ipu3-css.c
1722
if (imgu_css_queue_init(&q[i], fmts[i],
drivers/staging/media/ipu3/ipu3-css.c
1746
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
drivers/staging/media/ipu3/ipu3-css.c
1747
!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
drivers/staging/media/ipu3/ipu3-css.c
1753
if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
drivers/staging/media/ipu3/ipu3-css.c
1788
ret = imgu_css_find_binary(css, pipe, q, r);
drivers/staging/media/ipu3/ipu3-css.c
1802
if (imgu_css_queue_init(&q[i], &q[i].fmt.mpix,
drivers/staging/media/ipu3/ipu3-css.c
1809
*fmts[i] = q[i].fmt.mpix;
drivers/staging/media/ipu3/ipu3-css.c
1825
kfree(q);
drivers/staging/media/ipu3/ipu3-css.c
42
#define IPU3_CSS_QUEUE_TO_FLAGS(q) (1 << (q))
drivers/staging/media/ipu7/ipu7-isys-queue.c
527
static int start_streaming(struct vb2_queue *q, unsigned int count)
drivers/staging/media/ipu7/ipu7-isys-queue.c
529
struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q);
drivers/staging/media/ipu7/ipu7-isys-queue.c
616
static void stop_streaming(struct vb2_queue *q)
drivers/staging/media/ipu7/ipu7-isys-queue.c
618
struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q);
drivers/staging/media/ipu7/ipu7-isys-queue.c
62
static int ipu7_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/staging/media/ipu7/ipu7-isys-queue.c
66
struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q);
drivers/staging/media/ipu7/ipu7-syscom.c
15
u32 q)
drivers/staging/media/ipu7/ipu7-syscom.c
17
return ctx->queue_indices + (q * sizeof(struct syscom_queue_indices_s));
drivers/staging/media/ipu7/ipu7-syscom.c
20
void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q)
drivers/staging/media/ipu7/ipu7-syscom.c
22
struct syscom_queue_config *queue_params = &ctx->queue_configs[q];
drivers/staging/media/ipu7/ipu7-syscom.c
23
void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q);
drivers/staging/media/ipu7/ipu7-syscom.c
32
if (q < ctx->num_output_queues) {
drivers/staging/media/ipu7/ipu7-syscom.c
53
void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q)
drivers/staging/media/ipu7/ipu7-syscom.c
55
struct syscom_queue_config *queue_params = &ctx->queue_configs[q];
drivers/staging/media/ipu7/ipu7-syscom.c
56
void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q);
drivers/staging/media/ipu7/ipu7-syscom.c
59
if (q < ctx->num_output_queues)
drivers/staging/media/ipu7/ipu7-syscom.h
31
void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q);
drivers/staging/media/ipu7/ipu7-syscom.h
32
void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q);
drivers/staging/media/meson/vdec/vdec.c
164
static void process_num_buffers(struct vb2_queue *q,
drivers/staging/media/meson/vdec/vdec.c
170
unsigned int q_num_bufs = vb2_get_num_buffers(q);
drivers/staging/media/meson/vdec/vdec.c
187
q->min_queued_buffers = max(fmt_out->min_buffers, sess->num_dst_bufs);
drivers/staging/media/meson/vdec/vdec.c
190
static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
drivers/staging/media/meson/vdec/vdec.c
194
struct amvdec_session *sess = vb2_get_drv_priv(q);
drivers/staging/media/meson/vdec/vdec.c
198
switch (q->type) {
drivers/staging/media/meson/vdec/vdec.c
223
process_num_buffers(q, sess, num_buffers, false);
drivers/staging/media/meson/vdec/vdec.c
230
switch (q->type) {
drivers/staging/media/meson/vdec/vdec.c
252
process_num_buffers(q, sess, num_buffers, true);
drivers/staging/media/meson/vdec/vdec.c
281
static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/staging/media/meson/vdec/vdec.c
283
struct amvdec_session *sess = vb2_get_drv_priv(q);
drivers/staging/media/meson/vdec/vdec.c
294
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/staging/media/meson/vdec/vdec.c
303
q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
drivers/staging/media/meson/vdec/vdec.c
358
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
drivers/staging/media/meson/vdec/vdec.c
396
static void vdec_stop_streaming(struct vb2_queue *q)
drivers/staging/media/meson/vdec/vdec.c
398
struct amvdec_session *sess = vb2_get_drv_priv(q);
drivers/staging/media/meson/vdec/vdec.c
422
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
drivers/staging/media/meson/vdec/vdec_helpers.c
477
vb2_queue_error(&sess->m2m_ctx->cap_q_ctx.q);
drivers/staging/media/meson/vdec/vdec_helpers.c
478
vb2_queue_error(&sess->m2m_ctx->out_q_ctx.q);
drivers/staging/media/starfive/camss/stf-video.c
132
static int video_queue_setup(struct vb2_queue *q,
drivers/staging/media/starfive/camss/stf-video.c
138
struct stfcamss_video *video = vb2_get_drv_priv(q);
drivers/staging/media/starfive/camss/stf-video.c
269
static int video_start_streaming(struct vb2_queue *q, unsigned int count)
drivers/staging/media/starfive/camss/stf-video.c
271
struct stfcamss_video *video = vb2_get_drv_priv(q);
drivers/staging/media/starfive/camss/stf-video.c
307
static void video_stop_streaming(struct vb2_queue *q)
drivers/staging/media/starfive/camss/stf-video.c
309
struct stfcamss_video *video = vb2_get_drv_priv(q);
drivers/staging/media/starfive/camss/stf-video.c
497
struct vb2_queue *q;
drivers/staging/media/starfive/camss/stf-video.c
504
q = &video->vb2_q;
drivers/staging/media/starfive/camss/stf-video.c
505
q->drv_priv = video;
drivers/staging/media/starfive/camss/stf-video.c
506
q->mem_ops = &vb2_dma_contig_memops;
drivers/staging/media/starfive/camss/stf-video.c
507
q->ops = &stf_video_vb2_q_ops;
drivers/staging/media/starfive/camss/stf-video.c
508
q->type = video->type;
drivers/staging/media/starfive/camss/stf-video.c
509
q->io_modes = VB2_DMABUF | VB2_MMAP;
drivers/staging/media/starfive/camss/stf-video.c
510
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
drivers/staging/media/starfive/camss/stf-video.c
511
q->buf_struct_size = sizeof(struct stfcamss_buffer);
drivers/staging/media/starfive/camss/stf-video.c
512
q->dev = video->stfcamss->dev;
drivers/staging/media/starfive/camss/stf-video.c
513
q->lock = &video->q_lock;
drivers/staging/media/starfive/camss/stf-video.c
514
q->min_queued_buffers = STFCAMSS_MIN_BUFFERS;
drivers/staging/media/starfive/camss/stf-video.c
515
ret = vb2_queue_init(q);
drivers/staging/media/sunxi/cedrus/cedrus.h
246
struct vb2_queue *q,
drivers/staging/media/sunxi/cedrus/cedrus.h
252
struct vb2_buffer *buf = vb2_find_buffer(q, timestamp);
drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
657
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
drivers/target/target_core_device.c
735
struct se_device_queue *q;
drivers/target/target_core_device.c
737
q = &dev->queues[i];
drivers/target/target_core_device.c
738
INIT_LIST_HEAD(&q->state_list);
drivers/target/target_core_device.c
739
spin_lock_init(&q->lock);
drivers/target/target_core_device.c
741
init_llist_head(&q->sq.cmd_list);
drivers/target/target_core_device.c
742
INIT_WORK(&q->sq.work, target_queued_submit_work);
drivers/target/target_core_device.c
847
struct request_queue *q = bdev_get_queue(bdev);
drivers/target/target_core_device.c
853
attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
drivers/target/target_core_device.c
855
queue_atomic_write_unit_min_bytes(q) / block_size;
drivers/target/target_core_iblock.c
133
q = bdev_get_queue(bd);
drivers/target/target_core_iblock.c
136
dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q),
drivers/target/target_core_iblock.c
139
dev->dev_attrib.hw_queue_depth = q->nr_requests;
drivers/target/target_core_iblock.c
93
struct request_queue *q;
drivers/target/target_core_pscsi.c
287
struct request_queue *q = sd->request_queue;
drivers/target/target_core_pscsi.c
302
min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
drivers/thunderbolt/quirks.c
123
const struct tb_quirk *q = &tb_quirks[i];
drivers/thunderbolt/quirks.c
125
if (q->hw_vendor_id && q->hw_vendor_id != sw->config.vendor_id)
drivers/thunderbolt/quirks.c
127
if (q->hw_device_id && q->hw_device_id != sw->config.device_id)
drivers/thunderbolt/quirks.c
129
if (q->vendor && q->vendor != sw->vendor)
drivers/thunderbolt/quirks.c
131
if (q->device && q->device != sw->device)
drivers/thunderbolt/quirks.c
134
tb_sw_dbg(sw, "running %ps\n", q->hook);
drivers/thunderbolt/quirks.c
135
q->hook(sw);
drivers/tty/hvc/hvsi_lib.c
20
struct hvsi_query q;
drivers/tty/hvc/hvsi_lib.c
261
struct hvsi_query q;
drivers/tty/hvc/hvsi_lib.c
268
q.hdr.type = VS_QUERY_PACKET_HEADER;
drivers/tty/hvc/hvsi_lib.c
269
q.hdr.len = sizeof(struct hvsi_query);
drivers/tty/hvc/hvsi_lib.c
270
q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS);
drivers/tty/hvc/hvsi_lib.c
271
rc = hvsi_send_packet(pv, &q.hdr);
drivers/tty/hvc/hvsi_lib.c
29
q.hdr.type = VS_QUERY_PACKET_HEADER;
drivers/tty/hvc/hvsi_lib.c
30
q.hdr.len = sizeof(struct hvsi_query);
drivers/tty/hvc/hvsi_lib.c
31
q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
drivers/tty/hvc/hvsi_lib.c
32
hvsi_send_packet(pv, &q.hdr);
drivers/tty/synclink_gt.c
161
wait_queue_head_t q;
drivers/tty/synclink_gt.c
2254
wake_up_interruptible(&w->q);
drivers/tty/synclink_gt.c
2920
init_waitqueue_head(&w->q);
drivers/tty/synclink_gt.c
2928
add_wait_queue(&w->q, &w->wait);
drivers/tty/synclink_gt.c
2936
remove_wait_queue(&cw->q, &cw->wait);
drivers/tty/synclink_gt.c
2952
wake_up_interruptible(&(*head)->q);
drivers/tty/vt/consolemap.c
330
struct uni_pagedict *p, *q = NULL;
drivers/tty/vt/consolemap.c
336
if (p && p != q) {
drivers/tty/vt/consolemap.c
339
q = p;
drivers/tty/vt/vt.c
608
u16 *q = p;
drivers/tty/vt/vt.c
611
if (p > q)
drivers/tty/vt/vt.c
612
vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
drivers/tty/vt/vt.c
614
q = p;
drivers/tty/vt/vt.c
621
if (p > q)
drivers/tty/vt/vt.c
622
vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
drivers/tty/vt/vt.c
710
u16 *q = p;
drivers/tty/vt/vt.c
716
a = scr_readw(q);
drivers/tty/vt/vt.c
718
scr_writew(a, q);
drivers/tty/vt/vt.c
719
q++;
drivers/tty/vt/vt.c
723
a = scr_readw(q);
drivers/tty/vt/vt.c
727
scr_writew(a, q);
drivers/tty/vt/vt.c
728
q++;
drivers/tty/vt/vt.c
732
a = scr_readw(q);
drivers/tty/vt/vt.c
736
scr_writew(a, q);
drivers/tty/vt/vt.c
737
q++;
drivers/ufs/core/ufs_bsg.c
245
struct request_queue *q;
drivers/ufs/core/ufs_bsg.c
259
q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, ufs_bsg_request,
drivers/ufs/core/ufs_bsg.c
261
if (IS_ERR(q)) {
drivers/ufs/core/ufs_bsg.c
262
ret = PTR_ERR(q);
drivers/ufs/core/ufs_bsg.c
267
hba->bsg_queue = q;
drivers/ufs/core/ufshcd-crypto.c
233
void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
drivers/ufs/core/ufshcd-crypto.c
236
blk_crypto_register(&hba->crypto_profile, q);
drivers/ufs/core/ufshcd-crypto.h
110
struct request_queue *q) { }
drivers/ufs/core/ufshcd-crypto.h
75
void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q);
drivers/ufs/core/ufshcd-priv.h
386
static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
drivers/ufs/core/ufshcd-priv.h
387
__must_hold(&q->sq_lock)
drivers/ufs/core/ufshcd-priv.h
391
q->sq_tail_slot++;
drivers/ufs/core/ufshcd-priv.h
392
if (q->sq_tail_slot == q->max_entries)
drivers/ufs/core/ufshcd-priv.h
393
q->sq_tail_slot = 0;
drivers/ufs/core/ufshcd-priv.h
394
val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
drivers/ufs/core/ufshcd-priv.h
395
writel(val, q->mcq_sq_tail);
drivers/ufs/core/ufshcd-priv.h
398
static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
drivers/ufs/core/ufshcd-priv.h
400
u32 val = readl(q->mcq_cq_tail);
drivers/ufs/core/ufshcd-priv.h
402
q->cq_tail_slot = val / sizeof(struct cq_entry);
drivers/ufs/core/ufshcd-priv.h
405
static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
drivers/ufs/core/ufshcd-priv.h
407
return q->cq_head_slot == q->cq_tail_slot;
drivers/ufs/core/ufshcd-priv.h
410
static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
drivers/ufs/core/ufshcd-priv.h
412
q->cq_head_slot++;
drivers/ufs/core/ufshcd-priv.h
413
if (q->cq_head_slot == q->max_entries)
drivers/ufs/core/ufshcd-priv.h
414
q->cq_head_slot = 0;
drivers/ufs/core/ufshcd-priv.h
417
static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
drivers/ufs/core/ufshcd-priv.h
419
writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
drivers/ufs/core/ufshcd-priv.h
422
static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
drivers/ufs/core/ufshcd-priv.h
424
struct cq_entry *cqe = q->cqe_base_addr;
drivers/ufs/core/ufshcd-priv.h
426
return cqe + q->cq_head_slot;
drivers/ufs/core/ufshcd-priv.h
429
static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q)
drivers/ufs/core/ufshcd-priv.h
431
u32 val = readl(q->mcq_sq_head);
drivers/ufs/core/ufshcd.c
5336
struct request_queue *q = sdev->request_queue;
drivers/ufs/core/ufshcd.c
5358
ufshcd_crypto_register(hba, q);
drivers/ufs/core/ufshcd.c
5736
struct scsi_device *sdev = rq->q->queuedata;
drivers/ufs/core/ufshcd.c
5763
struct scsi_device *sdev = rq->q->queuedata;
drivers/ufs/core/ufshcd.c
646
struct scsi_device *sdev = req->q->queuedata;
drivers/ufs/core/ufshcd.c
6585
struct request_queue *q;
drivers/ufs/core/ufshcd.c
6619
q = sdev->request_queue;
drivers/ufs/core/ufshcd.c
6620
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
drivers/ufs/core/ufshcd.c
6621
q->rpm_status == RPM_SUSPENDING))
drivers/ufs/core/ufshcd.c
6622
pm_request_resume(q->dev);
drivers/ufs/core/ufshcd.c
7263
struct request_queue *q = hba->tmf_queue;
drivers/ufs/core/ufshcd.c
7273
req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
drivers/usb/core/devio.c
688
struct list_head *p, *q, hitlist;
drivers/usb/core/devio.c
693
list_for_each_safe(p, q, &ps->async_pending)
drivers/usb/fotg210/fotg210-hcd.c
3331
union fotg210_shadow *q = &fotg210->pshadow[frame];
drivers/usb/fotg210/fotg210-hcd.c
3335
while (q->ptr) {
drivers/usb/fotg210/fotg210-hcd.c
3338
hw = q->qh->hw;
drivers/usb/fotg210/fotg210-hcd.c
3341
usecs += q->qh->usecs;
drivers/usb/fotg210/fotg210-hcd.c
3345
usecs += q->qh->c_usecs;
drivers/usb/fotg210/fotg210-hcd.c
3347
q = &q->qh->qh_next;
drivers/usb/fotg210/fotg210-hcd.c
3354
if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
drivers/usb/fotg210/fotg210-hcd.c
3357
hw_p = &q->fstn->hw_next;
drivers/usb/fotg210/fotg210-hcd.c
3358
q = &q->fstn->fstn_next;
drivers/usb/fotg210/fotg210-hcd.c
3361
if (q->itd->hw_transaction[uframe])
drivers/usb/fotg210/fotg210-hcd.c
3362
usecs += q->itd->stream->usecs;
drivers/usb/fotg210/fotg210-hcd.c
3363
hw_p = &q->itd->hw_next;
drivers/usb/fotg210/fotg210-hcd.c
3364
q = &q->itd->itd_next;
drivers/usb/fotg210/fotg210-hcd.c
4575
union fotg210_shadow q, *q_p;
drivers/usb/fotg210/fotg210-hcd.c
4581
q.ptr = q_p->ptr;
drivers/usb/fotg210/fotg210-hcd.c
4585
while (q.ptr) {
drivers/usb/fotg210/fotg210-hcd.c
4596
if (q.itd->hw_transaction[uf] &
drivers/usb/fotg210/fotg210-hcd.c
4601
q_p = &q.itd->itd_next;
drivers/usb/fotg210/fotg210-hcd.c
4602
hw_p = &q.itd->hw_next;
drivers/usb/fotg210/fotg210-hcd.c
4604
q.itd->hw_next);
drivers/usb/fotg210/fotg210-hcd.c
4605
q = *q_p;
drivers/usb/fotg210/fotg210-hcd.c
4615
*q_p = q.itd->itd_next;
drivers/usb/fotg210/fotg210-hcd.c
4616
*hw_p = q.itd->hw_next;
drivers/usb/fotg210/fotg210-hcd.c
4617
type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
drivers/usb/fotg210/fotg210-hcd.c
4619
modified = itd_complete(fotg210, q.itd);
drivers/usb/fotg210/fotg210-hcd.c
4620
q = *q_p;
drivers/usb/fotg210/fotg210-hcd.c
4624
type, frame, q.ptr);
drivers/usb/fotg210/fotg210-hcd.c
4629
q.ptr = NULL;
drivers/usb/host/ehci-sched.c
2358
union ehci_shadow q, *q_p;
drivers/usb/host/ehci-sched.c
2382
q.ptr = q_p->ptr;
drivers/usb/host/ehci-sched.c
2386
while (q.ptr != NULL) {
drivers/usb/host/ehci-sched.c
2398
if (q.itd->hw_transaction[uf] &
drivers/usb/host/ehci-sched.c
2403
q_p = &q.itd->itd_next;
drivers/usb/host/ehci-sched.c
2404
hw_p = &q.itd->hw_next;
drivers/usb/host/ehci-sched.c
2406
q.itd->hw_next);
drivers/usb/host/ehci-sched.c
2407
q = *q_p;
drivers/usb/host/ehci-sched.c
2418
*q_p = q.itd->itd_next;
drivers/usb/host/ehci-sched.c
2420
q.itd->hw_next != EHCI_LIST_END(ehci))
drivers/usb/host/ehci-sched.c
2421
*hw_p = q.itd->hw_next;
drivers/usb/host/ehci-sched.c
2424
type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
drivers/usb/host/ehci-sched.c
2426
modified = itd_complete(ehci, q.itd);
drivers/usb/host/ehci-sched.c
2427
q = *q_p;
drivers/usb/host/ehci-sched.c
2439
&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
drivers/usb/host/ehci-sched.c
2441
q_p = &q.sitd->sitd_next;
drivers/usb/host/ehci-sched.c
2442
hw_p = &q.sitd->hw_next;
drivers/usb/host/ehci-sched.c
2443
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
drivers/usb/host/ehci-sched.c
2444
q = *q_p;
drivers/usb/host/ehci-sched.c
2453
*q_p = q.sitd->sitd_next;
drivers/usb/host/ehci-sched.c
2455
q.sitd->hw_next != EHCI_LIST_END(ehci))
drivers/usb/host/ehci-sched.c
2456
*hw_p = q.sitd->hw_next;
drivers/usb/host/ehci-sched.c
2459
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
drivers/usb/host/ehci-sched.c
2461
modified = sitd_complete(ehci, q.sitd);
drivers/usb/host/ehci-sched.c
2462
q = *q_p;
drivers/usb/host/ehci-sched.c
2466
type, frame, q.ptr);
drivers/usb/host/ehci-sched.c
2472
q.ptr = NULL;
drivers/usb/host/oxu210hp-hcd.c
2246
union ehci_shadow *q = &oxu->pshadow[frame];
drivers/usb/host/oxu210hp-hcd.c
2249
while (q->ptr) {
drivers/usb/host/oxu210hp-hcd.c
2254
if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
drivers/usb/host/oxu210hp-hcd.c
2255
usecs += q->qh->usecs;
drivers/usb/host/oxu210hp-hcd.c
2257
if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
drivers/usb/host/oxu210hp-hcd.c
2258
usecs += q->qh->c_usecs;
drivers/usb/host/oxu210hp-hcd.c
2259
hw_p = &q->qh->hw_next;
drivers/usb/host/oxu210hp-hcd.c
2260
q = &q->qh->qh_next;
drivers/usb/host/oxu210hp-hcd.c
2668
union ehci_shadow q, *q_p;
drivers/usb/host/oxu210hp-hcd.c
2682
q.ptr = q_p->ptr;
drivers/usb/host/oxu210hp-hcd.c
2686
while (q.ptr != NULL) {
drivers/usb/host/oxu210hp-hcd.c
2692
temp.qh = qh_get(q.qh);
drivers/usb/host/oxu210hp-hcd.c
2693
type = Q_NEXT_TYPE(q.qh->hw_next);
drivers/usb/host/oxu210hp-hcd.c
2694
q = q.qh->qh_next;
drivers/usb/host/oxu210hp-hcd.c
2702
type, frame, q.ptr);
drivers/usb/host/oxu210hp-hcd.c
2703
q.ptr = NULL;
drivers/usb/musb/musb_host.h
46
static inline struct musb_qh *first_qh(struct list_head *q)
drivers/usb/musb/musb_host.h
48
if (list_empty(q))
drivers/usb/musb/musb_host.h
50
return list_entry(q->next, struct musb_qh, ring);
drivers/usb/serial/digi_acceleport.c
342
wait_queue_head_t *q, long timeout,
drivers/usb/serial/digi_acceleport.c
348
prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
drivers/usb/serial/digi_acceleport.c
351
finish_wait(q, &wait);
drivers/usb/storage/transport.c
554
disk = scsi_cmd_to_rq(srb)->q->disk;
drivers/video/console/mdacon.c
204
u16 *q, q_save;
drivers/video/console/mdacon.c
209
q = mda_vram_base + 0x01000 / 2;
drivers/video/console/mdacon.c
212
q_save = scr_readw(q);
drivers/video/console/mdacon.c
230
scr_writew(0xA55A, q);
drivers/video/console/mdacon.c
232
if (scr_readw(q) == 0xA55A)
drivers/video/console/mdacon.c
235
scr_writew(0x5AA5, q);
drivers/video/console/mdacon.c
237
if (scr_readw(q) == 0x5AA5)
drivers/video/console/mdacon.c
241
scr_writew(q_save, q);
drivers/video/fbdev/aty/mach64_ct.c
209
u32 q;
drivers/video/fbdev/aty/mach64_ct.c
214
q = par->ref_clk_per * pll->pll_ref_div * 4 / vclk_per;
drivers/video/fbdev/aty/mach64_ct.c
215
if (q < 16*8 || q > 255*8) {
drivers/video/fbdev/aty/mach64_ct.c
219
pll->vclk_post_div = (q < 128*8);
drivers/video/fbdev/aty/mach64_ct.c
220
pll->vclk_post_div += (q < 64*8);
drivers/video/fbdev/aty/mach64_ct.c
221
pll->vclk_post_div += (q < 32*8);
drivers/video/fbdev/aty/mach64_ct.c
225
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
drivers/video/fbdev/aty/mach64_ct.c
405
u32 q, memcntl, trp;
drivers/video/fbdev/aty/mach64_ct.c
528
q = par->ref_clk_per * pll->ct.pll_ref_div * 8 /
drivers/video/fbdev/aty/mach64_ct.c
531
if (q < 16*8 || q > 255*8) {
drivers/video/fbdev/aty/mach64_ct.c
535
xpost_div = (q < 128*8);
drivers/video/fbdev/aty/mach64_ct.c
536
xpost_div += (q < 64*8);
drivers/video/fbdev/aty/mach64_ct.c
537
xpost_div += (q < 32*8);
drivers/video/fbdev/aty/mach64_ct.c
540
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
drivers/video/fbdev/aty/mach64_ct.c
579
q = par->ref_clk_per * pll->ct.pll_ref_div * 4 / par->mclk_per;
drivers/video/fbdev/aty/mach64_ct.c
580
if (q < 16*8 || q > 255*8) {
drivers/video/fbdev/aty/mach64_ct.c
584
mpost_div = (q < 128*8);
drivers/video/fbdev/aty/mach64_ct.c
585
mpost_div += (q < 64*8);
drivers/video/fbdev/aty/mach64_ct.c
586
mpost_div += (q < 32*8);
drivers/video/fbdev/aty/mach64_ct.c
589
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
drivers/video/fbdev/core/fbcon.c
608
unsigned short *save = NULL, *r, *q;
drivers/video/fbdev/core/fbcon.c
624
q = (unsigned short *) (vc->vc_origin +
drivers/video/fbdev/core/fbcon.c
627
for (r = q - logo_lines * cols; r < q; r++)
drivers/video/fbdev/core/fbcon.c
630
if (r != q && new_rows >= rows + logo_lines) {
drivers/video/fbdev/core/fbcon.c
636
r = q - step;
drivers/video/fbdev/core/fbcon.c
639
r = q;
drivers/video/fbdev/core/fbcon.c
642
if (r == q) {
drivers/video/fbdev/core/fbcon.c
644
r = q - step - cols;
drivers/video/fbdev/core/fbcon.c
669
q = (unsigned short *) (vc->vc_origin +
drivers/video/fbdev/core/fbcon.c
672
scr_memcpyw(q, save, array3_size(logo_lines, new_cols, 2));
drivers/video/fbdev/hgafb.c
282
void __iomem *p, *q;
drivers/video/fbdev/hgafb.c
299
q = hga_vram + 0x01000;
drivers/video/fbdev/hgafb.c
301
p_save = readw(p); q_save = readw(q);
drivers/video/fbdev/matrox/matroxfb_DAC1064.c
848
u_int32_t q;
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
414
unsigned itc, ec, q, sc;
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
436
q = (ptr[2] >> 2) & 0x3;
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
452
(itc << 7) | (ec << 4) | (q << 2) | (sc << 0));
drivers/video/sticore.c
713
unsigned char *n, *p, *q;
drivers/video/sticore.c
726
q = (unsigned char *) f->raw;
drivers/video/sticore.c
728
*p = *q++;
drivers/virt/fsl_hypervisor.c
472
uint32_t q[QSIZE];
drivers/virt/fsl_hypervisor.c
499
dbq->q[dbq->tail] = doorbell;
drivers/virt/fsl_hypervisor.c
637
dbell = dbq->q[dbq->head];
drivers/xen/events/events_fifo.c
105
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
drivers/xen/events/events_fifo.c
112
q->head[i] = 0;
drivers/xen/events/events_fifo.c
277
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
drivers/xen/events/events_fifo.c
282
head = q->head[priority];
drivers/xen/events/events_fifo.c
314
q->head[priority] = head;
drivers/xen/events/events_fifo.c
322
unsigned q;
drivers/xen/events/events_fifo.c
329
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
drivers/xen/events/events_fifo.c
330
consume_one_event(cpu, ctrl, control_block, q, &ready);
drivers/xen/gntdev-dmabuf.c
668
struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
drivers/xen/gntdev-dmabuf.c
671
list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
drivers/xen/gntdev-dmabuf.c
713
struct gntdev_dmabuf *q, *gntdev_dmabuf;
drivers/xen/gntdev-dmabuf.c
715
list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
fs/9p/v9fs_vfs.h
48
#define QID2INO(q) ((ino_t) (((q)->path+2) ^ (((q)->path) >> 32)))
fs/9p/v9fs_vfs.h
50
#define QID2INO(q) ((ino_t) ((q)->path+2))
fs/afs/addr_list.c
156
const char *q, *stop;
fs/afs/addr_list.c
168
q = memchr(p, ']', end - p);
fs/afs/addr_list.c
170
for (q = p; q < end; q++)
fs/afs/addr_list.c
171
if (*q == '+' || *q == delim)
fs/afs/addr_list.c
175
if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) {
fs/afs/addr_list.c
177
} else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) {
fs/afs/addr_list.c
184
p = q;
fs/afs/addr_list.c
190
if (q < end && *q == ']')
fs/afs/addr_prefs.c
87
q = memchr(p, ']', end - p);
fs/afs/addr_prefs.c
88
if (!q) {
fs/afs/addr_prefs.c
93
for (q = p; q < end; q++)
fs/afs/addr_prefs.c
94
if (*q == '/')
fs/autofs/expire.c
102
q = positive_after(root, prev);
fs/autofs/expire.c
106
return q;
fs/autofs/expire.c
98
struct dentry *q;
fs/ceph/caps.c
929
struct rb_node *q;
fs/ceph/caps.c
933
for (q = rb_first(&ci->i_caps); q != p;
fs/ceph/caps.c
934
q = rb_next(q)) {
fs/ceph/caps.c
935
cap = rb_entry(q, struct ceph_cap,
fs/configfs/dir.c
1639
struct list_head *p, *q = &cursor->s_sibling;
fs/configfs/dir.c
1646
list_move(q, &parent_sd->s_children);
fs/configfs/dir.c
1647
for (p = q->next; p != &parent_sd->s_children; p = p->next) {
fs/configfs/dir.c
1687
list_move(q, p);
fs/configfs/dir.c
1688
p = q;
fs/dcache.c
1883
struct qstr q;
fs/dcache.c
1885
q.name = name;
fs/dcache.c
1886
q.hash_len = hashlen_string(parent, name);
fs/dcache.c
1887
return d_alloc(parent, &q);
fs/efivarfs/super.c
195
struct qstr q;
fs/efivarfs/super.c
198
q.name = name;
fs/efivarfs/super.c
199
q.len = strlen(name);
fs/efivarfs/super.c
201
err = efivarfs_d_hash(parent, &q);
fs/efivarfs/super.c
205
d = d_alloc(parent, &q);
fs/erofs/zdata.c
1593
struct z_erofs_decompressqueue *q;
fs/erofs/zdata.c
1596
q = kvzalloc_obj(*q, GFP_KERNEL | __GFP_NOWARN);
fs/erofs/zdata.c
1597
if (!q) {
fs/erofs/zdata.c
1602
kthread_init_work(&q->u.kthread_work,
fs/erofs/zdata.c
1605
INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
fs/erofs/zdata.c
1609
q = fgq;
fs/erofs/zdata.c
1612
q->eio = false;
fs/erofs/zdata.c
1613
q->sync = true;
fs/erofs/zdata.c
1615
q->sb = sb;
fs/erofs/zdata.c
1616
q->head = Z_EROFS_PCLUSTER_TAIL;
fs/erofs/zdata.c
1617
return q;
fs/erofs/zdata.c
1639
struct z_erofs_decompressqueue *q = bio->bi_private;
fs/erofs/zdata.c
1648
if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio))
fs/erofs/zdata.c
1656
q->eio = true;
fs/erofs/zdata.c
1657
z_erofs_decompress_kickoff(q, -1);
fs/erofs/zdata.c
1669
struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
fs/erofs/zdata.c
1679
q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
fs/erofs/zdata.c
1680
q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg);
fs/erofs/zdata.c
1682
qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
fs/erofs/zdata.c
1683
qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
fs/erofs/zdata.c
1686
q[JQ_SUBMIT]->head = next = f->head;
fs/erofs/zdata.c
1756
bio->bi_private = q[JQ_SUBMIT];
fs/erofs/zdata.c
1791
kvfree(q[JQ_SUBMIT]);
fs/erofs/zdata.c
1794
z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios);
fs/ext2/inode.c
1096
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
fs/ext2/inode.c
1101
for ( ; p < q ; p++) {
fs/ext2/inode.c
1136
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
fs/ext2/inode.c
1143
for ( ; p < q ; p++) {
fs/ext2/inode.c
1168
ext2_free_data(inode, p, q);
fs/ext2/inode.c
995
static inline int all_zeroes(__le32 *p, __le32 *q)
fs/ext2/inode.c
997
while (p < q)
fs/ext4/indirect.c
754
static inline int all_zeroes(__le32 *p, __le32 *q)
fs/ext4/indirect.c
756
while (p < q)
fs/ext4/namei.c
1323
struct dx_map_entry *p, *q, *top = map + count - 1;
fs/ext4/namei.c
1330
for (p = top, q = p - count; q >= map; p--, q--)
fs/ext4/namei.c
1331
if (p->hash < q->hash)
fs/ext4/namei.c
1332
swap(*p, *q);
fs/ext4/namei.c
1337
q = top;
fs/ext4/namei.c
1338
while (q-- > map) {
fs/ext4/namei.c
1339
if (q[1].hash >= q[0].hash)
fs/ext4/namei.c
1341
swap(*(q+1), *q);
fs/ext4/namei.c
782
struct dx_entry *at, *entries, *p, *q, *m;
fs/ext4/namei.c
878
q = entries + count - 1;
fs/ext4/namei.c
879
while (p <= q) {
fs/ext4/namei.c
880
m = p + (q - p) / 2;
fs/ext4/namei.c
883
q = m - 1;
fs/f2fs/checkpoint.c
2057
wait_queue_head_t *q = &cprc->ckpt_wait_queue;
fs/f2fs/checkpoint.c
2065
wait_event_interruptible(*q,
fs/f2fs/segment.c
1908
wait_queue_head_t *q = &dcc->discard_wait_queue;
fs/f2fs/segment.c
1916
wait_event_freezable_timeout(*q,
fs/f2fs/segment.c
582
wait_queue_head_t *q = &fcc->flush_wait_queue;
fs/f2fs/segment.c
607
wait_event_interruptible(*q,
fs/fs_context.c
439
char *q = kasprintf(GFP_KERNEL, "%c %s%s%pV\n", level,
fs/fs_context.c
453
log->buffer[index] = q ? q : "OOM: Can't store error string";
fs/fs_context.c
454
if (q)
fs/fs_pin.c
88
struct hlist_node *q;
fs/fs_pin.c
90
q = READ_ONCE(p->first);
fs/fs_pin.c
91
if (!q) {
fs/fs_pin.c
95
pin_kill(hlist_entry(q, struct fs_pin, s_list));
fs/fuse/virtio_fs.c
862
unsigned int q, cpu, nr_masks;
fs/fuse/virtio_fs.c
870
for (q = 0; q < fs->num_request_queues; q++) {
fs/fuse/virtio_fs.c
871
mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
fs/fuse/virtio_fs.c
876
fs->mq_map[cpu] = q + VQ_REQUEST;
fs/fuse/virtio_fs.c
890
for (q = 0; q < fs->num_request_queues; q++) {
fs/fuse/virtio_fs.c
891
for_each_cpu(cpu, &masks[q % nr_masks])
fs/fuse/virtio_fs.c
892
fs->mq_map[cpu] = q + VQ_REQUEST;
fs/gfs2/quota.c
1004
memset(&q, 0, sizeof(struct gfs2_quota));
fs/gfs2/quota.c
1006
error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
fs/gfs2/quota.c
1013
qlvb->qb_limit = q.qu_limit;
fs/gfs2/quota.c
1014
qlvb->qb_warn = q.qu_warn;
fs/gfs2/quota.c
1015
qlvb->qb_value = q.qu_value;
fs/gfs2/quota.c
843
struct gfs2_quota q;
fs/gfs2/quota.c
853
memset(&q, 0, sizeof(struct gfs2_quota));
fs/gfs2/quota.c
854
err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
fs/gfs2/quota.c
858
loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
fs/gfs2/quota.c
859
be64_add_cpu(&q.qu_value, change);
fs/gfs2/quota.c
860
if (((s64)be64_to_cpu(q.qu_value)) < 0)
fs/gfs2/quota.c
861
q.qu_value = 0; /* Never go negative on quota usage */
fs/gfs2/quota.c
863
qd->qd_qb.qb_value = q.qu_value;
fs/gfs2/quota.c
866
q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
fs/gfs2/quota.c
867
qd->qd_qb.qb_warn = q.qu_warn;
fs/gfs2/quota.c
870
q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
fs/gfs2/quota.c
871
qd->qd_qb.qb_limit = q.qu_limit;
fs/gfs2/quota.c
874
q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
fs/gfs2/quota.c
875
qd->qd_qb.qb_value = q.qu_value;
fs/gfs2/quota.c
880
err = gfs2_write_disk_quota(sdp, &q, loc);
fs/gfs2/quota.c
999
struct gfs2_quota q;
fs/hpfs/alloc.c
122
unsigned i, q;
fs/hpfs/alloc.c
138
q = nr + n; b = 0;
fs/hpfs/alloc.c
139
while ((a = tstbits(bmp, q, n + forward)) != 0) {
fs/hpfs/alloc.c
140
q += a;
fs/hpfs/alloc.c
141
if (n != 1) q = ((q-1)&~(n-1))+n;
fs/hpfs/alloc.c
143
if (q>>5 != nr>>5) {
fs/hpfs/alloc.c
145
q = nr & 0x1f;
fs/hpfs/alloc.c
147
} else if (q > nr) break;
fs/hpfs/alloc.c
150
ret = bs + q;
fs/hpfs/alloc.c
159
q = i<<5;
fs/hpfs/alloc.c
163
q--; k <<= 1;
fs/hpfs/alloc.c
166
if (n != 1) q = ((q-1)&~(n-1))+n;
fs/hpfs/alloc.c
167
while ((a = tstbits(bmp, q, n + forward)) != 0) {
fs/hpfs/alloc.c
168
q += a;
fs/hpfs/alloc.c
169
if (n != 1) q = ((q-1)&~(n-1))+n;
fs/hpfs/alloc.c
170
if (q>>5 > i) break;
fs/hpfs/alloc.c
173
ret = bs + q;
fs/hpfs/ea.c
289
secno q = hpfs_alloc_sector(s, fno, 1, 0);
fs/hpfs/ea.c
290
if (!q) goto bail;
fs/hpfs/ea.c
291
fnode->ea_secno = cpu_to_le32(q);
fs/jffs2/compr_rubin.c
108
while ((rs->q >= UPPER_BIT_RUBIN) ||
fs/jffs2/compr_rubin.c
109
((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
fs/jffs2/compr_rubin.c
112
ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
fs/jffs2/compr_rubin.c
115
rs->q &= LOWER_BITS_RUBIN;
fs/jffs2/compr_rubin.c
116
rs->q <<= 1;
fs/jffs2/compr_rubin.c
132
rs->q += i0;
fs/jffs2/compr_rubin.c
144
pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1);
fs/jffs2/compr_rubin.c
145
rs->q &= LOWER_BITS_RUBIN;
fs/jffs2/compr_rubin.c
146
rs->q <<= 1;
fs/jffs2/compr_rubin.c
164
unsigned long q)
fs/jffs2/compr_rubin.c
177
q &= lower_bits_rubin;
fs/jffs2/compr_rubin.c
178
q <<= 1;
fs/jffs2/compr_rubin.c
180
} while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));
fs/jffs2/compr_rubin.c
183
rs->q = q;
fs/jffs2/compr_rubin.c
202
unsigned long p = rs->p, q = rs->q;
fs/jffs2/compr_rubin.c
206
if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
fs/jffs2/compr_rubin.c
207
__do_decode(rs, p, q);
fs/jffs2/compr_rubin.c
216
threshold = rs->q + i0;
fs/jffs2/compr_rubin.c
219
rs->q += i0;
fs/jffs2/compr_rubin.c
39
unsigned long q;
fs/jffs2/compr_rubin.c
92
rs->q = 0;
fs/minix/itree_common.c
215
static inline int all_zeroes(block_t *p, block_t *q)
fs/minix/itree_common.c
217
while (p < q)
fs/minix/itree_common.c
263
static inline void free_data(struct inode *inode, block_t *p, block_t *q)
fs/minix/itree_common.c
267
for ( ; p < q ; p++) {
fs/minix/itree_common.c
276
static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
fs/minix/itree_common.c
282
for ( ; p < q ; p++) {
fs/minix/itree_common.c
297
free_data(inode, p, q);
fs/namei.c
3726
struct dentry *p = p1, *q = p2, *r;
fs/namei.c
3738
while ((r = q->d_parent) != p1 && r != p && r != q)
fs/namei.c
3739
q = r;
fs/namei.c
3744
return q;
fs/namespace.c
2635
struct mount *q;
fs/namespace.c
2640
q = __lookup_mnt(&child->mnt_parent->mnt,
fs/namespace.c
2643
if (q) {
fs/namespace.c
2649
mnt_change_mountpoint(r, mp, q);
fs/namespace.c
4236
struct mount *p, *q;
fs/namespace.c
4276
q = new;
fs/namespace.c
4278
mnt_add_to_ns(new_ns, q);
fs/namespace.c
4282
new_fs->root.mnt = mntget(&q->mnt);
fs/namespace.c
4286
new_fs->pwd.mnt = mntget(&q->mnt);
fs/namespace.c
4291
q = next_mnt(q, new);
fs/namespace.c
4292
if (!q)
fs/namespace.c
4295
while (p->mnt.mnt_root != q->mnt.mnt_root)
fs/nfs/nfs4proc.c
7659
wait_queue_head_t *q = &clp->cl_lock_waitq;
fs/nfs/nfs4proc.c
7674
add_wait_queue(q, &waiter.wait);
fs/nfs/nfs4proc.c
7686
remove_wait_queue(q, &waiter.wait);
fs/nfsd/xdr4.h
610
__be64 *q = (__be64 *)p;
fs/nfsd/xdr4.h
612
*q = (__force __be64)devid->fsid_idx;
fs/nfsd/xdr4.h
622
__be64 *q = (__be64 *)p;
fs/nfsd/xdr4.h
624
devid->fsid_idx = (__force u64)(*q);
fs/orangefs/orangefs-bufmap.c
101
spin_unlock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
103
spin_lock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
114
else if (left <= 0 && waitqueue_active(&m->q))
fs/orangefs/orangefs-bufmap.c
115
__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
fs/orangefs/orangefs-bufmap.c
127
spin_lock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
13
wait_queue_head_t q;
fs/orangefs/orangefs-bufmap.c
135
spin_unlock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
20
.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
fs/orangefs/orangefs-bufmap.c
24
.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
fs/orangefs/orangefs-bufmap.c
30
spin_lock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
33
wake_up_all_locked(&m->q);
fs/orangefs/orangefs-bufmap.c
34
spin_unlock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
39
spin_lock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
41
spin_unlock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
47
spin_lock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
51
__add_wait_queue_entry_tail(&m->q, &wait);
fs/orangefs/orangefs-bufmap.c
57
spin_unlock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
59
spin_lock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
61
__remove_wait_queue(&m->q, &wait);
fs/orangefs/orangefs-bufmap.c
65
spin_unlock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
71
spin_lock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
75
wake_up_locked(&m->q);
fs/orangefs/orangefs-bufmap.c
77
wake_up_all_locked(&m->q);
fs/orangefs/orangefs-bufmap.c
78
spin_unlock(&m->q.lock);
fs/orangefs/orangefs-bufmap.c
89
__add_wait_queue_entry_tail_exclusive(&m->q, &wait);
fs/pnode.c
503
struct mount *m, *p, *q;
fs/pnode.c
510
q = propagation_next(p, p);
fs/pnode.c
511
while (q) {
fs/pnode.c
512
struct mount *child = __lookup_mnt(&q->mnt,
fs/pnode.c
523
q = skip_propagation_subtree(q, p);
fs/pnode.c
530
q = propagation_next(q, p);
fs/proc/base.c
539
int q;
fs/proc/base.c
542
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
fs/proc/base.c
543
unsigned long bt = lr->backtrace[q];
fs/proc/bootconfig.c
31
char q;
fs/proc/bootconfig.c
50
q = '\'';
fs/proc/bootconfig.c
52
q = '"';
fs/proc/bootconfig.c
54
q, val, q, xbc_node_is_array(vnode) ? ", " : "\n");
fs/smb/client/cached_dir.c
545
struct cached_dir_dentry *tmp_list, *q;
fs/smb/client/cached_dir.c
582
list_for_each_entry_safe(tmp_list, q, &entry, entry) {
fs/smb/client/cached_dir.c
599
struct cached_fid *cfid, *q;
fs/smb/client/cached_dir.c
610
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
fs/smb/client/cached_dir.c
721
struct cached_dirent *dirent, *q;
fs/smb/client/cached_dir.c
732
list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
fs/smb/client/cached_dir.c
761
struct cached_fid *cfid, *q;
fs/smb/client/cached_dir.c
770
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
fs/smb/client/cached_dir.c
788
list_for_each_entry_safe(cfid, q, &entry, entry) {
fs/smb/client/cached_dir.c
839
struct cached_fid *cfid, *q;
fs/smb/client/cached_dir.c
848
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
fs/smb/client/cached_dir.c
853
list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
fs/smb/client/cached_dir.c
860
list_for_each_entry_safe(cfid, q, &entry, entry) {
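
In the fs/smb/client/cached_dir.c lines above, q is the spare cursor of list_for_each_entry_safe(): it caches the next entry so the current one can be unlinked and freed mid-walk. A toy version of the same safety trick on a plain singly linked list (not the kernel's struct list_head):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static void free_all(struct node **head)
{
	struct node *cur, *q;

	for (cur = *head; cur; cur = q) {
		q = cur->next;   /* grab the successor before freeing cur */
		printf("freeing node %d\n", cur->id);
		free(cur);
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 3; i >= 1; i--) {   /* build list 1 -> 2 -> 3 */
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}
	free_all(&head);
	return 0;
}
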
fs/smb/client/dir.c
895
static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
fs/smb/client/dir.c
903
for (i = 0; i < q->len; i += charlen) {
fs/smb/client/dir.c
904
charlen = codepage->char2uni(&q->name[i], q->len - i, &c);
fs/smb/client/dir.c
910
q->hash = end_name_hash(hash);
fs/smb/client/misc.c
568
struct tcon_list *tmp_list, *q;
fs/smb/client/misc.c
591
list_for_each_entry_safe(tmp_list, q, &tcon_head, entry) {
fs/ufs/inode.c
132
Indirect chain[4], *q = chain;
fs/ufs/inode.c
150
if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
fs/ufs/inode.c
152
if (!q->key32)
fs/ufs/inode.c
160
fs32_to_cpu(sb, q->key32) + (n>>shift));
fs/ufs/inode.c
164
if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
fs/ufs/inode.c
166
if (!q->key32)
fs/ufs/inode.c
169
res = fs32_to_cpu(sb, q->key32);
fs/ufs/inode.c
173
if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
fs/ufs/inode.c
175
if (!q->key64)
fs/ufs/inode.c
184
fs64_to_cpu(sb, q->key64) + (n>>shift));
fs/ufs/inode.c
188
if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
fs/ufs/inode.c
190
if (!q->key64)
fs/ufs/inode.c
193
res = fs64_to_cpu(sb, q->key64);
fs/ufs/inode.c
197
while (q > chain) {
fs/ufs/inode.c
198
brelse(q->bh);
fs/ufs/inode.c
199
q--;
fs/ufs/inode.c
204
while (q > chain) {
fs/ufs/inode.c
205
brelse(q->bh);
fs/ufs/inode.c
206
q--;
fs/xfs/xfs_dquot.c
123
struct xfs_quotainfo *q = mp->m_quotainfo;
fs/xfs/xfs_dquot.c
128
defq = xfs_get_defquota(q, xfs_dquot_type(dq));
fs/xfs/xfs_dquot.c
1499
struct xfs_dqtrx *q)
fs/xfs/xfs_dquot.c
1506
for (i = 0; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++) {
fs/xfs/xfs_dquot.c
1510
ASSERT(q[i].qt_dquot != q[j].qt_dquot);
fs/xfs/xfs_dquot.c
1515
sort(q, i, sizeof(struct xfs_dqtrx), xfs_dqtrx_cmp, NULL);
fs/xfs/xfs_dquot.c
1517
mutex_lock(&q[0].qt_dquot->q_qlock);
fs/xfs/xfs_dquot.c
1518
for (i = 1; i < XFS_QM_TRANS_MAXDQS && q[i].qt_dquot != NULL; i++)
fs/xfs/xfs_dquot.c
1519
mutex_lock_nested(&q[i].qt_dquot->q_qlock,
fs/xfs/xfs_dquot.c
233
struct xfs_quotainfo *q = mp->m_quotainfo;
fs/xfs/xfs_dquot.c
266
curid = id - (id % q->qi_dqperchunk);
fs/xfs/xfs_dquot.c
267
memset(d, 0, BBTOB(q->qi_dqchunklen));
fs/xfs/xfs_dquot.c
268
for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
fs/xfs/xfs_dquot.c
303
xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
fs/xfs/xfs_dquot.h
223
void xfs_dqlockn(struct xfs_dqtrx *q);
fs/xfs/xfs_qm_syscalls.c
278
struct xfs_quotainfo *q = mp->m_quotainfo;
fs/xfs/xfs_qm_syscalls.c
305
defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
fs/xfs/xfs_quotaops.c
62
struct xfs_quotainfo *q = mp->m_quotainfo;
fs/xfs/xfs_quotaops.c
68
state->s_incoredqs = min_t(uint64_t, q->qi_dquots, UINT_MAX);
fs/xfs/xfs_trans_dquot.c
391
struct xfs_dqtrx *q)
fs/xfs/xfs_trans_dquot.c
394
ASSERT(q[0].qt_dquot != NULL);
fs/xfs/xfs_trans_dquot.c
395
if (q[1].qt_dquot == NULL) {
fs/xfs/xfs_trans_dquot.c
396
mutex_lock(&q[0].qt_dquot->q_qlock);
fs/xfs/xfs_trans_dquot.c
397
xfs_trans_dqjoin(tp, q[0].qt_dquot);
fs/xfs/xfs_trans_dquot.c
398
} else if (q[2].qt_dquot == NULL) {
fs/xfs/xfs_trans_dquot.c
399
xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
fs/xfs/xfs_trans_dquot.c
400
xfs_trans_dqjoin(tp, q[0].qt_dquot);
fs/xfs/xfs_trans_dquot.c
401
xfs_trans_dqjoin(tp, q[1].qt_dquot);
fs/xfs/xfs_trans_dquot.c
403
xfs_dqlockn(q);
fs/xfs/xfs_trans_dquot.c
405
if (q[i].qt_dquot == NULL)
fs/xfs/xfs_trans_dquot.c
407
xfs_trans_dqjoin(tp, q[i].qt_dquot);
fs/xfs/xfs_trans_dquot.c
818
struct xfs_quotainfo *q = mp->m_quotainfo;
fs/xfs/xfs_trans_dquot.c
825
defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
include/crypto/aria.h
436
int q = 4 - (n / 32);
include/crypto/aria.h
440
((y[q % 4]) >> r) ^
include/crypto/aria.h
441
((y[(q + 3) % 4]) << (32 - r));
include/crypto/aria.h
443
((y[(q + 1) % 4]) >> r) ^
include/crypto/aria.h
444
((y[q % 4]) << (32 - r));
include/crypto/aria.h
446
((y[(q + 2) % 4]) >> r) ^
include/crypto/aria.h
447
((y[(q + 1) % 4]) << (32 - r));
include/crypto/aria.h
449
((y[(q + 3) % 4]) >> r) ^
include/crypto/aria.h
450
((y[(q + 2) % 4]) << (32 - r));
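
The include/crypto/aria.h excerpts above implement a 128-bit rotate over four most-significant-first u32 words: q = 4 - n/32 picks the source word and r = n % 32 the bit offset for each output word. A compact sketch of the same indexing, assuming 0 < r < 32 (true for ARIA's actual rotation amounts, so neither shift is undefined):

#include <stdint.h>
#include <stdio.h>

/* Rotate the 128-bit value y (word 0 most significant) right by n bits. */
static void rotr128(const uint32_t y[4], uint32_t out[4], int n)
{
	int q = 4 - (n / 32);
	int r = n % 32;   /* must be 1..31 here */

	for (int i = 0; i < 4; i++)
		out[i] = (y[(q + i) % 4] >> r) ^
			 (y[(q + i + 3) % 4] << (32 - r));
}

int main(void)
{
	uint32_t y[4] = { 0x80000000u, 0, 0, 1 }, out[4];

	rotr128(y, out, 19);   /* 19 is one of ARIA's rotation amounts */
	for (int i = 0; i < 4; i++)
		printf("%08x ", out[i]);
	printf("\n");
	return 0;
}
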
include/crypto/b128ops.h
60
static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
include/crypto/b128ops.h
62
r->a = p->a ^ q->a;
include/crypto/b128ops.h
63
r->b = p->b ^ q->b;
include/crypto/b128ops.h
66
static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
include/crypto/b128ops.h
68
r->a = p->a ^ q->a;
include/crypto/b128ops.h
69
r->b = p->b ^ q->b;
include/crypto/internal/ecc.h
305
const u64 *y, const struct ecc_point *q,
include/crypto/internal/rsa.h
37
const u8 *q;
include/linux/bio.h
321
u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
include/linux/blk-integrity.h
114
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
include/linux/blk-integrity.h
119
static inline int blk_rq_map_integrity_sg(struct request *q,
include/linux/blk-integrity.h
150
blk_integrity_queue_supports_integrity(struct request_queue *q)
include/linux/blk-integrity.h
155
queue_max_integrity_segments(const struct request_queue *q)
include/linux/blk-integrity.h
48
blk_integrity_queue_supports_integrity(struct request_queue *q)
include/linux/blk-integrity.h
50
return q->limits.integrity.metadata_size;
include/linux/blk-integrity.h
67
queue_max_integrity_segments(const struct request_queue *q)
include/linux/blk-integrity.h
69
return q->limits.max_integrity_segments;
include/linux/blk-mq.h
1020
static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id)
include/linux/blk-mq.h
1025
hctx = rcu_dereference(q->queue_hw_ctx)[id];
include/linux/blk-mq.h
1031
#define queue_for_each_hw_ctx(q, hctx, i) \
include/linux/blk-mq.h
1032
for ((i) = 0; (i) < (q)->nr_hw_queues && \
include/linux/blk-mq.h
1033
({ hctx = queue_hctx((q), i); 1; }); (i)++)
include/linux/blk-mq.h
1041
if (rq->q->mq_ops->cleanup_rq)
include/linux/blk-mq.h
1042
rq->q->mq_ops->cleanup_rq(rq);
include/linux/blk-mq.h
1053
void blk_rq_init(struct request_queue *q, struct request *rq);
include/linux/blk-mq.h
736
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
include/linux/blk-mq.h
741
struct request_queue *q);
include/linux/blk-mq.h
754
bool blk_mq_queue_inflight(struct request_queue *q);
include/linux/blk-mq.h
765
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
include/linux/blk-mq.h
767
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
include/linux/blk-mq.h
927
void blk_mq_kick_requeue_list(struct request_queue *q);
include/linux/blk-mq.h
928
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
include/linux/blk-mq.h
933
void blk_mq_stop_hw_queues(struct request_queue *q);
include/linux/blk-mq.h
934
void blk_mq_start_hw_queues(struct request_queue *q);
include/linux/blk-mq.h
936
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
include/linux/blk-mq.h
937
void blk_mq_quiesce_queue(struct request_queue *q);
include/linux/blk-mq.h
941
void blk_mq_unquiesce_queue(struct request_queue *q);
include/linux/blk-mq.h
944
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
include/linux/blk-mq.h
945
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
include/linux/blk-mq.h
949
void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
include/linux/blk-mq.h
950
void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
include/linux/blk-mq.h
952
blk_mq_freeze_queue(struct request_queue *q)
include/linux/blk-mq.h
956
blk_mq_freeze_queue_nomemsave(q);
include/linux/blk-mq.h
960
blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
include/linux/blk-mq.h
962
blk_mq_unfreeze_queue_nomemrestore(q);
include/linux/blk-mq.h
965
void blk_freeze_queue_start(struct request_queue *q);
include/linux/blk-mq.h
966
void blk_mq_freeze_queue_wait(struct request_queue *q);
include/linux/blk-mq.h
967
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
include/linux/blk-mq.h
969
void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
include/linux/blk-mq.h
970
void blk_freeze_queue_start_non_owner(struct request_queue *q);
include/linux/blk-mq.h
979
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
include/linux/blk-mq.h
983
bool __blk_should_fake_timeout(struct request_queue *q);
include/linux/blk-mq.h
984
static inline bool blk_should_fake_timeout(struct request_queue *q)
include/linux/blk-mq.h
987
test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
include/linux/blk-mq.h
988
return __blk_should_fake_timeout(q);
include/linux/blk-pm.h
13
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
include/linux/blk-pm.h
14
extern int blk_pre_runtime_suspend(struct request_queue *q);
include/linux/blk-pm.h
15
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
include/linux/blk-pm.h
16
extern void blk_pre_runtime_resume(struct request_queue *q);
include/linux/blk-pm.h
17
extern void blk_post_runtime_resume(struct request_queue *q);
include/linux/blk-pm.h
19
static inline void blk_pm_runtime_init(struct request_queue *q,
include/linux/blkdev.h
1024
extern int blk_lld_busy(struct request_queue *q);
include/linux/blkdev.h
1025
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
include/linux/blkdev.h
1026
extern void blk_queue_exit(struct request_queue *q);
include/linux/blkdev.h
1027
extern void blk_sync_queue(struct request_queue *q);
include/linux/blkdev.h
1086
queue_limits_start_update(struct request_queue *q)
include/linux/blkdev.h
1088
mutex_lock(&q->limits_lock);
include/linux/blkdev.h
1089
return q->limits;
include/linux/blkdev.h
1091
int queue_limits_commit_update_frozen(struct request_queue *q,
include/linux/blkdev.h
1093
int queue_limits_commit_update(struct request_queue *q,
include/linux/blkdev.h
1095
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
include/linux/blkdev.h
1106
static inline void queue_limits_cancel_update(struct request_queue *q)
include/linux/blkdev.h
1108
mutex_unlock(&q->limits_lock);
include/linux/blkdev.h
1118
static inline void blk_queue_disable_discard(struct request_queue *q)
include/linux/blkdev.h
1120
q->limits.max_discard_sectors = 0;
include/linux/blkdev.h
1123
static inline void blk_queue_disable_secure_erase(struct request_queue *q)
include/linux/blkdev.h
1125
q->limits.max_secure_erase_sectors = 0;
include/linux/blkdev.h
1128
static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
include/linux/blkdev.h
1130
q->limits.max_write_zeroes_sectors = 0;
include/linux/blkdev.h
1131
q->limits.max_wzeroes_unmap_sectors = 0;
include/linux/blkdev.h
1137
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
include/linux/blkdev.h
1315
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
include/linux/blkdev.h
1317
return q->limits.seg_boundary_mask;
include/linux/blkdev.h
1320
static inline unsigned long queue_virt_boundary(const struct request_queue *q)
include/linux/blkdev.h
1322
return q->limits.virt_boundary_mask;
include/linux/blkdev.h
1325
static inline unsigned int queue_max_sectors(const struct request_queue *q)
include/linux/blkdev.h
1327
return q->limits.max_sectors;
include/linux/blkdev.h
1330
static inline unsigned int queue_max_bytes(struct request_queue *q)
include/linux/blkdev.h
1332
return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
include/linux/blkdev.h
1335
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
include/linux/blkdev.h
1337
return q->limits.max_hw_sectors;
include/linux/blkdev.h
1340
static inline unsigned short queue_max_segments(const struct request_queue *q)
include/linux/blkdev.h
1342
return q->limits.max_segments;
include/linux/blkdev.h
1345
static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
include/linux/blkdev.h
1347
return q->limits.max_discard_segments;
include/linux/blkdev.h
1350
static inline unsigned int queue_max_segment_size(const struct request_queue *q)
include/linux/blkdev.h
1352
return q->limits.max_segment_size;
include/linux/blkdev.h
1355
static inline bool queue_emulates_zone_append(struct request_queue *q)
include/linux/blkdev.h
1357
return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
include/linux/blkdev.h
1383
static inline unsigned queue_logical_block_size(const struct request_queue *q)
include/linux/blkdev.h
1385
return q->limits.logical_block_size;
include/linux/blkdev.h
1393
static inline unsigned int queue_physical_block_size(const struct request_queue *q)
include/linux/blkdev.h
1395
return q->limits.physical_block_size;
include/linux/blkdev.h
1403
static inline unsigned int queue_io_min(const struct request_queue *q)
include/linux/blkdev.h
1405
return q->limits.io_min;
include/linux/blkdev.h
1413
static inline unsigned int queue_io_opt(const struct request_queue *q)
include/linux/blkdev.h
1415
return q->limits.io_opt;
include/linux/blkdev.h
1424
queue_zone_write_granularity(const struct request_queue *q)
include/linux/blkdev.h
1426
return q->limits.zone_write_granularity;
include/linux/blkdev.h
1482
struct request_queue *q = bdev_get_queue(bdev);
include/linux/blkdev.h
1485
q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
include/linux/blkdev.h
1487
return q->limits.features & BLK_FEAT_STABLE_WRITES;
include/linux/blkdev.h
1490
static inline bool blk_queue_write_cache(struct request_queue *q)
include/linux/blkdev.h
1492
return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
include/linux/blkdev.h
1493
!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
include/linux/blkdev.h
1523
struct request_queue *q = bdev_get_queue(bdev);
include/linux/blkdev.h
1525
if (!blk_queue_is_zoned(q))
include/linux/blkdev.h
1527
return q->limits.chunk_sectors;
include/linux/blkdev.h
1564
static inline unsigned int queue_dma_alignment(const struct request_queue *q)
include/linux/blkdev.h
1566
return q->limits.dma_alignment;
include/linux/blkdev.h
1570
queue_atomic_write_unit_max_bytes(const struct request_queue *q)
include/linux/blkdev.h
1572
return q->limits.atomic_write_unit_max;
include/linux/blkdev.h
1576
queue_atomic_write_unit_min_bytes(const struct request_queue *q)
include/linux/blkdev.h
1578
return q->limits.atomic_write_unit_min;
include/linux/blkdev.h
1582
queue_atomic_write_boundary_bytes(const struct request_queue *q)
include/linux/blkdev.h
1584
return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
include/linux/blkdev.h
1588
queue_atomic_write_max_bytes(const struct request_queue *q)
include/linux/blkdev.h
1590
return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
include/linux/blkdev.h
1604
static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
include/linux/blkdev.h
1607
unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
include/linux/blkdev.h
1629
struct request_queue *q);
include/linux/blkdev.h
1634
struct request_queue *q)
include/linux/blkdev.h
676
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
include/linux/blkdev.h
677
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
include/linux/blkdev.h
679
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
include/linux/blkdev.h
680
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
include/linux/blkdev.h
681
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
include/linux/blkdev.h
682
#define blk_queue_noxmerges(q) \
include/linux/blkdev.h
683
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
include/linux/blkdev.h
684
#define blk_queue_rot(q) ((q)->limits.features & BLK_FEAT_ROTATIONAL)
include/linux/blkdev.h
685
#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
include/linux/blkdev.h
686
#define blk_queue_passthrough_stat(q) \
include/linux/blkdev.h
687
((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
include/linux/blkdev.h
688
#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
include/linux/blkdev.h
689
#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
include/linux/blkdev.h
691
#define blk_queue_rq_alloc_time(q) \
include/linux/blkdev.h
692
test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
include/linux/blkdev.h
694
#define blk_queue_rq_alloc_time(q) false
include/linux/blkdev.h
700
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
include/linux/blkdev.h
701
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
include/linux/blkdev.h
702
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
include/linux/blkdev.h
703
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
include/linux/blkdev.h
704
#define blk_queue_skip_tagset_quiesce(q) \
include/linux/blkdev.h
705
((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
include/linux/blkdev.h
706
#define blk_queue_disable_wbt(q) \
include/linux/blkdev.h
707
test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
include/linux/blkdev.h
708
#define blk_queue_no_elv_switch(q) \
include/linux/blkdev.h
709
test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
include/linux/blkdev.h
711
extern void blk_set_pm_only(struct request_queue *q);
include/linux/blkdev.h
712
extern void blk_clear_pm_only(struct request_queue *q);
include/linux/blkdev.h
720
static inline bool queue_is_mq(struct request_queue *q)
include/linux/blkdev.h
722
return q->mq_ops;
include/linux/blkdev.h
726
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
include/linux/blkdev.h
728
return q->rpm_status;
include/linux/blkdev.h
731
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
include/linux/blkdev.h
737
static inline bool blk_queue_is_zoned(struct request_queue *q)
include/linux/blkdev.h
740
(q->limits.features & BLK_FEAT_ZONED);
include/linux/blkdev.h
760
static inline unsigned int blk_queue_depth(struct request_queue *q)
include/linux/blkdev.h
762
if (q->queue_depth)
include/linux/blkdev.h
763
return q->queue_depth;
include/linux/blkdev.h
765
return q->nr_requests;
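
One line worth pausing on in the include/linux/blkdev.h run above is queue_max_bytes() (line 1332): the 512-byte sector count is capped at INT_MAX >> 9 before being shifted into bytes, so the byte result always stays in the positive int range. A standalone rendition of that clamp, under the same 512-byte-sector assumption:

#include <limits.h>
#include <stdio.h>

static unsigned int queue_max_bytes(unsigned int max_sectors)
{
	unsigned int cap = INT_MAX >> 9;   /* largest sector count that fits */

	if (max_sectors > cap)
		max_sectors = cap;
	return max_sectors << 9;           /* sectors -> bytes */
}

int main(void)
{
	printf("%u\n", queue_max_bytes(8));            /* 4096 */
	printf("%u\n", queue_max_bytes(0xffffffffu));  /* 2147483136 */
	return 0;
}
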
include/linux/blktrace_api.h
50
#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
include/linux/blktrace_api.h
55
bt = rcu_dereference((q)->blk_trace); \
include/linux/blktrace_api.h
60
#define blk_add_trace_msg(q, fmt, ...) \
include/linux/blktrace_api.h
61
blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
include/linux/blktrace_api.h
64
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
include/linux/blktrace_api.h
70
bt = rcu_dereference(q->blk_trace);
include/linux/blktrace_api.h
77
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
include/linux/blktrace_api.h
80
extern int blk_trace_startstop(struct request_queue *q, int start);
include/linux/blktrace_api.h
81
extern int blk_trace_remove(struct request_queue *q);
include/linux/blktrace_api.h
85
# define blk_trace_shutdown(q) do { } while (0)
include/linux/blktrace_api.h
87
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
include/linux/blktrace_api.h
88
# define blk_trace_startstop(q, start) (-ENOTTY)
include/linux/blktrace_api.h
89
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
include/linux/blktrace_api.h
90
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
include/linux/blktrace_api.h
91
# define blk_trace_note_message_enabled(q) (false)
include/linux/blktrace_api.h
93
static inline int blk_trace_remove(struct request_queue *q)
include/linux/bsg.h
14
struct bsg_device *bsg_register_queue(struct request_queue *q,
include/linux/clk.h
184
bool clk_is_match(const struct clk *p, const struct clk *q);
include/linux/clk.h
296
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
include/linux/clk.h
298
return p == q;
include/linux/cordic.h
38
s32 q;
include/linux/damon.h
929
void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g);
include/linux/dsa/brcm.h
12
#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
include/linux/fortify-string.h
195
char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
include/linux/fortify-string.h
203
return __underlying_strncpy(p, q, size);
include/linux/fortify-string.h
275
__FORTIFY_INLINE ssize_t sized_strscpy(char * const POS p, const char * const POS q, size_t size)
include/linux/fortify-string.h
279
const size_t q_size = __member_size(q);
include/linux/fortify-string.h
284
return __real_strscpy(p, q, size);
include/linux/fortify-string.h
295
len = __compiletime_strlen(q);
include/linux/fortify-string.h
298
__underlying_memcpy(p, q, len + 1);
include/linux/fortify-string.h
307
len = strnlen(q, size);
include/linux/fortify-string.h
327
return __real_strscpy(p, q, len);
include/linux/fortify-string.h
331
extern size_t __real_strlcat(char *p, const char *q, size_t avail) __RENAME(strlcat);
include/linux/fortify-string.h
357
size_t strlcat(char * const POS p, const char * const POS q, size_t avail)
include/linux/fortify-string.h
360
const size_t q_size = __member_size(q);
include/linux/fortify-string.h
366
return __real_strlcat(p, q, avail);
include/linux/fortify-string.h
369
copy_len = strlen(q);
include/linux/fortify-string.h
388
__underlying_memcpy(p + p_len, q, copy_len);
include/linux/fortify-string.h
411
char *strcat(char * const POS p, const char *q)
include/linux/fortify-string.h
414
const size_t wanted = strlcat(p, q, p_size);
include/linux/fortify-string.h
442
char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
include/linux/fortify-string.h
445
const size_t q_size = __member_size(q);
include/linux/fortify-string.h
449
return __underlying_strncat(p, q, count);
include/linux/fortify-string.h
451
copy_len = strnlen(q, count);
include/linux/fortify-string.h
455
__underlying_memcpy(p + p_len, q, copy_len);
include/linux/fortify-string.h
623
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
include/linux/fortify-string.h
643
__underlying_##op(p, q, __copy_size); \
include/linux/fortify-string.h
688
#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
include/linux/fortify-string.h
689
__struct_size(p), __struct_size(q), \
include/linux/fortify-string.h
690
__member_size(p), __member_size(q), \
include/linux/fortify-string.h
692
#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
include/linux/fortify-string.h
693
__struct_size(p), __struct_size(q), \
include/linux/fortify-string.h
694
__member_size(p), __member_size(q), \
include/linux/fortify-string.h
710
int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
include/linux/fortify-string.h
713
const size_t q_size = __struct_size(q);
include/linux/fortify-string.h
725
return __underlying_memcmp(p, q, size);
include/linux/fortify-string.h
783
char *strcpy(char * const POS p, const char * const POS q)
include/linux/fortify-string.h
786
const size_t q_size = __member_size(q);
include/linux/fortify-string.h
793
return __underlying_strcpy(p, q);
include/linux/fortify-string.h
794
size = strlen(q) + 1;
include/linux/fortify-string.h
80
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
include/linux/fortify-string.h
801
__underlying_memcpy(p, q, size);
include/linux/fortify-string.h
81
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
include/linux/fortify-string.h
84
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
include/linux/fortify-string.h
85
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
include/linux/fortify-string.h
88
extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
include/linux/fortify-string.h
89
extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
include/linux/fortify-string.h
93
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
include/linux/fortify-string.h
94
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
include/linux/fortify-string.h
95
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
include/linux/fortify-string.h
97
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
include/linux/fortify-string.h
98
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
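
The fortify-string.h lines above share one shape: each wrapper compares a compile-time-known destination size against the requested length before delegating to the renamed "__underlying" libc routine. A toy rendition of that pattern, using GCC/Clang's __builtin_object_size and a GNU statement expression; it is deliberately much cruder than the kernel's macro machinery and evaluates its arguments more than once:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define fortified_memcpy(p, q, size) ({                              \
	size_t __bos = __builtin_object_size((p), 0);                \
	/* __bos is (size_t)-1 when the compiler cannot see a size */\
	if (__bos != (size_t)-1 && (size_t)(size) > __bos) {         \
		fprintf(stderr, "memcpy overflow: %zu > %zu\n",      \
			(size_t)(size), __bos);                      \
		abort();                                             \
	}                                                            \
	memcpy((p), (q), (size));                                    \
})

int main(void)
{
	char dst[8];

	fortified_memcpy(dst, "hello", 6);   /* within bounds: fine */
	/* fortified_memcpy(dst, "0123456789", 11); would abort */
	puts(dst);
	return 0;
}
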
include/linux/iocontext.h
74
struct request_queue *q;
include/linux/lwq.h
100
static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
include/linux/lwq.h
103
return llist_add(&n->node, &q->new) &&
include/linux/lwq.h
104
smp_load_acquire(&q->ready) == NULL;
include/linux/lwq.h
116
static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
include/linux/lwq.h
121
return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
include/linux/lwq.h
122
smp_load_acquire(&q->ready) == NULL;
include/linux/lwq.h
33
static inline void lwq_init(struct lwq *q)
include/linux/lwq.h
35
spin_lock_init(&q->lock);
include/linux/lwq.h
36
q->ready = NULL;
include/linux/lwq.h
37
init_llist_head(&q->new);
include/linux/lwq.h
48
static inline bool lwq_empty(struct lwq *q)
include/linux/lwq.h
51
return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
include/linux/lwq.h
54
struct llist_node *__lwq_dequeue(struct lwq *q);
include/linux/lwq.h
65
#define lwq_dequeue(q, type, member) \
include/linux/lwq.h
66
({ struct llist_node *_n = __lwq_dequeue(q); \
include/linux/lwq.h
69
struct llist_node *lwq_dequeue_all(struct lwq *q);
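
The lwq.h entries above sketch the kernel's light-weight queue: producers push onto a lock-free "new" llist, and the dequeue side refills a consumer-private "ready" list (reversing it into FIFO order) only when it runs dry. A userspace toy of the same two-list split, using C11 atomics in place of llist and assuming a single consumer; struct lwq here is a stand-in, not the kernel type:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct lnode {
	int val;
	struct lnode *next;
};

struct lwq {
	_Atomic(struct lnode *) new;   /* LIFO push side, multi-producer */
	struct lnode *ready;           /* FIFO pop side, single consumer */
};

static void lwq_enqueue(struct lwq *q, struct lnode *n)
{
	n->next = atomic_load(&q->new);
	while (!atomic_compare_exchange_weak(&q->new, &n->next, n))
		;   /* CAS failure reloads n->next; just retry */
}

static struct lnode *lwq_dequeue(struct lwq *q)
{
	if (!q->ready) {   /* ran dry: grab "new" and reverse it */
		struct lnode *n = atomic_exchange(&q->new, NULL);

		while (n) {
			struct lnode *next = n->next;

			n->next = q->ready;
			q->ready = n;
			n = next;
		}
	}
	struct lnode *n = q->ready;

	if (n)
		q->ready = n->next;
	return n;
}

int main(void)
{
	struct lwq q = { .ready = NULL };
	struct lnode nodes[3] = { { .val = 1 }, { .val = 2 }, { .val = 3 } };

	for (int i = 0; i < 3; i++)
		lwq_enqueue(&q, &nodes[i]);
	for (struct lnode *n; (n = lwq_dequeue(&q)); )
		printf("%d\n", n->val);   /* 1 2 3: FIFO order */
	return 0;
}
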
include/linux/math.h
142
typeof(x_) q = x_ / d_; \
include/linux/math.h
144
q * n_ + r * n_ / d_; \
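
The include/linux/math.h fragment above is the heart of mult_frac(): it computes x * n / d via q = x / d and r = x % d so the full x * n product is never formed and cannot overflow prematurely (r * n must still fit, the same constraint the kernel macro carries). A worked example:

#include <stdint.h>
#include <stdio.h>

static uint32_t mult_frac32(uint32_t x, uint32_t n, uint32_t d)
{
	uint32_t q = x / d;
	uint32_t r = x % d;

	return q * n + r * n / d;   /* never forms the full x * n */
}

int main(void)
{
	/* 3000000000 * 3 overflows 32 bits, yet 3000000000 * 3 / 5 fits: */
	printf("%u\n", mult_frac32(3000000000u, 3, 5));   /* 1800000000 */
	return 0;
}
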
include/linux/mlx4/qp.h
497
static inline u16 folded_qp(u32 q)
include/linux/mlx4/qp.h
501
res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00);
include/linux/netdevice.h
3625
void __netif_schedule(struct Qdisc *q);
include/linux/netdevice.h
3933
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
include/linux/netdevice.h
3936
clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
include/linux/netdevice.h
3937
dql_reset(&q->dql);
include/linux/netdevice.h
760
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
include/linux/netdevice.h
763
return q->numa_node;
include/linux/netdevice.h
769
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
include/linux/netdevice.h
772
q->numa_node = node;
include/linux/posix-timers.h
118
bool posixtimer_init_sigqueue(struct sigqueue *q);
include/linux/posix-timers.h
240
static inline void posixtimer_sigqueue_getref(struct sigqueue *q)
include/linux/posix-timers.h
242
struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
include/linux/posix-timers.h
247
static inline void posixtimer_sigqueue_putref(struct sigqueue *q)
include/linux/posix-timers.h
249
struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
include/linux/posix-timers.h
261
static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { }
include/linux/posix-timers.h
262
static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { }
include/linux/soc/airoha/airoha_offload.h
204
u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
include/linux/soc/airoha/airoha_offload.h
205
void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
include/linux/soc/airoha/airoha_offload.h
206
void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
include/linux/soc/airoha/airoha_offload.h
247
static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu, int q)
include/linux/soc/airoha/airoha_offload.h
249
return npu->ops.wlan_get_irq_status(npu, q);
include/linux/soc/airoha/airoha_offload.h
252
static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
include/linux/soc/airoha/airoha_offload.h
254
npu->ops.wlan_enable_irq(npu, q);
include/linux/soc/airoha/airoha_offload.h
257
static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
include/linux/soc/airoha/airoha_offload.h
259
npu->ops.wlan_disable_irq(npu, q);
include/linux/soc/airoha/airoha_offload.h
303
int q)
include/linux/soc/airoha/airoha_offload.h
308
static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
include/linux/soc/airoha/airoha_offload.h
312
static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
include/linux/sunrpc/sched.h
214
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
include/linux/sunrpc/sched.h
287
static inline const char * rpc_qname(const struct rpc_wait_queue *q)
include/linux/sunrpc/sched.h
289
return ((q && q->name) ? q->name : "unknown");
include/linux/sunrpc/sched.h
292
static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
include/linux/sunrpc/sched.h
295
q->name = name;
include/linux/sunrpc/sched.h
298
static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
include/linux/swait.h
147
extern void swake_up_one(struct swait_queue_head *q);
include/linux/swait.h
148
extern void swake_up_all(struct swait_queue_head *q);
include/linux/swait.h
149
extern void swake_up_locked(struct swait_queue_head *q, int wake_flags);
include/linux/swait.h
151
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
include/linux/swait.h
152
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
include/linux/swait.h
154
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
include/linux/swait.h
155
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
include/linux/swait.h
69
extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
include/linux/swait.h
72
#define init_swait_queue_head(q) \
include/linux/swait.h
75
__init_swait_queue_head((q), #q, &__key); \
include/linux/t10-pi.h
42
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
include/linux/t10-pi.h
45
rq->q->limits.integrity.interval_exp)
include/linux/t10-pi.h
46
shift = rq->q->limits.integrity.interval_exp;
include/linux/t10-pi.h
67
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
include/linux/t10-pi.h
70
rq->q->limits.integrity.interval_exp)
include/linux/t10-pi.h
71
shift = rq->q->limits.integrity.interval_exp;
include/linux/uacce.h
41
struct uacce_queue *q);
include/linux/uacce.h
42
void (*put_queue)(struct uacce_queue *q);
include/linux/uacce.h
43
int (*start_queue)(struct uacce_queue *q);
include/linux/uacce.h
44
void (*stop_queue)(struct uacce_queue *q);
include/linux/uacce.h
45
int (*is_q_updated)(struct uacce_queue *q);
include/linux/uacce.h
46
int (*mmap)(struct uacce_queue *q, struct vm_area_struct *vma,
include/linux/uacce.h
48
long (*ioctl)(struct uacce_queue *q, unsigned int cmd,
include/math-emu/op-1.h
253
#define _FP_SQRT_MEAT_1(R, S, T, X, q) \
include/math-emu/op-1.h
255
while (q != _FP_WORK_ROUND) \
include/math-emu/op-1.h
257
T##_f = S##_f + q; \
include/math-emu/op-1.h
260
S##_f = T##_f + q; \
include/math-emu/op-1.h
262
R##_f += q; \
include/math-emu/op-1.h
265
q >>= 1; \
include/math-emu/op-2.h
524
#define _FP_SQRT_MEAT_2(R, S, T, X, q) \
include/math-emu/op-2.h
526
while (q) \
include/math-emu/op-2.h
528
T##_f1 = S##_f1 + q; \
include/math-emu/op-2.h
531
S##_f1 = T##_f1 + q; \
include/math-emu/op-2.h
533
R##_f1 += q; \
include/math-emu/op-2.h
536
q >>= 1; \
include/math-emu/op-2.h
538
q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
include/math-emu/op-2.h
539
while (q != _FP_WORK_ROUND) \
include/math-emu/op-2.h
541
T##_f0 = S##_f0 + q; \
include/math-emu/op-2.h
546
S##_f0 = T##_f0 + q; \
include/math-emu/op-2.h
549
R##_f0 += q; \
include/math-emu/op-2.h
552
q >>= 1; \
include/math-emu/op-4.h
429
#define _FP_SQRT_MEAT_4(R, S, T, X, q) \
include/math-emu/op-4.h
431
while (q) \
include/math-emu/op-4.h
433
T##_f[3] = S##_f[3] + q; \
include/math-emu/op-4.h
436
S##_f[3] = T##_f[3] + q; \
include/math-emu/op-4.h
438
R##_f[3] += q; \
include/math-emu/op-4.h
441
q >>= 1; \
include/math-emu/op-4.h
443
q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
include/math-emu/op-4.h
444
while (q) \
include/math-emu/op-4.h
446
T##_f[2] = S##_f[2] + q; \
include/math-emu/op-4.h
451
S##_f[2] = T##_f[2] + q; \
include/math-emu/op-4.h
455
R##_f[2] += q; \
include/math-emu/op-4.h
458
q >>= 1; \
include/math-emu/op-4.h
460
q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
include/math-emu/op-4.h
461
while (q) \
include/math-emu/op-4.h
463
T##_f[1] = S##_f[1] + q; \
include/math-emu/op-4.h
470
S##_f[1] = T##_f[1] + q; \
include/math-emu/op-4.h
475
R##_f[1] += q; \
include/math-emu/op-4.h
478
q >>= 1; \
include/math-emu/op-4.h
480
q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
include/math-emu/op-4.h
481
while (q != _FP_WORK_ROUND) \
include/math-emu/op-4.h
483
T##_f[0] = S##_f[0] + q; \
include/math-emu/op-4.h
489
S##_f[0] = T##_f[0] + q; \
include/math-emu/op-4.h
494
R##_f[0] += q; \
include/math-emu/op-4.h
497
q >>= 1; \
include/math-emu/op-common.h
594
_FP_W_TYPE q; \
include/math-emu/op-common.h
636
q = _FP_OVERFLOW_##fs >> 1; \
include/math-emu/op-common.h
637
_FP_SQRT_MEAT_##wc(R, S, T, X, q); \
include/math-emu/op-common.h
880
#define _FP_DIV_HELP_imm(q, r, n, d) \
include/math-emu/op-common.h
882
q = n / d, r = n % d; \
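
The _FP_SQRT_MEAT_* macros above run a bit-at-a-time square root over multiword fractions: each candidate bit q is tried from high to low and kept when the trial subtraction still fits. The classic single-word integer form of the same technique, as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t isqrt32(uint32_t n)
{
	uint32_t x = n, c = 0, d = 1u << 30;

	while (d > n)
		d >>= 2;           /* largest power-of-four candidate */
	while (d != 0) {
		if (x >= c + d) {  /* trial bit fits: accept it */
			x -= c + d;
			c = (c >> 1) + d;
		} else {
			c >>= 1;
		}
		d >>= 2;
	}
	return c;
}

int main(void)
{
	printf("%u %u %u\n", isqrt32(0), isqrt32(15), isqrt32(1u << 30));
	return 0;   /* prints "0 3 32768" */
}
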
include/media/drv-intf/saa7146_vv.h
145
void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
include/media/drv-intf/saa7146_vv.h
146
void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
include/media/drv-intf/saa7146_vv.h
147
int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
include/media/drv-intf/saa7146_vv.h
77
struct vb2_queue q;
include/media/v4l2-mc.h
212
static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q)
include/media/v4l2-mc.h
86
int v4l_vb2q_enable_media_source(struct vb2_queue *q);
include/media/v4l2-mem2mem.h
443
struct vb2_queue *q);
include/media/v4l2-mem2mem.h
453
struct vb2_queue *q);
include/media/v4l2-mem2mem.h
66
struct vb2_queue q;
include/media/v4l2-mem2mem.h
766
return &m2m_ctx->out_q_ctx.q;
include/media/v4l2-mem2mem.h
777
return &m2m_ctx->cap_q_ctx.q;
include/media/videobuf2-core.h
1003
void vb2_core_queue_release(struct vb2_queue *q);
include/media/videobuf2-core.h
1018
void vb2_queue_error(struct vb2_queue *q);
include/media/videobuf2-core.h
1042
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
include/media/videobuf2-core.h
1060
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
include/media/videobuf2-core.h
1085
__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
include/media/videobuf2-core.h
1096
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
include/media/videobuf2-core.h
1106
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
include/media/videobuf2-core.h
1135
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
include/media/videobuf2-core.h
1142
int vb2_thread_stop(struct vb2_queue *q);
include/media/videobuf2-core.h
1148
static inline bool vb2_is_streaming(struct vb2_queue *q)
include/media/videobuf2-core.h
1150
return q->streaming;
include/media/videobuf2-core.h
1166
static inline bool vb2_fileio_is_active(struct vb2_queue *q)
include/media/videobuf2-core.h
1168
return q->fileio;
include/media/videobuf2-core.h
1175
static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q)
include/media/videobuf2-core.h
1177
if (q->bufs_bitmap)
include/media/videobuf2-core.h
1178
return bitmap_weight(q->bufs_bitmap, q->max_num_buffers);
include/media/videobuf2-core.h
1189
static inline bool vb2_is_busy(struct vb2_queue *q)
include/media/videobuf2-core.h
1191
return !!q->is_busy;
include/media/videobuf2-core.h
1198
static inline void *vb2_get_drv_priv(struct vb2_queue *q)
include/media/videobuf2-core.h
1200
return q->drv_priv;
include/media/videobuf2-core.h
1256
static inline bool vb2_start_streaming_called(struct vb2_queue *q)
include/media/videobuf2-core.h
1258
return q->start_streaming_called;
include/media/videobuf2-core.h
1265
static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
include/media/videobuf2-core.h
1267
q->last_buffer_dequeued = false;
include/media/videobuf2-core.h
1280
static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
include/media/videobuf2-core.h
1283
if (!q->bufs)
include/media/videobuf2-core.h
1286
if (index >= q->max_num_buffers)
include/media/videobuf2-core.h
1289
if (test_bit(index, q->bufs_bitmap))
include/media/videobuf2-core.h
1290
return q->bufs[index];
include/media/videobuf2-core.h
1306
bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb);
include/media/videobuf2-core.h
1318
int vb2_verify_memory_type(struct vb2_queue *q,
include/media/videobuf2-core.h
428
int (*queue_setup)(struct vb2_queue *q,
include/media/videobuf2-core.h
438
int (*prepare_streaming)(struct vb2_queue *q);
include/media/videobuf2-core.h
439
int (*start_streaming)(struct vb2_queue *q, unsigned int count);
include/media/videobuf2-core.h
440
void (*stop_streaming)(struct vb2_queue *q);
include/media/videobuf2-core.h
441
void (*unprepare_streaming)(struct vb2_queue *q);
include/media/videobuf2-core.h
686
static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
include/media/videobuf2-core.h
688
return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP;
include/media/videobuf2-core.h
750
void vb2_discard_done(struct vb2_queue *q);
include/media/videobuf2-core.h
761
int vb2_wait_for_all_buffers(struct vb2_queue *q);
include/media/videobuf2-core.h
778
void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
include/media/videobuf2-core.h
809
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
include/media/videobuf2-core.h
835
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
include/media/videobuf2-core.h
861
int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
include/media/videobuf2-core.h
871
int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count);
include/media/videobuf2-core.h
899
int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
include/media/videobuf2-core.h
925
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
include/media/videobuf2-core.h
940
int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
include/media/videobuf2-core.h
955
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
include/media/videobuf2-core.h
976
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
include/media/videobuf2-core.h
993
int vb2_core_queue_init(struct vb2_queue *q);
include/media/videobuf2-v4l2.h
119
int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
include/media/videobuf2-v4l2.h
145
int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
include/media/videobuf2-v4l2.h
157
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
include/media/videobuf2-v4l2.h
183
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
include/media/videobuf2-v4l2.h
201
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
include/media/videobuf2-v4l2.h
220
int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
include/media/videobuf2-v4l2.h
233
int __must_check vb2_queue_init(struct vb2_queue *q);
include/media/videobuf2-v4l2.h
246
int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name);
include/media/videobuf2-v4l2.h
256
void vb2_queue_release(struct vb2_queue *q);
include/media/videobuf2-v4l2.h
272
int vb2_queue_change_type(struct vb2_queue *q, unsigned int type);
include/media/videobuf2-v4l2.h
293
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
include/media/videobuf2-v4l2.h
317
static inline bool vb2_queue_is_busy(struct vb2_queue *q, struct file *file)
include/media/videobuf2-v4l2.h
319
return q->owner && q->owner != file->private_data;
include/media/videobuf2-v4l2.h
73
struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp);
include/media/videobuf2-v4l2.h
75
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
include/media/videobuf2-v4l2.h
85
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
include/media/videobuf2-v4l2.h
95
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
include/net/gen_stats.h
62
struct gnet_stats_queue *q, __u32 qlen);
include/net/gen_stats.h
65
const struct gnet_stats_queue *q);
include/net/inet_frag.h
110
void (*constructor)(struct inet_frag_queue *q,
include/net/inet_frag.h
129
void inet_frag_kill(struct inet_frag_queue *q, int *refs);
include/net/inet_frag.h
130
void inet_frag_destroy(struct inet_frag_queue *q);
include/net/inet_frag.h
133
void inet_frag_queue_flush(struct inet_frag_queue *q,
include/net/inet_frag.h
136
static inline void inet_frag_putn(struct inet_frag_queue *q, int refs)
include/net/inet_frag.h
138
if (refs && refcount_sub_and_test(refs, &q->refcnt))
include/net/inet_frag.h
139
inet_frag_destroy(q);
include/net/inet_frag.h
174
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
include/net/inet_frag.h
176
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
include/net/inet_frag.h
178
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
include/net/inet_frag.h
180
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
include/net/ipv6_frag.h
101
head = inet_frag_pull_head(&fq->q);
include/net/ipv6_frag.h
106
spin_unlock(&fq->q.lock);
include/net/ipv6_frag.h
113
spin_unlock(&fq->q.lock);
include/net/ipv6_frag.h
116
inet_frag_putn(&fq->q, refs);
include/net/ipv6_frag.h
24
struct inet_frag_queue q;
include/net/ipv6_frag.h
32
static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
include/net/ipv6_frag.h
34
struct frag_queue *fq = container_of(q, struct frag_queue, q);
include/net/ipv6_frag.h
37
q->key.v6 = *key;
include/net/ipv6_frag.h
72
spin_lock(&fq->q.lock);
include/net/ipv6_frag.h
74
if (fq->q.flags & INET_FRAG_COMPLETE)
include/net/ipv6_frag.h
77
fq->q.flags |= INET_FRAG_DROP;
include/net/ipv6_frag.h
78
inet_frag_kill(&fq->q, &refs);
include/net/ipv6_frag.h
81
if (READ_ONCE(fq->q.fqdir->dead)) {
include/net/ipv6_frag.h
82
inet_frag_queue_flush(&fq->q, 0);
include/net/ipv6_frag.h
94
if (!(fq->q.flags & INET_FRAG_FIRST_IN))
include/net/mana/gdma.h
294
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
include/net/mana/gdma.h
297
typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
include/net/pkt_cls.h
123
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
include/net/pkt_cls.h
130
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
include/net/pkt_cls.h
142
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
include/net/pkt_cls.h
169
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
include/net/pkt_cls.h
173
cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
include/net/pkt_cls.h
176
q->ops->cl_ops->unbind_tcf(q, cl);
include/net/pkt_cls.h
182
struct Qdisc *q = tp->chain->block->q;
include/net/pkt_cls.h
187
if (!q)
include/net/pkt_cls.h
189
sch_tree_lock(q);
include/net/pkt_cls.h
190
__tcf_bind_filter(q, r, base);
include/net/pkt_cls.h
191
sch_tree_unlock(q);
include/net/pkt_cls.h
195
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
include/net/pkt_cls.h
200
q->ops->cl_ops->unbind_tcf(q, cl);
include/net/pkt_cls.h
206
struct Qdisc *q = tp->chain->block->q;
include/net/pkt_cls.h
208
if (!q)
include/net/pkt_cls.h
210
__tcf_unbind_filter(q, r);
include/net/pkt_cls.h
214
void *q, struct tcf_result *res,
include/net/pkt_cls.h
219
__tcf_bind_filter(q, res, base);
include/net/pkt_cls.h
221
__tcf_unbind_filter(q, res);
include/net/pkt_cls.h
56
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
include/net/pkt_cls.h
58
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
include/net/pkt_cls.h
62
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
include/net/pkt_cls.h
89
return block->q;
include/net/pkt_sched.h
102
void qdisc_hash_add(struct Qdisc *q, bool invisible);
include/net/pkt_sched.h
103
void qdisc_hash_del(struct Qdisc *q);
include/net/pkt_sched.h
111
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
include/net/pkt_sched.h
115
void __qdisc_run(struct Qdisc *q);
include/net/pkt_sched.h
117
static inline struct sk_buff *qdisc_run(struct Qdisc *q)
include/net/pkt_sched.h
119
if (qdisc_run_begin(q)) {
include/net/pkt_sched.h
120
__qdisc_run(q);
include/net/pkt_sched.h
121
return qdisc_run_end(q);
include/net/pkt_sched.h
136
static inline struct net *qdisc_net(struct Qdisc *q)
include/net/pkt_sched.h
138
return dev_net(q->dev_queue->dev);
include/net/pkt_sched.h
23
#define qdisc_priv(q) \
include/net/pkt_sched.h
24
_Generic(q, \
include/net/pkt_sched.h
25
const struct Qdisc * : (const void *)&q->privdata, \
include/net/pkt_sched.h
26
struct Qdisc * : (void *)&q->privdata)
include/net/pkt_sched.h
314
spin_lock_init(&sch->q.lock);
include/net/pkt_sched.h
322
lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
include/net/pkt_sched.h
90
int fifo_set_limit(struct Qdisc *q, unsigned int limit);
include/net/sch_generic.h
1068
__qdisc_enqueue_tail(skb, &sch->q);
include/net/sch_generic.h
1105
sch->q.qlen--;
include/net/sch_generic.h
1110
skb = __qdisc_dequeue_head(&sch->q);
include/net/sch_generic.h
1121
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
include/net/sch_generic.h
117
struct qdisc_skb_head q;
include/net/sch_generic.h
1170
static inline void qdisc_dequeue_drop(struct Qdisc *q, struct sk_buff *skb,
include/net/sch_generic.h
1173
DEBUG_NET_WARN_ON_ONCE(!(q->flags & TCQ_F_DEQUEUE_DROPS));
include/net/sch_generic.h
1174
DEBUG_NET_WARN_ON_ONCE(q->flags & TCQ_F_NOLOCK);
include/net/sch_generic.h
1177
skb->next = q->to_free;
include/net/sch_generic.h
1178
q->to_free = skb;
include/net/sch_generic.h
1219
const struct qdisc_skb_head *qh = &sch->q;
include/net/sch_generic.h
1237
sch->q.qlen++;
include/net/sch_generic.h
1254
sch->q.qlen--;
include/net/sch_generic.h
1266
sch->q.qlen++;
include/net/sch_generic.h
1282
sch->q.qlen--;
include/net/sch_generic.h
1309
__qdisc_reset_queue(&sch->q);
include/net/sch_generic.h
1470
static inline void qdisc_synchronize(const struct Qdisc *q)
include/net/sch_generic.h
1472
while (test_bit(__QDISC_STATE_SCHED, &q->state))
include/net/sch_generic.h
184
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
include/net/sch_generic.h
186
return q->flags & TCQ_F_CPUSTATS;
include/net/sch_generic.h
193
return !READ_ONCE(qdisc->q.qlen);
include/net/sch_generic.h
493
struct Qdisc *q;
include/net/sch_generic.h
537
static inline int qdisc_qlen(const struct Qdisc *q)
include/net/sch_generic.h
539
return q->q.qlen;
include/net/sch_generic.h
542
static inline int qdisc_qlen_sum(const struct Qdisc *q)
include/net/sch_generic.h
544
__u32 qlen = q->qstats.qlen;
include/net/sch_generic.h
547
if (qdisc_is_percpu_stats(q)) {
include/net/sch_generic.h
549
qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
include/net/sch_generic.h
551
qlen += q->q.qlen;
include/net/sch_generic.h
564
return &qdisc->q.lock;
include/net/sch_generic.h
569
struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
include/net/sch_generic.h
571
return q;
include/net/sch_generic.h
597
static inline void sch_tree_lock(struct Qdisc *q)
include/net/sch_generic.h
599
if (q->flags & TCQ_F_MQROOT)
include/net/sch_generic.h
600
spin_lock_bh(qdisc_lock(q));
include/net/sch_generic.h
602
spin_lock_bh(qdisc_root_sleeping_lock(q));
include/net/sch_generic.h
605
static inline void sch_tree_unlock(struct Qdisc *q)
include/net/sch_generic.h
607
if (q->flags & TCQ_F_MQROOT)
include/net/sch_generic.h
608
spin_unlock_bh(qdisc_lock(q));
include/net/sch_generic.h
610
spin_unlock_bh(qdisc_root_sleeping_lock(q));
include/net/sch_generic.h
748
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
include/net/sch_generic.h
756
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
include/net/sch_generic.h
759
q->flags &= ~TCQ_F_OFFLOADED;
include/net/sch_generic.h
838
const struct Qdisc *q = rcu_dereference(txq->qdisc);
include/net/sch_generic.h
840
if (!qdisc_is_empty(q)) {
include/net/sctp/stream_interleave.h
37
void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
include/net/sctp/stream_sched.h
33
void (*enqueue)(struct sctp_outq *q, struct sctp_datamsg *msg);
include/net/sctp/stream_sched.h
35
struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
include/net/sctp/stream_sched.h
37
void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
include/net/sctp/stream_sched.h
51
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch);
include/net/sctp/stream_sched.h
53
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
include/net/sctp/structs.h
1113
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
include/net/sctp/structs.h
1119
void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
include/net/sctp/structs.h
1121
static inline void sctp_outq_cork(struct sctp_outq *q)
include/net/sctp/structs.h
1123
q->cork = 1;
include/net/xdp.h
322
netmem_ref q[XDP_BULK_QUEUE_SIZE];
include/net/xdp.h
461
page_pool_put_netmem_bulk(bq->q, bq->count);
include/scsi/scsi_cmnd.h
397
struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
include/scsi/scsi_device.h
329
if (__rq->q->disk) \
include/scsi/scsi_device.h
331
__rq->q->disk->disk_name, ##a); \
include/scsi/scsi_device.h
410
extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
include/scsi/scsi_dh.h
81
static inline const char *scsi_dh_attached_handler_name(struct request_queue *q,
include/scsi/scsi_transport_sas.h
100
struct request_queue *q;
include/sound/asequencer.h
73
#define snd_seq_queue_sync_port(q) ((q) + 16)
include/sound/core.h
409
#define snd_pci_quirk_name(q) ((q)->name)
include/sound/core.h
417
#define snd_pci_quirk_name(q) ""
include/trace/events/block.h
135
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
include/trace/events/block.h
208
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
include/trace/events/block.h
309
TP_PROTO(struct request_queue *q, struct bio *bio),
include/trace/events/block.h
311
TP_ARGS(q, bio),
include/trace/events/block.h
428
TP_PROTO(struct request_queue *q),
include/trace/events/block.h
430
TP_ARGS(q),
include/trace/events/block.h
445
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
include/trace/events/block.h
447
TP_ARGS(q, depth, explicit),
include/trace/events/block.h
473
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
include/trace/events/block.h
475
TP_ARGS(q, depth, explicit)
include/trace/events/block.h
585
__entry->dev = disk_devt(rq->q->disk);
include/trace/events/block.h
639
TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
include/trace/events/block.h
642
TP_ARGS(q, zno, sector, nr_sectors),
include/trace/events/block.h
652
__entry->dev = disk_devt(q->disk);
include/trace/events/block.h
666
TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
include/trace/events/block.h
669
TP_ARGS(q, zno, sector, nr_sectors)
include/trace/events/block.h
674
TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
include/trace/events/block.h
677
TP_ARGS(q, zno, sector, nr_sectors)
include/trace/events/block.h
99
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
include/trace/events/qdisc.h
104
TP_PROTO(struct Qdisc *q),
include/trace/events/qdisc.h
106
TP_ARGS(q),
include/trace/events/qdisc.h
109
__string( dev, qdisc_dev(q)->name )
include/trace/events/qdisc.h
110
__string( kind, q->ops->id )
include/trace/events/qdisc.h
118
__entry->parent = q->parent;
include/trace/events/qdisc.h
119
__entry->handle = q->handle;
include/trace/events/qdisc.h
79
TP_PROTO(struct Qdisc *q),
include/trace/events/qdisc.h
81
TP_ARGS(q),
include/trace/events/qdisc.h
84
__string( dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)" )
include/trace/events/qdisc.h
85
__string( kind, q->ops->id )
include/trace/events/qdisc.h
93
__entry->parent = q->parent;
include/trace/events/qdisc.h
94
__entry->handle = q->handle;
include/trace/events/sunrpc.h
418
TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q),
include/trace/events/sunrpc.h
420
TP_ARGS(task, q),
include/trace/events/sunrpc.h
429
__string(q_name, rpc_qname(q))
include/trace/events/sunrpc.h
457
const struct rpc_wait_queue *q \
include/trace/events/sunrpc.h
459
TP_ARGS(task, q))
include/trace/events/v4l2.h
182
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/v4l2.h
183
TP_ARGS(q, vb),
include/trace/events/v4l2.h
205
struct v4l2_fh *owner = q->owner;
include/trace/events/v4l2.h
246
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/v4l2.h
247
TP_ARGS(q, vb)
include/trace/events/v4l2.h
251
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/v4l2.h
252
TP_ARGS(q, vb)
include/trace/events/v4l2.h
256
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/v4l2.h
257
TP_ARGS(q, vb)
include/trace/events/v4l2.h
261
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/v4l2.h
262
TP_ARGS(q, vb)
include/trace/events/vb2.h
12
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/vb2.h
13
TP_ARGS(q, vb),
include/trace/events/vb2.h
26
__entry->owner = q->owner;
include/trace/events/vb2.h
27
__entry->queued_count = q->queued_count;
include/trace/events/vb2.h
29
atomic_read(&q->owned_by_drv_count);
include/trace/events/vb2.h
47
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/vb2.h
48
TP_ARGS(q, vb)
include/trace/events/vb2.h
52
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/vb2.h
53
TP_ARGS(q, vb)
include/trace/events/vb2.h
57
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/vb2.h
58
TP_ARGS(q, vb)
include/trace/events/vb2.h
62
TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb),
include/trace/events/vb2.h
63
TP_ARGS(q, vb)
include/xen/arm/interface.h
16
typedef struct { union { type *p; uint64_aligned_t q; }; } \
init/initramfs.c
105
q = kmalloc_obj(struct hash);
init/initramfs.c
106
if (!q)
init/initramfs.c
108
q->major = major;
init/initramfs.c
109
q->minor = minor;
init/initramfs.c
110
q->ino = ino;
init/initramfs.c
111
q->mode = mode;
init/initramfs.c
112
strscpy(q->name, name);
init/initramfs.c
113
q->next = NULL;
init/initramfs.c
114
*p = q;
init/initramfs.c
121
struct hash **p, *q;
init/initramfs.c
124
q = *p;
init/initramfs.c
125
*p = q->next;
init/initramfs.c
126
kfree(q);
init/initramfs.c
93
struct hash **p, *q;
init/main.c
334
const char *val, *q;
init/main.c
357
q = strpbrk(val, " \t\r\n") ? "\"" : "";
init/main.c
359
xbc_namebuf, q, val, q);
io_uring/cmd_net.c
105
if (skb_queue_empty_lockless(q))
io_uring/cmd_net.c
109
scoped_guard(spinlock_irq, &q->lock) {
io_uring/cmd_net.c
110
skb_queue_walk_safe(q, skb, tmp) {
io_uring/cmd_net.c
114
__skb_unlink(skb, q);
io_uring/cmd_net.c
130
scoped_guard(spinlock_irqsave, &q->lock)
io_uring/cmd_net.c
131
skb_queue_splice(&list, q);
io_uring/cmd_net.c
94
struct sk_buff_head *q = &sk->sk_error_queue;
io_uring/futex.c
157
static void io_futex_wakev_fn(struct wake_q_head *wake_q, struct futex_q *q)
io_uring/futex.c
159
struct io_kiocb *req = q->wake_data;
io_uring/futex.c
164
if (unlikely(!__futex_wake_mark(q)))
io_uring/futex.c
208
static void io_futex_wake_fn(struct wake_q_head *wake_q, struct futex_q *q)
io_uring/futex.c
210
struct io_futex_data *ifd = container_of(q, struct io_futex_data, q);
io_uring/futex.c
213
if (unlikely(!__futex_wake_mark(q)))
io_uring/futex.c
26
struct futex_q q;
io_uring/futex.c
295
ifd->q = futex_q_init;
io_uring/futex.c
296
ifd->q.bitset = iof->futex_mask;
io_uring/futex.c
297
ifd->q.wake = io_futex_wake_fn;
io_uring/futex.c
301
&ifd->q, NULL, NULL);
io_uring/futex.c
98
if (!futex_unqueue(&ifd->q))
ipc/sem.c
1072
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
ipc/sem.c
1075
struct sembuf *sop = q->blocking;
ipc/sem.c
1110
struct sem_queue *q;
ipc/sem.c
1120
list_for_each_entry(q, l, list) {
ipc/sem.c
1128
list_for_each_entry(q, &sma->pending_alter, list) {
ipc/sem.c
1129
semcnt += check_qop(sma, semnum, q, count_zero);
ipc/sem.c
1132
list_for_each_entry(q, &sma->pending_const, list) {
ipc/sem.c
1133
semcnt += check_qop(sma, semnum, q, count_zero);
ipc/sem.c
1146
struct sem_queue *q, *tq;
ipc/sem.c
1163
list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
ipc/sem.c
1164
unlink_queue(sma, q);
ipc/sem.c
1165
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
ipc/sem.c
1168
list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
ipc/sem.c
1169
unlink_queue(sma, q);
ipc/sem.c
1170
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
ipc/sem.c
1174
list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
ipc/sem.c
1175
unlink_queue(sma, q);
ipc/sem.c
1176
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
ipc/sem.c
1178
list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
ipc/sem.c
1179
unlink_queue(sma, q);
ipc/sem.c
1180
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
ipc/sem.c
285
struct sem_queue *q, *tq;
ipc/sem.c
295
list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
ipc/sem.c
297
curr = &sma->sems[q->sops[0].sem_num];
ipc/sem.c
299
list_add_tail(&q->list, &curr->pending_alter);
ipc/sem.c
646
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
ipc/sem.c
655
sops = q->sops;
ipc/sem.c
656
nsops = q->nsops;
ipc/sem.c
657
un = q->undo;
ipc/sem.c
686
pid = q->pid;
ipc/sem.c
699
q->blocking = sop;
ipc/sem.c
719
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
ipc/sem.c
727
sops = q->sops;
ipc/sem.c
728
nsops = q->nsops;
ipc/sem.c
729
un = q->undo;
ipc/sem.c
731
if (unlikely(q->dupsop))
ipc/sem.c
732
return perform_atomic_semop_slow(sma, q);
ipc/sem.c
776
ipc_update_pid(&curr->sempid, q->pid);
ipc/sem.c
782
q->blocking = sop;
ipc/sem.c
786
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
ipc/sem.c
791
sleeper = get_task_struct(q->sleeper);
ipc/sem.c
794
smp_store_release(&q->status, error);
ipc/sem.c
799
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
ipc/sem.c
801
list_del(&q->list);
ipc/sem.c
802
if (q->nsops > 1)
ipc/sem.c
816
static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
ipc/sem.c
823
if (q->nsops > 1)
ipc/sem.c
857
struct sem_queue *q, *tmp;
ipc/sem.c
866
list_for_each_entry_safe(q, tmp, pending_list, list) {
ipc/sem.c
867
int error = perform_atomic_semop(sma, q);
ipc/sem.c
872
unlink_queue(sma, q);
ipc/sem.c
874
wake_up_sem_queue_prepare(q, error, wake_q);
ipc/sem.c
951
struct sem_queue *q, *tmp;
ipc/sem.c
961
list_for_each_entry_safe(q, tmp, pending_list, list) {
ipc/sem.c
974
error = perform_atomic_semop(sma, q);
ipc/sem.c
980
unlink_queue(sma, q);
ipc/sem.c
986
do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
ipc/sem.c
987
restart = check_restart(sma, q);
ipc/sem.c
990
wake_up_sem_queue_prepare(q, error, wake_q);
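The ipc/sem.c entries above center on perform_atomic_semop(), which applies a caller's whole sembuf array or none of it, parking the sem_queue on pending_alter/pending_const otherwise. A minimal userspace sketch of that all-or-nothing contract through the standard SysV API (the two-semaphore set is illustrative):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
        int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
        /* Two increments in one semop(): perform_atomic_semop()
         * applies both or neither. */
        struct sembuf ops[2] = {
                { .sem_num = 0, .sem_op = 1, .sem_flg = 0 },
                { .sem_num = 1, .sem_op = 1, .sem_flg = 0 },
        };

        if (id < 0 || semop(id, ops, 2) < 0)
                return 1;
        printf("sem0=%d sem1=%d\n",
               semctl(id, 0, GETVAL), semctl(id, 1, GETVAL));
        semctl(id, 0, IPC_RMID);
        return 0;
}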
kernel/audit.c
969
while ((skb = __skb_dequeue(&dest->q)) != NULL)
kernel/audit.h
249
struct sk_buff_head q;
kernel/audit_tree.c
613
struct list_head *p, *q;
kernel/audit_tree.c
620
for (p = tree->chunks.next; p != &tree->chunks; p = q) {
kernel/audit_tree.c
622
q = p->next;
kernel/auditfilter.c
1079
static void audit_list_rules(int seq, struct sk_buff_head *q)
kernel/auditfilter.c
1098
skb_queue_tail(q, skb);
kernel/auditfilter.c
1104
skb_queue_tail(q, skb);
kernel/auditfilter.c
1188
skb_queue_head_init(&dest->q);
kernel/auditfilter.c
1191
audit_list_rules(seq, &dest->q);
kernel/auditfilter.c
1196
skb_queue_purge(&dest->q);
kernel/auditsc.c
274
struct audit_tree_refs *q;
kernel/auditsc.c
286
for (q = p; q != ctx->trees; q = q->next, n = 31) {
kernel/auditsc.c
288
audit_put_chunk(q->c[n]);
kernel/auditsc.c
289
q->c[n] = NULL;
kernel/auditsc.c
293
audit_put_chunk(q->c[n]);
kernel/auditsc.c
294
q->c[n] = NULL;
kernel/auditsc.c
302
struct audit_tree_refs *p, *q;
kernel/auditsc.c
304
for (p = ctx->first_trees; p; p = q) {
kernel/auditsc.c
305
q = p->next;
kernel/bpf/cpumap.c
52
void *q[CPU_MAP_BULK_SIZE];
kernel/bpf/cpumap.c
725
struct ptr_ring *q;
kernel/bpf/cpumap.c
733
q = rcpu->queue;
kernel/bpf/cpumap.c
734
spin_lock(&q->producer_lock);
kernel/bpf/cpumap.c
737
struct xdp_frame *xdpf = bq->q[i];
kernel/bpf/cpumap.c
740
err = __ptr_ring_produce(q, xdpf);
kernel/bpf/cpumap.c
748
spin_unlock(&q->producer_lock);
kernel/bpf/cpumap.c
779
bq->q[bq->count++] = xdpf;
kernel/bpf/devmap.c
392
struct xdp_frame *xdpf = bq->q[i];
kernel/bpf/devmap.c
398
to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
kernel/bpf/devmap.c
403
sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
kernel/bpf/devmap.c
416
xdp_return_frame_rx_napi(bq->q[i]);
kernel/bpf/devmap.c
489
bq->q[bq->count++] = xdpf;
kernel/bpf/devmap.c
58
struct xdp_frame *q[DEV_MAP_BULK_SIZE];
kernel/bpf/liveness.c
710
struct live_stack_query *q = &env->liveness->live_stack_query;
kernel/bpf/liveness.c
714
memset(q, 0, sizeof(*q));
kernel/bpf/liveness.c
719
q->instances[frame] = instance;
kernel/bpf/liveness.c
721
q->curframe = st->curframe;
kernel/bpf/liveness.c
722
q->insn_idx = st->insn_idx;
kernel/bpf/liveness.c
734
struct live_stack_query *q = &env->liveness->live_stack_query;
kernel/bpf/liveness.c
739
curframe_instance = q->instances[q->curframe];
kernel/bpf/liveness.c
740
if (is_live_before(curframe_instance, q->insn_idx, frameno, spi))
kernel/bpf/liveness.c
743
for (i = frameno; i < q->curframe; i++) {
kernel/bpf/liveness.c
745
instance = q->instances[i];
kernel/bpf/tnum.c
289
u64 tmax, j, p, q, r, s, v, u, w, res;
kernel/bpf/tnum.c
308
q = U64_MAX << k;
kernel/bpf/tnum.c
309
r = q & z; /* positions > k matched to z */
kernel/bpf/tnum.c
310
s = ~q & t.value; /* positions <= k matched to t.value */
kernel/bpf/tnum.c
316
q = U64_MAX << k;
kernel/bpf/tnum.c
317
r = q & t.mask & z; /* unknown positions > k, matched to z */
kernel/bpf/tnum.c
318
s = q & ~t.mask; /* known positions > k, set to 1 */
kernel/cgroup/cpuset-v1.c
339
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
kernel/cgroup/cpuset-v1.c
341
return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
kernel/cgroup/cpuset-v1.c
342
nodes_subset(p->mems_allowed, q->mems_allowed) &&
kernel/cgroup/cpuset-v1.c
343
is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
kernel/cgroup/cpuset-v1.c
344
is_mem_exclusive(p) <= is_mem_exclusive(q);
kernel/cgroup/pids.c
168
struct pids_cgroup *p, *q;
kernel/cgroup/pids.c
193
for (q = pids; q != p; q = parent_pids(q))
kernel/cgroup/pids.c
194
pids_cancel(q, num);
kernel/crash_reserve.c
229
char *q;
kernel/crash_reserve.c
239
q = end_p - strlen(suffix_tbl[i]);
kernel/crash_reserve.c
240
if (!strncmp(q, suffix_tbl[i],
kernel/crash_reserve.c
246
q = end_p - strlen(suffix);
kernel/crash_reserve.c
247
if (!strncmp(q, suffix, strlen(suffix)))
kernel/events/uprobes.c
1565
struct list_head *pos, *q;
kernel/events/uprobes.c
1571
list_for_each_safe(pos, q, &delayed_uprobe_list) {
kernel/events/uprobes.c
261
struct list_head *pos, *q;
kernel/events/uprobes.c
267
list_for_each_safe(pos, q, &delayed_uprobe_list) {
kernel/futex/core.c
1000
__futex_unqueue(q);
kernel/futex/core.c
1002
BUG_ON(!q->pi_state);
kernel/futex/core.c
1003
put_pi_state(q->pi_state);
kernel/futex/core.c
1004
q->pi_state = NULL;
kernel/futex/core.c
852
void __futex_unqueue(struct futex_q *q)
kernel/futex/core.c
856
if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
kernel/futex/core.c
858
lockdep_assert_held(q->lock_ptr);
kernel/futex/core.c
860
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
kernel/futex/core.c
861
plist_del(&q->list, &hb->chain);
kernel/futex/core.c
866
void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb)
kernel/futex/core.c
879
q->lock_ptr = &hb->lock;
kernel/futex/core.c
891
void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
kernel/futex/core.c
906
plist_node_init(&q->list, prio);
kernel/futex/core.c
907
plist_add(&q->list, &hb->chain);
kernel/futex/core.c
908
q->task = task;
kernel/futex/core.c
922
int futex_unqueue(struct futex_q *q)
kernel/futex/core.c
936
lock_ptr = READ_ONCE(q->lock_ptr);
kernel/futex/core.c
952
if (unlikely(lock_ptr != q->lock_ptr)) {
kernel/futex/core.c
956
__futex_unqueue(q);
kernel/futex/core.c
958
BUG_ON(q->pi_state);
kernel/futex/core.c
967
void futex_q_lockptr_lock(struct futex_q *q)
kernel/futex/core.c
976
lock_ptr = READ_ONCE(q->lock_ptr);
kernel/futex/core.c
979
if (unlikely(lock_ptr != q->lock_ptr)) {
kernel/futex/core.c
989
void futex_unqueue_pi(struct futex_q *q)
kernel/futex/core.c
999
if (!plist_node_empty(&q->list))
kernel/futex/futex.h
163
typedef void (futex_wake_fn)(struct wake_q_head *wake_q, struct futex_q *q);
kernel/futex/futex.h
220
extern void futex_q_lockptr_lock(struct futex_q *q);
kernel/futex/futex.h
264
struct futex_q *q, union futex_key *key2,
kernel/futex/futex.h
266
extern void futex_do_wait(struct futex_q *q, struct hrtimer_sleeper *timeout);
kernel/futex/futex.h
267
extern bool __futex_wake_mark(struct futex_q *q);
kernel/futex/futex.h
268
extern void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q);
kernel/futex/futex.h
291
extern void __futex_unqueue(struct futex_q *q);
kernel/futex/futex.h
292
extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
kernel/futex/futex.h
294
extern int futex_unqueue(struct futex_q *q);
kernel/futex/futex.h
311
static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
kernel/futex/futex.h
315
__futex_queue(q, hb, task);
kernel/futex/futex.h
319
extern void futex_unqueue_pi(struct futex_q *q);
kernel/futex/futex.h
361
extern void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb);
kernel/futex/futex.h
375
extern int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked);
kernel/futex/futex.h
425
struct futex_q q;
kernel/futex/pi.c
1028
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
kernel/futex/pi.c
1029
spin_unlock(q.lock_ptr);
kernel/futex/pi.c
1035
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q);
kernel/futex/pi.c
1036
raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);
kernel/futex/pi.c
1047
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
kernel/futex/pi.c
1068
if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
kernel/futex/pi.c
1076
futex_q_lockptr_lock(&q);
kernel/futex/pi.c
1086
res = fixup_pi_owner(uaddr, &q, !ret);
kernel/futex/pi.c
1094
futex_unqueue_pi(&q);
kernel/futex/pi.c
1095
spin_unlock(q.lock_ptr);
kernel/futex/pi.c
1096
if (q.drop_hb_ref) {
kernel/futex/pi.c
1097
CLASS(hb, hb)(&q.key);
kernel/futex/pi.c
671
static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
kernel/futex/pi.c
674
struct futex_pi_state *pi_state = q->pi_state;
kernel/futex/pi.c
792
spin_unlock(q->lock_ptr);
kernel/futex/pi.c
809
futex_q_lockptr_lock(q);
kernel/futex/pi.c
843
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
kernel/futex/pi.c
846
struct futex_pi_state *pi_state = q->pi_state;
kernel/futex/pi.c
849
lockdep_assert_held(q->lock_ptr);
kernel/futex/pi.c
852
ret = __fixup_pi_state_owner(uaddr, q, argowner);
kernel/futex/pi.c
872
int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked)
kernel/futex/pi.c
883
if (q->pi_state->owner != current)
kernel/futex/pi.c
884
return fixup_pi_state_owner(uaddr, q, current);
kernel/futex/pi.c
896
if (q->pi_state->owner == current)
kernel/futex/pi.c
897
return fixup_pi_state_owner(uaddr, q, NULL);
kernel/futex/pi.c
903
if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
kernel/futex/pi.c
904
return fixup_pi_state_owner(uaddr, q, current);
kernel/futex/pi.c
923
struct futex_q q = futex_q_init;
kernel/futex/pi.c
937
ret = get_futex_key(uaddr, flags, &q.key, FUTEX_WRITE);
kernel/futex/pi.c
943
CLASS(hb, hb)(&q.key);
kernel/futex/pi.c
945
futex_q_lock(&q, hb);
kernel/futex/pi.c
947
ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
kernel/futex/pi.c
983
WARN_ON(!q.pi_state);
kernel/futex/pi.c
988
__futex_queue(&q, hb, current);
kernel/futex/pi.c
991
ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
kernel/futex/requeue.c
109
old = atomic_read_acquire(&q->requeue_state);
kernel/futex/requeue.c
126
} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
kernel/futex/requeue.c
128
q->pi_state = pi_state;
kernel/futex/requeue.c
132
static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
kernel/futex/requeue.c
136
old = atomic_read_acquire(&q->requeue_state);
kernel/futex/requeue.c
154
} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
kernel/futex/requeue.c
159
rcuwait_wake_up(&q->requeue_wait);
kernel/futex/requeue.c
163
static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
kernel/futex/requeue.c
167
old = atomic_read_acquire(&q->requeue_state);
kernel/futex/requeue.c
180
} while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
kernel/futex/requeue.c
185
rcuwait_wait_event(&q->requeue_wait,
kernel/futex/requeue.c
186
atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
kernel/futex/requeue.c
189
(void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
kernel/futex/requeue.c
198
return atomic_read(&q->requeue_state);
kernel/futex/requeue.c
230
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
kernel/futex/requeue.c
235
q->key = *key;
kernel/futex/requeue.c
236
__futex_unqueue(q);
kernel/futex/requeue.c
238
WARN_ON(!q->rt_waiter);
kernel/futex/requeue.c
239
q->rt_waiter = NULL;
kernel/futex/requeue.c
245
q->drop_hb_ref = true;
kernel/futex/requeue.c
246
q->lock_ptr = &hb->lock;
kernel/futex/requeue.c
247
task = READ_ONCE(q->task);
kernel/futex/requeue.c
250
futex_requeue_pi_complete(q, 1);
kernel/futex/requeue.c
709
struct futex_q *q,
kernel/futex/requeue.c
721
WARN_ON_ONCE(&hb->lock != q->lock_ptr);
kernel/futex/requeue.c
727
plist_del(&q->list, &hb->chain);
kernel/futex/requeue.c
76
void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
kernel/futex/requeue.c
786
struct futex_q q = futex_q_init;
kernel/futex/requeue.c
812
q.bitset = bitset;
kernel/futex/requeue.c
813
q.rt_waiter = &rt_waiter;
kernel/futex/requeue.c
814
q.requeue_pi_key = &key2;
kernel/futex/requeue.c
820
ret = futex_wait_setup(uaddr, val, flags, &q, &key2, current);
kernel/futex/requeue.c
825
futex_do_wait(&q, to);
kernel/futex/requeue.c
827
switch (futex_requeue_pi_wakeup_sync(&q)) {
kernel/futex/requeue.c
830
CLASS(hb, hb)(&q.key);
kernel/futex/requeue.c
833
ret = handle_early_requeue_pi_wakeup(hb, &q, to);
kernel/futex/requeue.c
840
if (q.pi_state && (q.pi_state->owner != current)) {
kernel/futex/requeue.c
841
futex_q_lockptr_lock(&q);
kernel/futex/requeue.c
842
ret = fixup_pi_owner(uaddr2, &q, true);
kernel/futex/requeue.c
847
put_pi_state(q.pi_state);
kernel/futex/requeue.c
848
spin_unlock(q.lock_ptr);
kernel/futex/requeue.c
85
plist_del(&q->list, &hb1->chain);
kernel/futex/requeue.c
859
pi_mutex = &q.pi_state->pi_mutex;
kernel/futex/requeue.c
868
futex_q_lockptr_lock(&q);
kernel/futex/requeue.c
874
res = fixup_pi_owner(uaddr2, &q, !ret);
kernel/futex/requeue.c
88
plist_add(&q->list, &hb2->chain);
kernel/futex/requeue.c
882
futex_unqueue_pi(&q);
kernel/futex/requeue.c
883
spin_unlock(q.lock_ptr);
kernel/futex/requeue.c
89
q->lock_ptr = &hb2->lock;
kernel/futex/requeue.c
900
if (q.drop_hb_ref) {
kernel/futex/requeue.c
901
CLASS(hb, hb)(&q.key);
kernel/futex/requeue.c
96
q->key = *key2;
kernel/futex/requeue.c
99
static inline bool futex_requeue_pi_prepare(struct futex_q *q,
kernel/futex/syscalls.c
247
futexv[i].q = futex_q_init;
kernel/futex/syscalls.c
248
futexv[i].q.wake = wake;
kernel/futex/syscalls.c
249
futexv[i].q.wake_data = wake_data;
kernel/futex/waitwake.c
110
bool __futex_wake_mark(struct futex_q *q)
kernel/futex/waitwake.c
112
if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
kernel/futex/waitwake.c
115
__futex_unqueue(q);
kernel/futex/waitwake.c
123
smp_store_release(&q->lock_ptr, NULL);
kernel/futex/waitwake.c
134
void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q)
kernel/futex/waitwake.c
136
struct task_struct *p = q->task;
kernel/futex/waitwake.c
140
if (!__futex_wake_mark(q)) {
kernel/futex/waitwake.c
341
void futex_do_wait(struct futex_q *q, struct hrtimer_sleeper *timeout)
kernel/futex/waitwake.c
351
if (likely(!plist_node_empty(&q->list))) {
kernel/futex/waitwake.c
379
if (!futex_unqueue(&v[i].q))
kernel/futex/waitwake.c
435
&vs[i].q.key, FUTEX_READ);
kernel/futex/waitwake.c
445
struct futex_q *q = &vs[i].q;
kernel/futex/waitwake.c
449
CLASS(hb, hb)(&q->key);
kernel/futex/waitwake.c
451
futex_q_lock(q, hb);
kernel/futex/waitwake.c
460
futex_queue(q, hb, current);
kernel/futex/waitwake.c
515
if (!READ_ONCE(vs->q.lock_ptr))
kernel/futex/waitwake.c
592
struct futex_q *q, union futex_key *key2,
kernel/futex/waitwake.c
617
ret = get_futex_key(uaddr, flags, &q->key, FUTEX_READ);
kernel/futex/waitwake.c
623
CLASS(hb, hb)(&q->key);
kernel/futex/waitwake.c
625
futex_q_lock(q, hb);
kernel/futex/waitwake.c
647
if (key2 && futex_match(&q->key, key2)) {
kernel/futex/waitwake.c
660
futex_queue(q, hb, task);
kernel/futex/waitwake.c
669
struct futex_q q = futex_q_init;
kernel/futex/waitwake.c
675
q.bitset = bitset;
kernel/futex/waitwake.c
682
ret = futex_wait_setup(uaddr, val, flags, &q, NULL, current);
kernel/futex/waitwake.c
687
futex_do_wait(&q, to);
kernel/futex/waitwake.c
690
if (!futex_unqueue(&q))
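The futex_q machinery above (queue, wake, unqueue, PI fixup) implements the futex(2) contract visible from userspace: FUTEX_WAIT sleeps only if *uaddr still holds the expected value, the race that futex_wait_setup() closes under the hash-bucket lock. A hedged userspace sketch; the flag variable and thread layout are illustrative:

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t flag;

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
        (void)arg;
        /* Sleeps only while the kernel still sees flag == 0. */
        while (__atomic_load_n(&flag, __ATOMIC_ACQUIRE) == 0)
                futex(&flag, FUTEX_WAIT, 0);
        puts("woken");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        sleep(1);
        __atomic_store_n(&flag, 1, __ATOMIC_RELEASE);
        futex(&flag, FUTEX_WAKE, 1);    /* wake at most one waiter */
        pthread_join(&t, NULL);
        return 0;
}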
kernel/latencytop.c
122
int q, same = 1;
kernel/latencytop.c
130
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
kernel/latencytop.c
131
unsigned long record = lat->backtrace[q];
kernel/latencytop.c
133
if (latency_record[i].backtrace[q] != record) {
kernel/latencytop.c
179
int i, q;
kernel/latencytop.c
207
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
kernel/latencytop.c
208
unsigned long record = lat.backtrace[q];
kernel/latencytop.c
210
if (mylat->backtrace[q] != record) {
kernel/latencytop.c
252
int q;
kernel/latencytop.c
255
for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
kernel/latencytop.c
256
unsigned long bt = lr->backtrace[q];
kernel/ptrace.c
715
struct sigqueue *q;
kernel/ptrace.c
744
list_for_each_entry(q, &pending->list, list) {
kernel/ptrace.c
747
copy_siginfo(&info, &q->info);
kernel/sched/sched.h
3763
extern void swake_up_all_locked(struct swait_queue_head *q);
kernel/sched/sched.h
3764
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
kernel/sched/swait.c
10
raw_spin_lock_init(&q->lock);
kernel/sched/swait.c
103
long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
kernel/sched/swait.c
108
raw_spin_lock_irqsave(&q->lock, flags);
kernel/sched/swait.c
11
lockdep_set_class_and_name(&q->lock, key, name);
kernel/sched/swait.c
117
__prepare_to_swait(q, wait);
kernel/sched/swait.c
12
INIT_LIST_HEAD(&q->task_list);
kernel/sched/swait.c
120
raw_spin_unlock_irqrestore(&q->lock, flags);
kernel/sched/swait.c
126
void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
kernel/sched/swait.c
133
void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
kernel/sched/swait.c
140
raw_spin_lock_irqsave(&q->lock, flags);
kernel/sched/swait.c
142
raw_spin_unlock_irqrestore(&q->lock, flags);
kernel/sched/swait.c
22
void swake_up_locked(struct swait_queue_head *q, int wake_flags)
kernel/sched/swait.c
26
if (list_empty(&q->task_list))
kernel/sched/swait.c
29
curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
kernel/sched/swait.c
42
void swake_up_all_locked(struct swait_queue_head *q)
kernel/sched/swait.c
44
while (!list_empty(&q->task_list))
kernel/sched/swait.c
45
swake_up_locked(q, 0);
kernel/sched/swait.c
48
void swake_up_one(struct swait_queue_head *q)
kernel/sched/swait.c
52
raw_spin_lock_irqsave(&q->lock, flags);
kernel/sched/swait.c
53
swake_up_locked(q, 0);
kernel/sched/swait.c
54
raw_spin_unlock_irqrestore(&q->lock, flags);
kernel/sched/swait.c
62
void swake_up_all(struct swait_queue_head *q)
kernel/sched/swait.c
67
raw_spin_lock_irq(&q->lock);
kernel/sched/swait.c
68
list_splice_init(&q->task_list, &tmp);
kernel/sched/swait.c
7
void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
kernel/sched/swait.c
78
raw_spin_unlock_irq(&q->lock);
kernel/sched/swait.c
79
raw_spin_lock_irq(&q->lock);
kernel/sched/swait.c
81
raw_spin_unlock_irq(&q->lock);
kernel/sched/swait.c
85
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
kernel/sched/swait.c
89
list_add_tail(&wait->task_list, &q->task_list);
kernel/sched/swait.c
92
void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
kernel/sched/swait.c
96
raw_spin_lock_irqsave(&q->lock, flags);
kernel/sched/swait.c
97
__prepare_to_swait(q, wait);
kernel/sched/swait.c
99
raw_spin_unlock_irqrestore(&q->lock, flags);
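Taken together, the kernel/sched/swait.c entries above are the whole simple-wait-queue API. A kernel-context sketch assembled from the calls quoted here plus the <linux/swait.h> declarations; demo_wq and the condition flag are illustrative, and this is a sketch rather than drop-in code:

#include <linux/swait.h>
#include <linux/sched.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static bool condition;

static void demo_waiter(void)
{
        DECLARE_SWAITQUEUE(wait);

        while (!condition) {
                prepare_to_swait_exclusive(&demo_wq, &wait, TASK_INTERRUPTIBLE);
                if (condition) /* re-check after queueing to avoid a lost wakeup */
                        break;
                schedule();
        }
        finish_swait(&demo_wq, &wait);
}

static void demo_waker(void)
{
        condition = true;
        swake_up_one(&demo_wq); /* wakes at most one exclusive waiter */
}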
kernel/signal.c
1046
struct sigqueue *q;
kernel/signal.c
1087
q = sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
kernel/signal.c
1089
if (q) {
kernel/signal.c
1090
list_add_tail(&q->list, &pending->list);
kernel/signal.c
1093
clear_siginfo(&q->info);
kernel/signal.c
1094
q->info.si_signo = sig;
kernel/signal.c
1095
q->info.si_errno = 0;
kernel/signal.c
1096
q->info.si_code = SI_USER;
kernel/signal.c
1097
q->info.si_pid = task_tgid_nr_ns(current,
kernel/signal.c
1100
q->info.si_uid =
kernel/signal.c
1106
clear_siginfo(&q->info);
kernel/signal.c
1107
q->info.si_signo = sig;
kernel/signal.c
1108
q->info.si_errno = 0;
kernel/signal.c
1109
q->info.si_code = SI_KERNEL;
kernel/signal.c
1110
q->info.si_pid = 0;
kernel/signal.c
1111
q->info.si_uid = 0;
kernel/signal.c
1114
copy_siginfo(&q->info, info);
kernel/signal.c
1903
struct sigqueue *q, *n;
kernel/signal.c
1908
list_for_each_entry_safe(q, n, &pending->list, list) {
kernel/signal.c
1909
int sig = q->info.si_signo;
kernel/signal.c
1911
if (likely(q->info.si_code != SI_TIMER)) {
kernel/signal.c
1915
list_del_init(&q->list);
kernel/signal.c
1916
__sigqueue_free(q);
kernel/signal.c
1932
bool posixtimer_init_sigqueue(struct sigqueue *q)
kernel/signal.c
1938
clear_siginfo(&q->info);
kernel/signal.c
1939
__sigqueue_init(q, ucounts, SIGQUEUE_PREALLOC);
kernel/signal.c
1943
static void posixtimer_queue_sigqueue(struct sigqueue *q, struct task_struct *t, enum pid_type type)
kernel/signal.c
1946
int sig = q->info.si_signo;
kernel/signal.c
1950
list_add_tail(&q->list, &pending->list);
kernel/signal.c
1978
struct sigqueue *q = &tmr->sigq;
kernel/signal.c
1979
int sig = q->info.si_signo;
kernel/signal.c
2009
if (!list_empty(&q->list)) {
kernel/signal.c
2042
posixtimer_sigqueue_getref(q);
kernel/signal.c
2043
posixtimer_sig_ignore(t, q);
kernel/signal.c
2061
if (unlikely(!list_empty(&q->list))) {
kernel/signal.c
2076
posixtimer_sigqueue_getref(q);
kernel/signal.c
2080
posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
kernel/signal.c
2083
trace_signal_generate(sig, &q->info, t, tmr->it_pid_type != PIDTYPE_PID, result);
kernel/signal.c
2087
static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q)
kernel/signal.c
2089
struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
kernel/signal.c
2147
static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q) { }
kernel/signal.c
433
static void __sigqueue_init(struct sigqueue *q, struct ucounts *ucounts,
kernel/signal.c
436
INIT_LIST_HEAD(&q->list);
kernel/signal.c
437
q->flags = sigqueue_flags;
kernel/signal.c
438
q->ucounts = ucounts;
kernel/signal.c
450
struct sigqueue *q;
kernel/signal.c
455
q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
kernel/signal.c
456
if (!q) {
kernel/signal.c
461
__sigqueue_init(q, ucounts, 0);
kernel/signal.c
462
return q;
kernel/signal.c
465
static void __sigqueue_free(struct sigqueue *q)
kernel/signal.c
467
if (q->flags & SIGQUEUE_PREALLOC) {
kernel/signal.c
468
posixtimer_sigqueue_putref(q);
kernel/signal.c
471
if (q->ucounts) {
kernel/signal.c
472
dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
kernel/signal.c
473
q->ucounts = NULL;
kernel/signal.c
475
kmem_cache_free(sigqueue_cachep, q);
kernel/signal.c
480
struct sigqueue *q;
kernel/signal.c
484
q = list_entry(queue->list.next, struct sigqueue , list);
kernel/signal.c
485
list_del_init(&q->list);
kernel/signal.c
486
__sigqueue_free(q);
kernel/signal.c
556
struct sigqueue *q, *first = NULL;
kernel/signal.c
562
list_for_each_entry(q, &list->list, list) {
kernel/signal.c
563
if (q->info.si_signo == sig) {
kernel/signal.c
566
first = q;
kernel/signal.c
672
struct sigqueue *q, *sync = NULL;
kernel/signal.c
683
list_for_each_entry(q, &pending->list, list) {
kernel/signal.c
685
if ((q->info.si_code > SI_USER) &&
kernel/signal.c
686
(sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
kernel/signal.c
687
sync = q;
kernel/signal.c
696
list_for_each_entry_continue(q, &pending->list, list) {
kernel/signal.c
697
if (q->info.si_signo == sync->info.si_signo)
kernel/signal.c
738
static inline void posixtimer_sig_ignore(struct task_struct *tsk, struct sigqueue *q);
kernel/signal.c
740
static void sigqueue_free_ignored(struct task_struct *tsk, struct sigqueue *q)
kernel/signal.c
742
if (likely(!(q->flags & SIGQUEUE_PREALLOC) || q->info.si_code != SI_TIMER))
kernel/signal.c
743
__sigqueue_free(q);
kernel/signal.c
745
posixtimer_sig_ignore(tsk, q);
kernel/signal.c
751
struct sigqueue *q, *n;
kernel/signal.c
761
list_for_each_entry_safe(q, n, &s->list, list) {
kernel/signal.c
762
if (sigismember(mask, q->info.si_signo)) {
kernel/signal.c
763
list_del_init(&q->list);
kernel/signal.c
764
sigqueue_free_ignored(p, q);
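Each struct sigqueue above holds one queued siginfo; real-time signals allocate a fresh sigqueue per send rather than coalescing, which is why the signal.c lines above repeatedly walk pending->list. A hedged userspace view of the same behavior via sigqueue(3) (printf in a signal handler is not async-signal-safe and is used only for brevity):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
        (void)ctx;
        printf("sig=%d value=%d\n", sig, info->si_value.sival_int);
}

int main(void)
{
        struct sigaction sa;
        union sigval v = { .sival_int = 42 };

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGRTMIN, &sa, NULL);
        sigqueue(getpid(), SIGRTMIN, v);        /* delivered to self before return */
        return 0;
}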
kernel/trace/blktrace.c
1002
return blk_trace_bio_get_cgid(rq->q, rq->bio);
kernel/trace/blktrace.c
1027
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1079
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1101
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
kernel/trace/blktrace.c
1107
bt = rcu_dereference(q->blk_trace);
kernel/trace/blktrace.c
1115
blk_trace_bio_get_cgid(q, bio));
kernel/trace/blktrace.c
1120
struct request_queue *q, struct bio *bio)
kernel/trace/blktrace.c
1122
blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
kernel/trace/blktrace.c
1148
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
kernel/trace/blktrace.c
1153
bt = rcu_dereference(q->blk_trace);
kernel/trace/blktrace.c
1159
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
kernel/trace/blktrace.c
1165
bt = rcu_dereference(q->blk_trace);
kernel/trace/blktrace.c
1180
static void blk_add_trace_zone_plug(void *ignore, struct request_queue *q,
kernel/trace/blktrace.c
1187
bt = rcu_dereference(q->blk_trace);
kernel/trace/blktrace.c
1196
static void blk_add_trace_zone_unplug(void *ignore, struct request_queue *q,
kernel/trace/blktrace.c
1203
bt = rcu_dereference(q->blk_trace);
kernel/trace/blktrace.c
1213
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
kernel/trace/blktrace.c
1217
bt = rcu_dereference(q->blk_trace);
kernel/trace/blktrace.c
1225
blk_trace_bio_get_cgid(q, bio));
kernel/trace/blktrace.c
1242
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
kernel/trace/blktrace.c
1247
bt = rcu_dereference(q->blk_trace);
kernel/trace/blktrace.c
1260
sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
kernel/trace/blktrace.c
1283
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1290
r.device_to = cpu_to_be32(disk_devt(rq->q->disk));
kernel/trace/blktrace.c
1314
bt = rcu_dereference(rq->q->blk_trace);
kernel/trace/blktrace.c
1880
static int blk_trace_remove_queue(struct request_queue *q)
kernel/trace/blktrace.c
1884
bt = rcu_replace_pointer(q->blk_trace, NULL,
kernel/trace/blktrace.c
1885
lockdep_is_held(&q->debugfs_mutex));
kernel/trace/blktrace.c
1893
blk_trace_free(q, bt);
kernel/trace/blktrace.c
1900
static int blk_trace_setup_queue(struct request_queue *q,
kernel/trace/blktrace.c
1919
rcu_assign_pointer(q->blk_trace, bt);
kernel/trace/blktrace.c
1924
blk_trace_free(q, bt);
kernel/trace/blktrace.c
2042
struct request_queue *q = bdev_get_queue(bdev);
kernel/trace/blktrace.c
2046
blk_debugfs_lock_nomemsave(q);
kernel/trace/blktrace.c
2048
bt = rcu_dereference_protected(q->blk_trace,
kernel/trace/blktrace.c
2049
lockdep_is_held(&q->debugfs_mutex));
kernel/trace/blktrace.c
2067
blk_debugfs_unlock_nomemrestore(q);
kernel/trace/blktrace.c
2076
struct request_queue *q = bdev_get_queue(bdev);
kernel/trace/blktrace.c
2098
memflags = blk_debugfs_lock(q);
kernel/trace/blktrace.c
2100
bt = rcu_dereference_protected(q->blk_trace,
kernel/trace/blktrace.c
2101
lockdep_is_held(&q->debugfs_mutex));
kernel/trace/blktrace.c
2108
ret = blk_trace_setup_queue(q, bdev);
kernel/trace/blktrace.c
2110
ret = blk_trace_remove_queue(q);
kernel/trace/blktrace.c
2116
ret = blk_trace_setup_queue(q, bdev);
kernel/trace/blktrace.c
2117
bt = rcu_dereference_protected(q->blk_trace,
kernel/trace/blktrace.c
2118
lockdep_is_held(&q->debugfs_mutex));
kernel/trace/blktrace.c
2133
blk_debugfs_unlock(q, memflags);
kernel/trace/blktrace.c
469
static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
kernel/trace/blktrace.c
478
debugfs_lookup_and_remove("dropped", q->debugfs_dir);
kernel/trace/blktrace.c
479
debugfs_lookup_and_remove("msg", q->debugfs_dir);
kernel/trace/blktrace.c
535
static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
kernel/trace/blktrace.c
539
blk_trace_free(q, bt);
kernel/trace/blktrace.c
543
static int __blk_trace_remove(struct request_queue *q)
kernel/trace/blktrace.c
547
bt = rcu_replace_pointer(q->blk_trace, NULL,
kernel/trace/blktrace.c
548
lockdep_is_held(&q->debugfs_mutex));
kernel/trace/blktrace.c
552
blk_trace_cleanup(q, bt);
kernel/trace/blktrace.c
557
int blk_trace_remove(struct request_queue *q)
kernel/trace/blktrace.c
561
blk_debugfs_lock_nomemsave(q);
kernel/trace/blktrace.c
562
ret = __blk_trace_remove(q);
kernel/trace/blktrace.c
563
blk_debugfs_unlock_nomemrestore(q);
kernel/trace/blktrace.c
652
static struct blk_trace *blk_trace_setup_prepare(struct request_queue *q,
kernel/trace/blktrace.c
661
lockdep_assert_held(&q->debugfs_mutex);
kernel/trace/blktrace.c
667
if (rcu_dereference_protected(q->blk_trace,
kernel/trace/blktrace.c
668
lockdep_is_held(&q->debugfs_mutex))) {
kernel/trace/blktrace.c
693
dir = q->debugfs_dir;
kernel/trace/blktrace.c
725
blk_trace_free(q, bt);
kernel/trace/blktrace.c
730
static void blk_trace_setup_finalize(struct request_queue *q,
kernel/trace/blktrace.c
758
rcu_assign_pointer(q->blk_trace, bt);
kernel/trace/blktrace.c
762
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
kernel/trace/blktrace.c
788
memflags = blk_debugfs_lock(q);
kernel/trace/blktrace.c
789
bt = blk_trace_setup_prepare(q, name, dev, buts.buf_size, buts.buf_nr,
kernel/trace/blktrace.c
792
blk_debugfs_unlock(q, memflags);
kernel/trace/blktrace.c
795
blk_trace_setup_finalize(q, name, 1, bt, &buts2);
kernel/trace/blktrace.c
797
blk_debugfs_unlock(q, memflags);
kernel/trace/blktrace.c
800
blk_trace_remove(q);
kernel/trace/blktrace.c
807
static int blk_trace_setup2(struct request_queue *q, char *name, dev_t dev,
kernel/trace/blktrace.c
823
memflags = blk_debugfs_lock(q);
kernel/trace/blktrace.c
824
bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
kernel/trace/blktrace.c
827
blk_debugfs_unlock(q, memflags);
kernel/trace/blktrace.c
830
blk_trace_setup_finalize(q, name, 2, bt, &buts2);
kernel/trace/blktrace.c
831
blk_debugfs_unlock(q, memflags);
kernel/trace/blktrace.c
834
blk_trace_remove(q);
kernel/trace/blktrace.c
841
static int compat_blk_trace_setup(struct request_queue *q, char *name,
kernel/trace/blktrace.c
865
memflags = blk_debugfs_lock(q);
kernel/trace/blktrace.c
866
bt = blk_trace_setup_prepare(q, name, dev, buts2.buf_size, buts2.buf_nr,
kernel/trace/blktrace.c
869
blk_debugfs_unlock(q, memflags);
kernel/trace/blktrace.c
872
blk_trace_setup_finalize(q, name, 1, bt, &buts2);
kernel/trace/blktrace.c
873
blk_debugfs_unlock(q, memflags);
kernel/trace/blktrace.c
876
blk_trace_remove(q);
kernel/trace/blktrace.c
884
static int __blk_trace_startstop(struct request_queue *q, int start)
kernel/trace/blktrace.c
888
bt = rcu_dereference_protected(q->blk_trace,
kernel/trace/blktrace.c
889
lockdep_is_held(&q->debugfs_mutex));
kernel/trace/blktrace.c
899
int blk_trace_startstop(struct request_queue *q, int start)
kernel/trace/blktrace.c
903
blk_debugfs_lock_nomemsave(q);
kernel/trace/blktrace.c
904
ret = __blk_trace_startstop(q, start);
kernel/trace/blktrace.c
905
blk_debugfs_unlock_nomemrestore(q);
kernel/trace/blktrace.c
926
struct request_queue *q = bdev_get_queue(bdev);
kernel/trace/blktrace.c
933
ret = blk_trace_setup2(q, b, bdev->bd_dev, bdev, arg);
kernel/trace/blktrace.c
937
ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
kernel/trace/blktrace.c
942
ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
kernel/trace/blktrace.c
949
ret = blk_trace_startstop(q, start);
kernel/trace/blktrace.c
952
ret = blk_trace_remove(q);
kernel/trace/blktrace.c
966
void blk_trace_shutdown(struct request_queue *q)
kernel/trace/blktrace.c
968
if (rcu_dereference_protected(q->blk_trace,
kernel/trace/blktrace.c
969
lockdep_is_held(&q->debugfs_mutex)))
kernel/trace/blktrace.c
970
__blk_trace_remove(q);
kernel/trace/blktrace.c
974
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
kernel/trace/blktrace.c
980
bt = rcu_dereference_protected(q->blk_trace, 1);
kernel/trace/blktrace.c
990
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
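The blktrace entries above repeat one RCU idiom: q->blk_trace is published with rcu_assign_pointer(), sampled by tracepoint handlers under rcu_dereference(), and torn down with rcu_replace_pointer() while holding q->debugfs_mutex. A kernel-context sketch of the same publish/replace pattern on an illustrative struct demo:

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo { int cfg; };

static struct demo __rcu *demo_ptr;
static DEFINE_MUTEX(demo_mutex);

static int demo_read(void)
{
        struct demo *d;
        int cfg = -1;

        rcu_read_lock();
        d = rcu_dereference(demo_ptr);
        if (d)
                cfg = d->cfg;
        rcu_read_unlock();
        return cfg;
}

static void demo_replace(struct demo *new)
{
        struct demo *old;

        mutex_lock(&demo_mutex);
        old = rcu_replace_pointer(demo_ptr, new,
                                  lockdep_is_held(&demo_mutex));
        mutex_unlock(&demo_mutex);
        if (old) {
                synchronize_rcu();      /* wait out existing readers */
                kfree(old);
        }
}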
kernel/trace/trace.c
3059
char *q;
kernel/trace/trace.c
3068
new_fmt = q = iter->fmt;
kernel/trace/trace.c
3070
if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
kernel/trace/trace.c
3074
q += iter->fmt - new_fmt;
kernel/trace/trace.c
3078
*q++ = *p++;
kernel/trace/trace.c
3083
*q++ = *p++;
kernel/trace/trace.c
3085
*q++ = *p++;
kernel/trace/trace.c
3086
*q++ = 'x';
kernel/trace/trace.c
3090
*q = '\0';
kernel/trace/trace_boot.c
564
char *q;
kernel/trace/trace_boot.c
567
q = kstrdup(p, GFP_KERNEL);
kernel/trace/trace_boot.c
568
if (!q)
kernel/trace/trace_boot.c
570
if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
kernel/trace/trace_boot.c
574
kfree(q);
kernel/trace/trace_boot.c
577
q = kstrdup(p, GFP_KERNEL);
kernel/trace/trace_boot.c
578
if (!q)
kernel/trace/trace_boot.c
580
if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
kernel/trace/trace_boot.c
584
kfree(q);
kernel/trace/trace_events_filter.c
1643
char q;
kernel/trace/trace_events_filter.c
1802
q = str[i];
kernel/trace/trace_events_filter.c
1804
q = 0;
kernel/trace/trace_events_filter.c
1807
if (q && str[i] == q)
kernel/trace/trace_events_filter.c
1809
if (!q && (str[i] == ')' || str[i] == '&' ||
kernel/trace/trace_events_filter.c
1814
if (q)
kernel/trace/trace_events_filter.c
1949
char q = str[i];
kernel/trace/trace_events_filter.c
1971
if (str[i] == q)
kernel/trace/trace_events_inject.c
105
char q = str[i];
kernel/trace/trace_events_inject.c
116
if (str[i] == q)
kernel/watch_queue.c
320
struct watch_type_filter *q;
kernel/watch_queue.c
366
q = wfilter->filters;
kernel/watch_queue.c
371
q->type = tf[i].type;
kernel/watch_queue.c
372
q->info_filter = tf[i].info_filter;
kernel/watch_queue.c
373
q->info_mask = tf[i].info_mask;
kernel/watch_queue.c
374
q->subtype_filter[0] = tf[i].subtype_filter[0];
kernel/watch_queue.c
375
__set_bit(q->type, wfilter->type_filter);
kernel/watch_queue.c
376
q++;
lib/bch.c
779
const struct gf_poly *b, struct gf_poly *q)
lib/bch.c
782
q->deg = a->deg-b->deg;
lib/bch.c
786
memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int));
lib/bch.c
788
q->deg = 0;
lib/bch.c
789
q->c[0] = 0;
lib/bch.c
865
struct gf_poly *q = bch->poly_2t[1];
lib/bch.c
884
gf_poly_div(bch, f, gcd, q);
lib/bch.c
888
gf_poly_copy(*h, q);
lib/bootconfig.c
864
char *p, *q;
lib/bootconfig.c
870
q = strpbrk(p, "{}=+;:\n#");
lib/bootconfig.c
871
if (!q) {
lib/bootconfig.c
878
c = *q;
lib/bootconfig.c
879
*q++ = '\0';
lib/bootconfig.c
883
if (*q++ != '=') {
lib/bootconfig.c
887
q - 2);
lib/bootconfig.c
892
ret = xbc_parse_kv(&p, q, c);
lib/bootconfig.c
895
ret = xbc_open_brace(&p, q);
lib/bootconfig.c
898
q = skip_comment(q);
lib/bootconfig.c
902
ret = xbc_parse_key(&p, q);
lib/bootconfig.c
905
ret = xbc_close_brace(&p, q);
lib/crypto/curve25519-hacl64.c
34
u64 q = x_xor_y | x_sub_y_xor_y;
lib/crypto/curve25519-hacl64.c
35
u64 x_xor_q = x ^ q;
lib/crypto/curve25519-hacl64.c
545
u64 *q, u8 byt)
lib/crypto/curve25519-hacl64.c
550
addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);
lib/crypto/curve25519-hacl64.c
557
u64 *nqpq2, u64 *q, u8 byt)
lib/crypto/curve25519-hacl64.c
560
ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
lib/crypto/curve25519-hacl64.c
562
ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
lib/crypto/curve25519-hacl64.c
567
u64 *q, u8 byt, u32 i)
lib/crypto/curve25519-hacl64.c
571
nqpq2, q, byt);
lib/crypto/curve25519-hacl64.c
578
u64 *nqpq2, u64 *q,
lib/crypto/curve25519-hacl64.c
583
ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,
lib/crypto/curve25519-hacl64.c
588
static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
lib/crypto/curve25519-hacl64.c
595
point_copy(nqpq, q);
lib/crypto/curve25519-hacl64.c
597
ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
lib/crypto/curve25519-hacl64.c
764
u64 *q;
lib/crypto/curve25519-hacl64.c
767
q = buf0;
lib/crypto/curve25519-hacl64.c
779
ladder_cmult(nq, scalar, q);
lib/crypto/gf128mul.c
57
#define gf128mul_dat(q) { \
lib/crypto/gf128mul.c
58
q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
lib/crypto/gf128mul.c
59
q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
lib/crypto/gf128mul.c
60
q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
lib/crypto/gf128mul.c
61
q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
lib/crypto/gf128mul.c
62
q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
lib/crypto/gf128mul.c
63
q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
lib/crypto/gf128mul.c
64
q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
lib/crypto/gf128mul.c
65
q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
lib/crypto/gf128mul.c
66
q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
lib/crypto/gf128mul.c
67
q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
lib/crypto/gf128mul.c
68
q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
lib/crypto/gf128mul.c
69
q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
lib/crypto/gf128mul.c
70
q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
lib/crypto/gf128mul.c
71
q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
lib/crypto/gf128mul.c
72
q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
lib/crypto/gf128mul.c
73
q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
lib/crypto/gf128mul.c
74
q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
lib/crypto/gf128mul.c
75
q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
lib/crypto/gf128mul.c
76
q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
lib/crypto/gf128mul.c
77
q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
lib/crypto/gf128mul.c
78
q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
lib/crypto/gf128mul.c
79
q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
lib/crypto/gf128mul.c
80
q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
lib/crypto/gf128mul.c
81
q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
lib/crypto/gf128mul.c
82
q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
lib/crypto/gf128mul.c
83
q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
lib/crypto/gf128mul.c
84
q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
lib/crypto/gf128mul.c
85
q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
lib/crypto/gf128mul.c
86
q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
lib/crypto/gf128mul.c
87
q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
lib/crypto/gf128mul.c
88
q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
lib/crypto/gf128mul.c
89
q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
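gf128mul_dat() above is an X-macro: one macro enumerates all 256 byte values so several lookup tables can be generated from a single layout by instantiating it with different per-entry expressions. A 16-entry toy version of the pattern; double_b/triple_b are illustrative stand-ins, not the kernel's GF(2^128) arithmetic:

#include <stdint.h>
#include <stdio.h>

#define GEN16(q) \
        { q(0), q(1), q(2),  q(3),  q(4),  q(5),  q(6),  q(7), \
          q(8), q(9), q(10), q(11), q(12), q(13), q(14), q(15) }

#define double_b(i) ((uint8_t)((i) * 2))
#define triple_b(i) ((uint8_t)((i) * 3))

static const uint8_t tab2[16] = GEN16(double_b);
static const uint8_t tab3[16] = GEN16(triple_b);

int main(void)
{
        printf("tab2[7]=%u tab3[7]=%u\n", tab2[7], tab3[7]);
        return 0;
}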
lib/crypto/mpi/longlong.h
1007
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
1047
: "=r" ((USItype)(q)), \
lib/crypto/mpi/longlong.h
1107
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
1132
: "=&r" ((USItype)(q)), \
lib/crypto/mpi/longlong.h
1177
#define sdiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
1184
: "=g" (q), "=g" (r) \
lib/crypto/mpi/longlong.h
1308
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
1338
(q) = (UWtype) __q1 * __ll_B | __q0; \
lib/crypto/mpi/longlong.h
1345
#define udiv_qrnnd(q, r, nh, nl, d) \
lib/crypto/mpi/longlong.h
1348
(q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
lib/crypto/mpi/longlong.h
144
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
146
: "=r" ((USItype)(q)), \
lib/crypto/mpi/longlong.h
162
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
164
(q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
lib/crypto/mpi/longlong.h
287
#define udiv_qrnnd(q, r, nh, nl, d) \
lib/crypto/mpi/longlong.h
289
: "=g" ((USItype)(q)), \
lib/crypto/mpi/longlong.h
339
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
341
(q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
lib/crypto/mpi/longlong.h
379
#define sdiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
388
(q) = __xx.__i.__l; (r) = __xx.__i.__h; \
lib/crypto/mpi/longlong.h
421
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
423
: "=a" (q), \
lib/crypto/mpi/longlong.h
486
#define udiv_qrnnd(q, r, nh, nl, d) \
lib/crypto/mpi/longlong.h
496
(r) = __rq.__i.__l; (q) = __rq.__i.__h; \
lib/crypto/mpi/longlong.h
541
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
543
: "=d" ((USItype)(q)), \
lib/crypto/mpi/longlong.h
549
#define sdiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
551
: "=d" ((USItype)(q)), \
lib/crypto/mpi/longlong.h
622
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
629
(r) = (n0) - __q.__l * (d); (q) = __q.__l; })
lib/crypto/mpi/longlong.h
705
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
714
(r) = __xx.__i.__l; (q) = __xx.__i.__h; })
lib/crypto/mpi/longlong.h
827
#define sdiv_qrnnd(q, r, nh, nl, d) \
lib/crypto/mpi/longlong.h
829
: "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
lib/crypto/mpi/longlong.h
983
#define udiv_qrnnd(q, r, n1, n0, d) \
lib/crypto/mpi/longlong.h
992
(q) = __q; \
lib/crypto/mpi/mpi-internal.h
108
#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
lib/crypto/mpi/mpi-internal.h
130
(q) = _q; \
lib/crypto/mpi/mpih-div.c
248
mpi_limb_t q;
lib/crypto/mpi/mpih-div.c
260
q = ~(mpi_limb_t) 0;
lib/crypto/mpi/mpih-div.c
266
qp[i] = q;
lib/crypto/mpi/mpih-div.c
272
udiv_qrnnd(q, r, n1, n0, d1);
lib/crypto/mpi/mpih-div.c
273
umul_ppmm(n1, n0, d0, q);
lib/crypto/mpi/mpih-div.c
280
q--;
lib/crypto/mpi/mpih-div.c
287
qp[i] = q;
lib/crypto/mpi/mpih-div.c
315
mpi_limb_t q;
lib/crypto/mpi/mpih-div.c
331
q = ~(mpi_limb_t) 0;
lib/crypto/mpi/mpih-div.c
335
udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
lib/crypto/mpi/mpih-div.c
336
umul_ppmm(n1, n0, d1, q);
lib/crypto/mpi/mpih-div.c
341
q--;
lib/crypto/mpi/mpih-div.c
353
cy_limb = mpihelp_submul_1(np, dp, dsize, q);
lib/crypto/mpi/mpih-div.c
357
q--;
lib/crypto/mpi/mpih-div.c
360
qp[i] = q;
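udiv_qrnnd(q, r, n1, n0, d) above divides the two-limb value n1:n0 by d, leaving quotient and remainder; longlong.h supplies per-architecture asm and mpih-div.c builds multi-limb long division from it. A portable sketch of the contract, assuming GCC/Clang __int128 and n1 < d so the quotient fits one limb:

#include <stdint.h>
#include <stdio.h>

static void udiv_qrnnd_sketch(uint64_t *q, uint64_t *r,
                              uint64_t n1, uint64_t n0, uint64_t d)
{
        unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

        *q = (uint64_t)(n / d);
        *r = (uint64_t)(n % d);
}

int main(void)
{
        uint64_t q, r;

        udiv_qrnnd_sketch(&q, &r, 1, 0, 3);     /* 2^64 / 3 */
        printf("q=%llu r=%llu\n",
               (unsigned long long)q, (unsigned long long)r);
        return 0;
}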
lib/crypto/powerpc/curve25519.h
25
asmlinkage void x25519_cswap(fe51 p, fe51 q, unsigned int bit);
lib/crypto/x86/curve25519.h
30
u64 q = x_xor_y | x_sub_y_xor_y;
lib/crypto/x86/curve25519.h
31
u64 x_xor_q = x ^ q;
lib/crypto/x86/curve25519.h
970
static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2)
lib/crypto/x86/curve25519.h
975
u64 *x1 = q;
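Both curve25519 implementations above lean on branch-free selection: the hacl64 lines derive an all-ones/all-zeros mask from a comparison, and x25519_cswap() swaps field elements under a secret bit. A hedged single-limb sketch of the idiom (the real routines swap whole fe51/u64 arrays):

#include <stdint.h>
#include <stdio.h>

static void cswap_limb(uint64_t *a, uint64_t *b, uint64_t bit)
{
        uint64_t mask = 0 - bit;        /* 1 -> all ones, 0 -> all zeros */
        uint64_t t = mask & (*a ^ *b);

        *a ^= t;        /* swapped iff bit == 1, no data-dependent branch */
        *b ^= t;
}

int main(void)
{
        uint64_t a = 1, b = 2;

        cswap_limb(&a, &b, 1);
        printf("a=%llu b=%llu\n",
               (unsigned long long)a, (unsigned long long)b);
        return 0;
}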
lib/inflate.c
346
register struct huft *q; /* points to current table */
lib/inflate.c
449
q = (struct huft *)NULL; /* ditto */
lib/inflate.c
488
if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
lib/inflate.c
498
*t = q + 1; /* link to list for huft_free() */
lib/inflate.c
499
*(t = &(q->v.t)) = (struct huft *)NULL;
lib/inflate.c
500
u[h] = ++q; /* table starts after link */
lib/inflate.c
509
r.v.t = q; /* pointer to this table */
lib/inflate.c
537
q[j] = r;
lib/inflate.c
574
register struct huft *p, *q;
lib/inflate.c
581
q = (--p)->v.t;
lib/inflate.c
583
p = q;
lib/logic_iomem.c
192
MAKE_FALLBACK(q, 64);
lib/logic_iomem.c
251
MAKE_OP(q, 64);
lib/lwq.c
102
struct lwq q;
lib/lwq.c
108
lwq_init(&q);
lib/lwq.c
111
threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
lib/lwq.c
118
if (lwq_enqueue(&t->n, &q))
lib/lwq.c
119
wake_up_var(&q);
lib/lwq.c
131
t = lwq_dequeue(&q, struct tnode, n);
lib/lwq.c
137
l = lwq_dequeue_all(&q);
lib/lwq.c
147
lwq_enqueue_batch(l, &q);
lib/lwq.c
149
while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) {
lib/lwq.c
19
struct llist_node *__lwq_dequeue(struct lwq *q)
lib/lwq.c
23
if (lwq_empty(q))
lib/lwq.c
25
spin_lock(&q->lock);
lib/lwq.c
26
this = q->ready;
lib/lwq.c
27
if (!this && !llist_empty(&q->new)) {
lib/lwq.c
29
smp_store_release(&q->ready, (void *)1);
lib/lwq.c
30
this = llist_reverse_order(llist_del_all(&q->new));
lib/lwq.c
32
q->ready = NULL;
lib/lwq.c
35
q->ready = llist_next(this);
lib/lwq.c
36
spin_unlock(&q->lock);
lib/lwq.c
49
struct llist_node *lwq_dequeue_all(struct lwq *q)
lib/lwq.c
53
if (lwq_empty(q))
lib/lwq.c
56
spin_lock(&q->lock);
lib/lwq.c
57
r = q->ready;
lib/lwq.c
58
q->ready = NULL;
lib/lwq.c
59
t = llist_del_all(&q->new);
lib/lwq.c
60
spin_unlock(&q->lock);
lib/lwq.c
84
struct lwq *q = qv;
lib/lwq.c
89
wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL);
lib/lwq.c
91
if (lwq_enqueue(&t->n, q))
lib/lwq.c
92
wake_up_var(q);
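The lib/lwq.c lines above, mostly from its built-in self-test, show the whole light-weight-queue usage pattern: lwq_enqueue() reports whether the queue was previously empty, so the producer knows a sleeping consumer may need a wakeup. A kernel-context sketch with an illustrative struct item:

#include <linux/lwq.h>
#include <linux/wait_bit.h>

struct item {
        struct llist_node n;
        int payload;
};

static struct lwq work_q;       /* lwq_init(&work_q) at setup time */

static void producer(struct item *it)
{
        /* true return: queue went non-empty, a wakeup may be needed */
        if (lwq_enqueue(&it->n, &work_q))
                wake_up_var(&work_q);
}

static struct item *consumer(void)
{
        return lwq_dequeue(&work_q, struct item, n);    /* NULL if empty */
}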
lib/math/cordic.c
55
coord.q = 0;
lib/math/cordic.c
73
valtmp = coord.i - (coord.q >> iter);
lib/math/cordic.c
74
coord.q += (coord.i >> iter);
lib/math/cordic.c
77
valtmp = coord.i + (coord.q >> iter);
lib/math/cordic.c
78
coord.q -= (coord.i >> iter);
lib/math/cordic.c
85
coord.q *= signx;
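The cordic.c lines above are the classic CORDIC pseudo-rotation: each step rotates the (i, q) vector by +/- atan(2^-iter) using only shifts and adds while an angle accumulator is driven toward zero. A userspace sketch of the same recurrence; the 8-entry table, millidegree scaling, and gain constant are illustrative choices:

#include <stdint.h>
#include <stdio.h>

static const int32_t atan_tab[8] = {    /* atan(2^-k) in millidegrees */
        45000, 26565, 14036, 7125, 3576, 1790, 895, 448,
};

static void cordic_sketch(int32_t theta_mdeg, int32_t *i, int32_t *q)
{
        int32_t x = 39797, y = 0;       /* ~ 65536 * 0.60725, CORDIC gain */
        int k;

        for (k = 0; k < 8; k++) {
                int32_t t;

                if (theta_mdeg >= 0) {
                        t = x - (y >> k);
                        y += (x >> k);
                        theta_mdeg -= atan_tab[k];
                } else {
                        t = x + (y >> k);
                        y -= (x >> k);
                        theta_mdeg += atan_tab[k];
                }
                x = t;
        }
        *i = x; /* ~ cos(theta) * 65536 */
        *q = y; /* ~ sin(theta) * 65536 */
}

int main(void)
{
        int32_t i, q;

        cordic_sketch(30000, &i, &q);   /* 30 degrees */
        printf("cos~%d sin~%d (scale 65536)\n", i, q);
        return 0;
}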
lib/raid6/avx2.c
120
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
lib/raid6/avx2.c
122
asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
lib/raid6/avx2.c
144
u8 *p, *q;
lib/raid6/avx2.c
149
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/avx2.c
184
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
lib/raid6/avx2.c
185
asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
lib/raid6/avx2.c
196
u8 *p, *q;
lib/raid6/avx2.c
201
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/avx2.c
247
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
lib/raid6/avx2.c
248
asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
lib/raid6/avx2.c
250
asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
lib/raid6/avx2.c
251
asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
lib/raid6/avx2.c
276
u8 *p, *q;
lib/raid6/avx2.c
281
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/avx2.c
339
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
lib/raid6/avx2.c
341
asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
lib/raid6/avx2.c
343
asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
lib/raid6/avx2.c
345
asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
lib/raid6/avx2.c
357
u8 *p, *q;
lib/raid6/avx2.c
362
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/avx2.c
37
u8 *p, *q;
lib/raid6/avx2.c
42
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/avx2.c
421
asm volatile("prefetchnta %0" :: "m" (q[d]));
lib/raid6/avx2.c
422
asm volatile("prefetchnta %0" :: "m" (q[d+64]));
lib/raid6/avx2.c
450
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
lib/raid6/avx2.c
451
asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
lib/raid6/avx2.c
452
asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
lib/raid6/avx2.c
453
asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
lib/raid6/avx2.c
454
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
lib/raid6/avx2.c
455
asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
lib/raid6/avx2.c
456
asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
lib/raid6/avx2.c
457
asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
lib/raid6/avx2.c
74
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
lib/raid6/avx2.c
86
u8 *p, *q;
lib/raid6/avx2.c
91
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/avx512.c
103
u8 *p, *q;
lib/raid6/avx512.c
108
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/avx512.c
151
: "m" (q[d]), "m" (p[d]));
lib/raid6/avx512.c
172
u8 *p, *q;
lib/raid6/avx512.c
177
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/avx512.c
223
: "m" (p[d]), "m" (p[d+64]), "m" (q[d]),
lib/raid6/avx512.c
224
"m" (q[d+64]));
lib/raid6/avx512.c
235
u8 *p, *q;
lib/raid6/avx512.c
240
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/avx512.c
307
: "m" (q[d]), "m" (q[d+64]), "m" (p[d]),
lib/raid6/avx512.c
331
u8 *p, *q;
lib/raid6/avx512.c
336
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/avx512.c
413
"m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
lib/raid6/avx512.c
414
"m" (q[d+128]), "m" (q[d+192]));
lib/raid6/avx512.c
425
u8 *p, *q;
lib/raid6/avx512.c
430
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/avx512.c
45
u8 *p, *q;
lib/raid6/avx512.c
50
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/avx512.c
503
: "m" (q[d]), "m" (q[d+128]));
lib/raid6/avx512.c
547
"m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
lib/raid6/avx512.c
548
"m" (q[d+128]), "m" (q[d+192]));
lib/raid6/avx512.c
92
: "m" (p[d]), "m" (q[d]));
lib/raid6/loongarch_simd.c
105
asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
lib/raid6/loongarch_simd.c
106
asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
lib/raid6/loongarch_simd.c
107
asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
lib/raid6/loongarch_simd.c
108
asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
lib/raid6/loongarch_simd.c
118
u8 *p, *q;
lib/raid6/loongarch_simd.c
123
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/loongarch_simd.c
237
"+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
lib/raid6/loongarch_simd.c
238
"+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
lib/raid6/loongarch_simd.c
267
u8 *p, *q;
lib/raid6/loongarch_simd.c
272
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/loongarch_simd.c
316
asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
lib/raid6/loongarch_simd.c
317
asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
lib/raid6/loongarch_simd.c
327
u8 *p, *q;
lib/raid6/loongarch_simd.c
332
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/loongarch_simd.c
36
u8 *p, *q;
lib/raid6/loongarch_simd.c
407
"+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
lib/raid6/loongarch_simd.c
41
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/mmx.c
124
asm volatile("movq %%mm4,%0" : "=m" (q[d]));
lib/raid6/mmx.c
125
asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
lib/raid6/mmx.c
38
u8 *p, *q;
lib/raid6/mmx.c
43
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/mmx.c
65
asm volatile("movq %%mm4,%0" : "=m" (q[d]));
lib/raid6/mmx.c
86
u8 *p, *q;
lib/raid6/mmx.c
91
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/neon.h
15
void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
lib/raid6/neon.h
19
void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
lib/raid6/recov.c
22
u8 *p, *q, *dp, *dq;
lib/raid6/recov.c
28
q = (u8 *)ptrs[disks-1];
lib/raid6/recov.c
46
ptrs[disks-1] = q;
lib/raid6/recov.c
55
qx = qmul[*q ^ *dq];
lib/raid6/recov.c
58
p++; q++;
lib/raid6/recov.c
66
u8 *p, *q, *dq;
lib/raid6/recov.c
70
q = (u8 *)ptrs[disks-1];
lib/raid6/recov.c
82
ptrs[disks-1] = q;
lib/raid6/recov.c
89
*p++ ^= *dq = qmul[*q ^ *dq];
lib/raid6/recov.c
90
q++; dq++;
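Everything under lib/raid6/ above revolves around two syndromes: P is the XOR of the data blocks and Q is the sum over z of g^z * D_z in GF(2^8) with generator g = 2 and reduction polynomial 0x11d; recov.c's qmul[*q ^ *dq] applies the inverse power to rebuild a lost block. A single-byte sketch of the generation recurrence that the SIMD variants vectorize page-wide:

#include <stdint.h>
#include <stdio.h>

static uint8_t gf_mul2(uint8_t x)
{
        /* multiply by g = 2 in GF(2^8), polynomial 0x11d */
        return (uint8_t)((x << 1) ^ ((x & 0x80) ? 0x1d : 0));
}

static void compute_pq(const uint8_t *d, int ndisks, uint8_t *p, uint8_t *q)
{
        uint8_t wp = d[ndisks - 1], wq = d[ndisks - 1];
        int z;

        /* Horner evaluation from the highest data disk down, the same
         * recurrence the per-arch gen_syndrome loops unroll. */
        for (z = ndisks - 2; z >= 0; z--) {
                wp ^= d[z];
                wq = (uint8_t)(gf_mul2(wq) ^ d[z]);
        }
        *p = wp;
        *q = wq;
}

int main(void)
{
        uint8_t d[3] = { 0x12, 0x34, 0x56 }, p, q;

        compute_pq(d, 3, &p, &q);
        printf("P=%02x Q=%02x\n", p, q);
        return 0;
}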
lib/raid6/recov_avx2.c
130
q += 64;
lib/raid6/recov_avx2.c
134
asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
lib/raid6/recov_avx2.c
177
q += 32;
lib/raid6/recov_avx2.c
189
u8 *p, *q, *dq;
lib/raid6/recov_avx2.c
19
u8 *p, *q, *dp, *dq;
lib/raid6/recov_avx2.c
194
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_avx2.c
206
ptrs[disks-1] = q;
lib/raid6/recov_avx2.c
219
asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
lib/raid6/recov_avx2.c
220
asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));
lib/raid6/recov_avx2.c
25
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_avx2.c
265
q += 64;
lib/raid6/recov_avx2.c
269
asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
lib/raid6/recov_avx2.c
295
q += 32;
lib/raid6/recov_avx2.c
43
ptrs[disks-1] = q;
lib/raid6/recov_avx2.c
57
asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
lib/raid6/recov_avx2.c
58
asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
lib/raid6/recov_avx512.c
155
q += 128;
lib/raid6/recov_avx512.c
164
: "m" (*q), "m" (*p), "m"(*dq), "m" (*dp));
lib/raid6/recov_avx512.c
216
q += 64;
lib/raid6/recov_avx512.c
228
u8 *p, *q, *dq;
lib/raid6/recov_avx512.c
233
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_avx512.c
248
ptrs[disks-1] = q;
lib/raid6/recov_avx512.c
25
u8 *p, *q, *dp, *dq;
lib/raid6/recov_avx512.c
264
: "m" (dq[0]), "m" (dq[64]), "m" (q[0]),
lib/raid6/recov_avx512.c
265
"m" (q[64]));
lib/raid6/recov_avx512.c
31
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_avx512.c
319
q += 128;
lib/raid6/recov_avx512.c
325
: "m" (dq[0]), "m" (q[0]));
lib/raid6/recov_avx512.c
359
q += 64;
lib/raid6/recov_avx512.c
52
ptrs[disks-1] = q;
lib/raid6/recov_avx512.c
75
: "m" (q[0]), "m" (q[64]), "m" (p[0]),
lib/raid6/recov_loongarch_simd.c
178
q += 64;
lib/raid6/recov_loongarch_simd.c
189
u8 *p, *q, *dq;
lib/raid6/recov_loongarch_simd.c
193
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_loongarch_simd.c
207
ptrs[disks - 1] = q;
lib/raid6/recov_loongarch_simd.c
230
asm volatile("vld $vr8, %0" : : "m" (q[0]));
lib/raid6/recov_loongarch_simd.c
231
asm volatile("vld $vr9, %0" : : "m" (q[16]));
lib/raid6/recov_loongarch_simd.c
232
asm volatile("vld $vr10, %0" : : "m" (q[32]));
lib/raid6/recov_loongarch_simd.c
233
asm volatile("vld $vr11, %0" : : "m" (q[48]));
lib/raid6/recov_loongarch_simd.c
281
q += 64;
lib/raid6/recov_loongarch_simd.c
306
u8 *p, *q, *dp, *dq;
lib/raid6/recov_loongarch_simd.c
311
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_loongarch_simd.c
32
u8 *p, *q, *dp, *dq;
lib/raid6/recov_loongarch_simd.c
331
ptrs[disks - 1] = q;
lib/raid6/recov_loongarch_simd.c
354
asm volatile("xvld $xr0, %0" : : "m" (q[0]));
lib/raid6/recov_loongarch_simd.c
355
asm volatile("xvld $xr1, %0" : : "m" (q[32]));
lib/raid6/recov_loongarch_simd.c
37
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_loongarch_simd.c
417
q += 64;
lib/raid6/recov_loongarch_simd.c
428
u8 *p, *q, *dq;
lib/raid6/recov_loongarch_simd.c
432
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_loongarch_simd.c
446
ptrs[disks - 1] = q;
lib/raid6/recov_loongarch_simd.c
467
asm volatile("xvld $xr4, %0" : : "m" (q[0]));
lib/raid6/recov_loongarch_simd.c
468
asm volatile("xvld $xr5, %0" : : "m" (q[32]));
lib/raid6/recov_loongarch_simd.c
499
q += 64;
lib/raid6/recov_loongarch_simd.c
57
ptrs[disks - 1] = q;
lib/raid6/recov_loongarch_simd.c
76
asm volatile("vld $vr4, %0" : : "m" (q[0]));
lib/raid6/recov_loongarch_simd.c
77
asm volatile("vld $vr5, %0" : : "m" (q[16]));
lib/raid6/recov_loongarch_simd.c
78
asm volatile("vld $vr6, %0" : : "m" (q[32]));
lib/raid6/recov_loongarch_simd.c
79
asm volatile("vld $vr7, %0" : : "m" (q[48]));
lib/raid6/recov_neon.c
25
u8 *p, *q, *dp, *dq;
lib/raid6/recov_neon.c
30
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_neon.c
50
ptrs[disks - 1] = q;
lib/raid6/recov_neon.c
58
__raid6_2data_recov_neon(bytes, p, q, dp, dq, pbmul, qmul);
lib/raid6/recov_neon.c
64
u8 *p, *q, *dq;
lib/raid6/recov_neon.c
68
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_neon.c
82
ptrs[disks - 1] = q;
lib/raid6/recov_neon.c
88
__raid6_datap_recov_neon(bytes, p, q, dq, qmul);
lib/raid6/recov_neon_inner.c
108
q += 16;
lib/raid6/recov_neon_inner.c
28
void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
lib/raid6/recov_neon_inner.c
54
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
lib/raid6/recov_neon_inner.c
72
q += 16;
lib/raid6/recov_neon_inner.c
78
void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
lib/raid6/recov_neon_inner.c
95
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
lib/raid6/recov_rvv.c
10
static void __raid6_2data_recov_rvv(int bytes, u8 *p, u8 *q, u8 *dp,
lib/raid6/recov_rvv.c
131
[vx]"r"(q),
lib/raid6/recov_rvv.c
140
q += 16;
lib/raid6/recov_rvv.c
148
u8 *p, *q, *dp, *dq;
lib/raid6/recov_rvv.c
153
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_rvv.c
173
ptrs[disks - 1] = q;
lib/raid6/recov_rvv.c
181
__raid6_2data_recov_rvv(bytes, p, q, dp, dq, pbmul, qmul);
lib/raid6/recov_rvv.c
188
u8 *p, *q, *dq;
lib/raid6/recov_rvv.c
192
q = (u8 *)ptrs[disks - 1];
lib/raid6/recov_rvv.c
206
ptrs[disks - 1] = q;
lib/raid6/recov_rvv.c
212
__raid6_datap_recov_rvv(bytes, p, q, dq, qmul);
lib/raid6/recov_rvv.c
73
[qx]"r"(q),
lib/raid6/recov_rvv.c
83
q += 16;
lib/raid6/recov_rvv.c
89
static void __raid6_datap_recov_rvv(int bytes, u8 *p, u8 *q,
lib/raid6/recov_s390xc.c
103
q += 256;
lib/raid6/recov_s390xc.c
25
u8 *p, *q, *dp, *dq;
lib/raid6/recov_s390xc.c
31
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_s390xc.c
49
ptrs[disks-1] = q;
lib/raid6/recov_s390xc.c
58
xor_block(dq, q);
lib/raid6/recov_s390xc.c
63
q += 256;
lib/raid6/recov_s390xc.c
74
u8 *p, *q, *dq;
lib/raid6/recov_s390xc.c
79
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_s390xc.c
91
ptrs[disks-1] = q;
lib/raid6/recov_s390xc.c
98
xor_block(dq, q);
lib/raid6/recov_ssse3.c
134
q += 32;
lib/raid6/recov_ssse3.c
138
asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
lib/raid6/recov_ssse3.c
181
q += 16;
lib/raid6/recov_ssse3.c
19
u8 *p, *q, *dp, *dq;
lib/raid6/recov_ssse3.c
194
u8 *p, *q, *dq;
lib/raid6/recov_ssse3.c
201
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_ssse3.c
213
ptrs[disks-1] = q;
lib/raid6/recov_ssse3.c
226
asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
lib/raid6/recov_ssse3.c
231
asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
lib/raid6/recov_ssse3.c
27
q = (u8 *)ptrs[disks-1];
lib/raid6/recov_ssse3.c
279
q += 32;
lib/raid6/recov_ssse3.c
285
asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
lib/raid6/recov_ssse3.c
310
q += 16;
lib/raid6/recov_ssse3.c
45
ptrs[disks-1] = q;
lib/raid6/recov_ssse3.c
67
asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
lib/raid6/recov_ssse3.c
68
asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
lib/raid6/rvv.c
102
q = dptr[disks - 1]; /* RS syndrome */
lib/raid6/rvv.c
1206
[wq0]"r"(&q[d + nsize * 0]),
lib/raid6/rvv.c
1208
[wq1]"r"(&q[d + nsize * 1]),
lib/raid6/rvv.c
1210
[wq2]"r"(&q[d + nsize * 2]),
lib/raid6/rvv.c
1212
[wq3]"r"(&q[d + nsize * 3]),
lib/raid6/rvv.c
1214
[wq4]"r"(&q[d + nsize * 4]),
lib/raid6/rvv.c
1216
[wq5]"r"(&q[d + nsize * 5]),
lib/raid6/rvv.c
1218
[wq6]"r"(&q[d + nsize * 6]),
lib/raid6/rvv.c
1220
[wq7]"r"(&q[d + nsize * 7])
lib/raid6/rvv.c
188
[wq0]"r"(&q[d + nsize * 0])
lib/raid6/rvv.c
196
u8 *p, *q;
lib/raid6/rvv.c
202
q = dptr[z0 + 2]; /* RS syndrome */
lib/raid6/rvv.c
21
u8 *p, *q;
lib/raid6/rvv.c
27
q = dptr[z0 + 2]; /* RS syndrome */
lib/raid6/rvv.c
279
[wq0]"r"(&q[d + nsize * 0]),
lib/raid6/rvv.c
281
[wq1]"r"(&q[d + nsize * 1])
lib/raid6/rvv.c
290
u8 *p, *q;
lib/raid6/rvv.c
296
q = dptr[disks - 1]; /* RS syndrome */
lib/raid6/rvv.c
410
[wq0]"r"(&q[d + nsize * 0]),
lib/raid6/rvv.c
412
[wq1]"r"(&q[d + nsize * 1])
lib/raid6/rvv.c
420
u8 *p, *q;
lib/raid6/rvv.c
426
q = dptr[z0 + 2]; /* RS syndrome */
lib/raid6/rvv.c
533
[wq0]"r"(&q[d + nsize * 0]),
lib/raid6/rvv.c
535
[wq1]"r"(&q[d + nsize * 1]),
lib/raid6/rvv.c
537
[wq2]"r"(&q[d + nsize * 2]),
lib/raid6/rvv.c
539
[wq3]"r"(&q[d + nsize * 3])
lib/raid6/rvv.c
548
u8 *p, *q;
lib/raid6/rvv.c
554
q = dptr[disks - 1]; /* RS syndrome */
lib/raid6/rvv.c
720
[wq0]"r"(&q[d + nsize * 0]),
lib/raid6/rvv.c
722
[wq1]"r"(&q[d + nsize * 1]),
lib/raid6/rvv.c
724
[wq2]"r"(&q[d + nsize * 2]),
lib/raid6/rvv.c
726
[wq3]"r"(&q[d + nsize * 3])
lib/raid6/rvv.c
734
u8 *p, *q;
lib/raid6/rvv.c
740
q = dptr[z0 + 2]; /* RS syndrome */
lib/raid6/rvv.c
87
[wq0]"r"(&q[d + nsize * 0])
lib/raid6/rvv.c
907
[wq0]"r"(&q[d + nsize * 0]),
lib/raid6/rvv.c
909
[wq1]"r"(&q[d + nsize * 1]),
lib/raid6/rvv.c
911
[wq2]"r"(&q[d + nsize * 2]),
lib/raid6/rvv.c
913
[wq3]"r"(&q[d + nsize * 3]),
lib/raid6/rvv.c
915
[wq4]"r"(&q[d + nsize * 4]),
lib/raid6/rvv.c
917
[wq5]"r"(&q[d + nsize * 5]),
lib/raid6/rvv.c
919
[wq6]"r"(&q[d + nsize * 6]),
lib/raid6/rvv.c
921
[wq7]"r"(&q[d + nsize * 7])
lib/raid6/rvv.c
930
u8 *p, *q;
lib/raid6/rvv.c
936
q = dptr[disks - 1]; /* RS syndrome */
lib/raid6/rvv.c
96
u8 *p, *q;
lib/raid6/sse1.c
102
u8 *p, *q;
lib/raid6/sse1.c
107
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/sse1.c
143
asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
lib/raid6/sse1.c
144
asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
lib/raid6/sse1.c
43
u8 *p, *q;
lib/raid6/sse1.c
48
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/sse1.c
81
asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
lib/raid6/sse2.c
125
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
lib/raid6/sse2.c
127
asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
lib/raid6/sse2.c
149
u8 *p, *q;
lib/raid6/sse2.c
154
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/sse2.c
190
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
lib/raid6/sse2.c
191
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
lib/raid6/sse2.c
202
u8 *p, *q;
lib/raid6/sse2.c
207
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/sse2.c
252
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
lib/raid6/sse2.c
253
asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
lib/raid6/sse2.c
255
asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
lib/raid6/sse2.c
256
asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
lib/raid6/sse2.c
281
u8 *p, *q;
lib/raid6/sse2.c
286
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/sse2.c
350
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
lib/raid6/sse2.c
352
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
lib/raid6/sse2.c
354
asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
lib/raid6/sse2.c
356
asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
lib/raid6/sse2.c
368
u8 *p, *q;
lib/raid6/sse2.c
373
q = dptr[disks-1]; /* RS syndrome */
lib/raid6/sse2.c
39
u8 *p, *q;
lib/raid6/sse2.c
429
asm volatile("prefetchnta %0" :: "m" (q[d]));
lib/raid6/sse2.c
430
asm volatile("prefetchnta %0" :: "m" (q[d+32]));
lib/raid6/sse2.c
44
q = dptr[z0+2]; /* RS syndrome */
lib/raid6/sse2.c
458
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
lib/raid6/sse2.c
459
asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
lib/raid6/sse2.c
460
asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
lib/raid6/sse2.c
461
asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
lib/raid6/sse2.c
462
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
lib/raid6/sse2.c
463
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
lib/raid6/sse2.c
464
asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
lib/raid6/sse2.c
465
asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
lib/raid6/sse2.c
78
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
lib/raid6/sse2.c
91
u8 *p, *q;
lib/raid6/sse2.c
96
q = dptr[disks-1]; /* RS syndrome */
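
Note: every gen_syndrome() variant indexed above (AVX-512, LSX/LASX, MMX, NEON, RVV, SSE1/SSE2) vectorizes the same computation. A stand-alone scalar sketch, modeled on the kernel's generic lib/raid6/int.uc: Q is the Reed-Solomon syndrome, the GF(2^8) sum of g^z * D_z over data disks z with generator g = {02}, evaluated Horner-style from the highest disk down.

    #include <stddef.h>

    typedef unsigned char u8;

    static u8 gf_mul2(u8 a)                  /* multiply by g = {02} */
    {
        return (u8)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
    }

    static void gen_syndrome_ref(int disks, size_t bytes, u8 **dptr)
    {
        u8 *p = dptr[disks - 2];             /* XOR parity  */
        u8 *q = dptr[disks - 1];             /* RS syndrome */
        int z0 = disks - 3;                  /* highest data disk */

        for (size_t d = 0; d < bytes; d++) {
            u8 wp = dptr[z0][d], wq = wp;

            for (int z = z0 - 1; z >= 0; z--) {
                u8 wd = dptr[z][d];

                wp ^= wd;                    /* P accumulates plain XOR  */
                wq = gf_mul2(wq) ^ wd;       /* Q accumulates g*Q ^ D    */
            }
            p[d] = wp;
            q[d] = wq;
        }
    }
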
lib/reed_solomon/decode_rs.c
201
q = 1; /* lambda[0] is always 0 */
lib/reed_solomon/decode_rs.c
205
q ^= alpha_to[reg[j]];
lib/reed_solomon/decode_rs.c
208
if (q != 0)
lib/reed_solomon/decode_rs.c
23
uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
lib/string_helpers.c
191
char *p = *dst, *q = *src;
lib/string_helpers.c
193
switch (*q) {
lib/string_helpers.c
219
char *p = *dst, *q = *src;
lib/string_helpers.c
222
if (isodigit(*q) == 0)
lib/string_helpers.c
225
num = (*q++) & 7;
lib/string_helpers.c
226
while (num < 32 && isodigit(*q) && (q - *src < 3)) {
lib/string_helpers.c
228
num += (*q++) & 7;
lib/string_helpers.c
232
*src = q;
lib/string_helpers.c
238
char *p = *dst, *q = *src;
lib/string_helpers.c
242
if (*q++ != 'x')
lib/string_helpers.c
245
num = digit = hex_to_bin(*q++);
lib/string_helpers.c
249
digit = hex_to_bin(*q);
lib/string_helpers.c
251
q++;
lib/string_helpers.c
256
*src = q;
lib/string_helpers.c
262
char *p = *dst, *q = *src;
lib/string_helpers.c
264
switch (*q) {
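
Note: in lib/string_helpers.c, q walks the escaped source string. The octal branch whose pieces are indexed above, reassembled as a stand-alone sketch of unescape_octal() (isodigit() and u8 are inlined here; the `num < 32` guard keeps the byte from overflowing after the `num <<= 3` shift, and `q - *src < 3` caps the sequence at three digits):

    #include <stdbool.h>

    typedef unsigned char u8;

    static bool isodigit(char c) { return c >= '0' && c <= '7'; }

    static bool unescape_octal(char **src, char **dst)
    {
        char *p = *dst, *q = *src;
        u8 num;

        if (isodigit(*q) == 0)
            return false;

        num = (*q++) & 7;
        while (num < 32 && isodigit(*q) && (q - *src < 3)) {
            num <<= 3;
            num += (*q++) & 7;
        }
        *p = num;                /* emit the decoded byte     */
        *dst = p + 1;
        *src = q;                /* advance past the escape   */
        return true;
    }
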
lib/test_hexdump.c
100
size_t amount = strlen(q);
lib/test_hexdump.c
102
memcpy(p, q, amount);
lib/test_hexdump.c
99
const char *q = *result++;
lib/ts_kmp.c
45
unsigned int i, q = 0, text_len, consumed = state->offset;
lib/ts_kmp.c
56
while (q > 0 && kmp->pattern[q]
lib/ts_kmp.c
58
q = kmp->prefix_tbl[q - 1];
lib/ts_kmp.c
59
if (kmp->pattern[q]
lib/ts_kmp.c
61
q++;
lib/ts_kmp.c
62
if (unlikely(q == kmp->pattern_len)) {
lib/ts_kmp.c
77
unsigned int k, q;
lib/ts_kmp.c
80
for (k = 0, q = 1; q < len; q++) {
lib/ts_kmp.c
82
!= (icase ? toupper(pattern[q]) : pattern[q]))
lib/ts_kmp.c
85
== (icase ? toupper(pattern[q]) : pattern[q]))
lib/ts_kmp.c
87
prefix_tbl[q] = k;
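
Note: in lib/ts_kmp.c, q is the classic Knuth-Morris-Pratt automaton state. The table construction indexed above is the textbook prefix function; a sketch with the optional TS_IGNORECASE toupper() folding dropped (prefix_tbl[q] is the length of the longest proper prefix of pattern[0..q] that is also a suffix of it):

    static void compute_prefix(const unsigned char *pattern,
                               unsigned int len,
                               unsigned int *prefix_tbl)
    {
        unsigned int k, q;

        prefix_tbl[0] = 0;
        for (k = 0, q = 1; q < len; q++) {
            while (k > 0 && pattern[k] != pattern[q])
                k = prefix_tbl[k - 1];   /* fall back on mismatch  */
            if (pattern[k] == pattern[q])
                k++;                     /* extend the match       */
            prefix_tbl[q] = k;
        }
    }
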
lib/vsprintf.c
245
unsigned q;
lib/vsprintf.c
252
q = (r * (u64)0x28f5c29) >> 32;
lib/vsprintf.c
253
*((u16 *)buf) = decpair[r - 100*q];
lib/vsprintf.c
257
if (q < 100)
lib/vsprintf.c
261
r = (q * (u64)0x28f5c29) >> 32;
lib/vsprintf.c
262
*((u16 *)buf) = decpair[q - 100*r];
lib/vsprintf.c
270
q = (r * 0x147b) >> 19;
lib/vsprintf.c
271
*((u16 *)buf) = decpair[r - 100*q];
lib/vsprintf.c
275
r = q;
lib/vsprintf.c
287
unsigned q;
lib/vsprintf.c
290
q = (r * (u64)0x28f5c29) >> 32;
lib/vsprintf.c
291
*((u16 *)buf) = decpair[r - 100*q];
lib/vsprintf.c
295
r = (q * (u64)0x28f5c29) >> 32;
lib/vsprintf.c
296
*((u16 *)buf) = decpair[q - 100*r];
lib/vsprintf.c
300
q = (r * 0x147b) >> 19;
lib/vsprintf.c
301
*((u16 *)buf) = decpair[r - 100*q];
lib/vsprintf.c
305
*((u16 *)buf) = decpair[q];
lib/vsprintf.c
327
unsigned q;
lib/vsprintf.c
330
q = (r * 0x147b) >> 19;
lib/vsprintf.c
331
*((u16 *)buf) = decpair[r - 100*q];
lib/vsprintf.c
334
*((u16 *)buf) = decpair[q];
lib/vsprintf.c
347
uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
lib/vsprintf.c
349
put_dec_full4(buf, x - q * 10000);
lib/vsprintf.c
350
return q;
lib/vsprintf.c
361
uint32_t d3, d2, d1, q, h;
lib/vsprintf.c
373
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
lib/vsprintf.c
374
q = put_dec_helper4(buf, q);
lib/vsprintf.c
376
q += 7671 * d3 + 9496 * d2 + 6 * d1;
lib/vsprintf.c
377
q = put_dec_helper4(buf+4, q);
lib/vsprintf.c
379
q += 4749 * d3 + 42 * d2;
lib/vsprintf.c
380
q = put_dec_helper4(buf+8, q);
lib/vsprintf.c
382
q += 281 * d3;
lib/vsprintf.c
384
if (q)
lib/vsprintf.c
385
buf = put_dec_trunc8(buf, q);
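
Note: the magic constants in the put_dec_*() lines above are reciprocal approximations that divide by 100 without a divide instruction: 0x28f5c29 = ceil(2^32 / 100), so (r * 0x28f5c29) >> 32 == r / 100 for every r < 10^8 these helpers feed it, and (r * 0x147b) >> 19 == r / 100 for r < 10^4. A stand-alone exhaustive check of both claims:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t r;

        for (r = 0; r < 100000000; r++)
            assert(((r * (uint64_t)0x28f5c29) >> 32) == r / 100);
        for (r = 0; r < 10000; r++)
            assert(((r * 0x147b) >> 19) == r / 100);
        return 0;
    }
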
lib/zlib_inflate/inflate.h
115
#define REVERSE(q) \
lib/zlib_inflate/inflate.h
116
((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
lib/zlib_inflate/inflate.h
117
(((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
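
Note: here q is just the macro argument; REVERSE(q) is a plain 32-bit byte swap (used when inflate reads zlib's big-endian check value). A quick stand-alone check that it agrees with the compiler builtin:

    #include <assert.h>
    #include <stdint.h>

    #define REVERSE(q) \
        ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
         (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))

    int main(void)
    {
        uint32_t q = 0x11223344;

        assert(REVERSE(q) == 0x44332211);
        assert(REVERSE(q) == __builtin_bswap32(q));
        return 0;
    }
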
mm/damon/core.c
344
void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g)
mm/damon/core.c
346
list_add_tail(&g->list, &q->goals);
mm/damon/core.c
775
int n, struct damos_quota *q)
mm/damon/core.c
780
damos_for_each_quota_goal(goal, q) {
mm/filemap.c
1186
wait_queue_head_t *q = folio_waitqueue(folio);
mm/filemap.c
1194
spin_lock_irqsave(&q->lock, flags);
mm/filemap.c
1195
__wake_up_locked_key(q, TASK_NORMAL, &key);
mm/filemap.c
1206
if (!waitqueue_active(q) || !key.page_match)
mm/filemap.c
1209
spin_unlock_irqrestore(&q->lock, flags);
mm/filemap.c
1247
wait_queue_head_t *q = folio_waitqueue(folio);
mm/filemap.c
1289
spin_lock_irq(&q->lock);
mm/filemap.c
1292
__add_wait_queue_entry_tail(q, wait);
mm/filemap.c
1293
spin_unlock_irq(&q->lock);
mm/filemap.c
1354
finish_wait(q, wait);
mm/filemap.c
1407
wait_queue_head_t *q;
mm/filemap.c
1410
q = folio_waitqueue(folio);
mm/filemap.c
1423
spin_lock_irq(&q->lock);
mm/filemap.c
1426
__add_wait_queue_entry_tail(q, wait);
mm/filemap.c
1427
spin_unlock_irq(&q->lock);
mm/filemap.c
1456
finish_wait(q, wait);
mm/filemap.c
1718
struct wait_queue_head *q = folio_waitqueue(folio);
mm/filemap.c
1724
spin_lock_irq(&q->lock);
mm/filemap.c
1725
__add_wait_queue_entry_tail(q, &wait->wait);
mm/filemap.c
1735
__remove_wait_queue(q, &wait->wait);
mm/filemap.c
1738
spin_unlock_irq(&q->lock);
mm/kasan/quarantine.c
166
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
mm/kasan/quarantine.c
170
if (unlikely(qlist_empty(q)))
mm/kasan/quarantine.c
173
qlink = q->head;
mm/kasan/quarantine.c
182
qlist_init(q);
mm/kasan/quarantine.c
188
struct qlist_head *q;
mm/kasan/quarantine.c
209
q = this_cpu_ptr(&cpu_quarantine);
mm/kasan/quarantine.c
210
if (q->offline) {
mm/kasan/quarantine.c
214
qlist_put(q, &meta->quarantine_link, cache->size);
mm/kasan/quarantine.c
215
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
mm/kasan/quarantine.c
216
qlist_move_all(q, &temp);
mm/kasan/quarantine.c
314
static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
mm/kasan/quarantine.c
322
qlist_move_cache(q, &sq->qlist, cache);
mm/kasan/quarantine.c
328
struct qlist_head *q;
mm/kasan/quarantine.c
330
q = this_cpu_ptr(&cpu_quarantine);
mm/kasan/quarantine.c
336
if (READ_ONCE(q->offline))
mm/kasan/quarantine.c
338
__per_cpu_remove_cache(q, arg);
mm/kasan/quarantine.c
391
struct qlist_head *q;
mm/kasan/quarantine.c
393
q = this_cpu_ptr(&cpu_quarantine);
mm/kasan/quarantine.c
398
WRITE_ONCE(q->offline, true);
mm/kasan/quarantine.c
400
qlist_free_all(q, NULL);
mm/kasan/quarantine.c
44
static bool qlist_empty(struct qlist_head *q)
mm/kasan/quarantine.c
46
return !q->head;
mm/kasan/quarantine.c
49
static void qlist_init(struct qlist_head *q)
mm/kasan/quarantine.c
51
q->head = q->tail = NULL;
mm/kasan/quarantine.c
52
q->bytes = 0;
mm/kasan/quarantine.c
55
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
mm/kasan/quarantine.c
58
if (unlikely(qlist_empty(q)))
mm/kasan/quarantine.c
59
q->head = qlink;
mm/kasan/quarantine.c
61
q->tail->next = qlink;
mm/kasan/quarantine.c
62
q->tail = qlink;
mm/kasan/quarantine.c
64
q->bytes += size;
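
Note: a qlist is an intrusive singly linked list with a tail pointer and running byte count, used to account quarantined objects. A stand-alone model of the init/empty/put primitives indexed above (types simplified for the sketch; in the kernel, qlist_node lives inside the freed object's KASAN metadata):

    #include <stdbool.h>
    #include <stddef.h>

    struct qlist_node { struct qlist_node *next; };
    struct qlist_head {
        struct qlist_node *head, *tail;
        size_t bytes;
    };

    static bool qlist_empty(const struct qlist_head *q)
    {
        return !q->head;
    }

    static void qlist_init(struct qlist_head *q)
    {
        q->head = q->tail = NULL;
        q->bytes = 0;
    }

    static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
                          size_t size)
    {
        if (qlist_empty(q))
            q->head = qlink;        /* first entry becomes the head */
        else
            q->tail->next = qlink;  /* otherwise append after tail  */
        q->tail = qlink;
        q->bytes += size;           /* quarantine size accounting   */
    }
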
net/atm/lec.c
898
int q;
net/atm/lec.c
900
for (q = state->misc_table; q < ARRAY_SIZE(lec_misc_tables); q++) {
net/atm/lec.c
901
v = lec_tbl_walk(state, lec_misc_tables[q], l);
net/atm/lec.c
905
state->misc_table = q;
net/bluetooth/hci_core.c
3388
int cnt, q;
net/bluetooth/hci_core.c
3418
q = cnt / num;
net/bluetooth/hci_core.c
3419
*quote = q ? q : 1;
net/caif/caif_dev.c
203
if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
net/core/dev.c
2641
int q = netdev_get_prio_tc_map(dev, i);
net/core/dev.c
2643
tc = &dev->tc_to_txq[q];
net/core/dev.c
2646
i, q);
net/core/dev.c
3380
static void __netif_reschedule(struct Qdisc *q)
net/core/dev.c
3387
q->next_sched = NULL;
net/core/dev.c
3388
*sd->output_queue_tailp = q;
net/core/dev.c
3389
sd->output_queue_tailp = &q->next_sched;
net/core/dev.c
3394
void __netif_schedule(struct Qdisc *q)
net/core/dev.c
3400
if (!llist_empty(&q->defer_list))
net/core/dev.c
3403
if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
net/core/dev.c
3404
__netif_reschedule(q);
net/core/dev.c
3421
struct Qdisc *q = rcu_dereference(txq->qdisc);
net/core/dev.c
3423
__netif_schedule(q);
net/core/dev.c
3432
struct Qdisc *q;
net/core/dev.c
3435
q = rcu_dereference(dev_queue->qdisc);
net/core/dev.c
3436
__netif_schedule(q);
net/core/dev.c
4162
static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
net/core/dev.c
4168
rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
net/core/dev.c
4170
trace_qdisc_enqueue(q, txq, skb);
net/core/dev.c
4174
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
net/core/dev.c
4179
spinlock_t *root_lock = qdisc_lock(q);
net/core/dev.c
4184
qdisc_calculate_pkt_len(skb, q);
net/core/dev.c
4188
if (q->flags & TCQ_F_NOLOCK) {
net/core/dev.c
4189
if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
net/core/dev.c
4190
qdisc_run_begin(q)) {
net/core/dev.c
4194
if (unlikely(!nolock_qdisc_is_empty(q))) {
net/core/dev.c
4195
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
net/core/dev.c
4196
__qdisc_run(q);
net/core/dev.c
4197
to_free2 = qdisc_run_end(q);
net/core/dev.c
4202
qdisc_bstats_cpu_update(q, skb);
net/core/dev.c
4203
if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
net/core/dev.c
4204
!nolock_qdisc_is_empty(q))
net/core/dev.c
4205
__qdisc_run(q);
net/core/dev.c
4207
to_free2 = qdisc_run_end(q);
net/core/dev.c
4212
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
net/core/dev.c
4213
to_free2 = qdisc_run(q);
net/core/dev.c
4223
first_n = READ_ONCE(q->defer_list.first);
net/core/dev.c
4226
defer_count = atomic_long_inc_return(&q->defer_count);
net/core/dev.c
4233
} while (!try_cmpxchg(&q->defer_list.first, &first_n, &skb->ll_node));
net/core/dev.c
4243
ll_list = llist_del_all(&q->defer_list);
net/core/dev.c
4248
atomic_long_set(&q->defer_count, 0);
net/core/dev.c
4252
if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
net/core/dev.c
4258
if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
net/core/dev.c
4259
!llist_next(ll_list) && qdisc_run_begin(q)) {
net/core/dev.c
4269
qdisc_bstats_update(q, skb);
net/core/dev.c
4270
if (sch_direct_xmit(skb, q, dev, txq, root_lock, true))
net/core/dev.c
4271
__qdisc_run(q);
net/core/dev.c
4272
to_free2 = qdisc_run_end(q);
net/core/dev.c
4283
rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
net/core/dev.c
4286
to_free2 = qdisc_run(q);
net/core/dev.c
4764
struct Qdisc *q;
net/core/dev.c
4815
q = rcu_dereference_bh(txq->qdisc);
net/core/dev.c
4818
if (q->enqueue) {
net/core/dev.c
4819
rc = __dev_xmit_skb(skb, q, dev, txq);
net/core/dev.c
5798
struct Qdisc *q = head;
net/core/dev.c
5807
if (!(q->flags & TCQ_F_NOLOCK)) {
net/core/dev.c
5808
root_lock = qdisc_lock(q);
net/core/dev.c
5811
&q->state))) {
net/core/dev.c
5820
clear_bit(__QDISC_STATE_SCHED, &q->state);
net/core/dev.c
5824
clear_bit(__QDISC_STATE_SCHED, &q->state);
net/core/dev.c
5825
to_free = qdisc_run(q);
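
Note: in the net/core/dev.c lines above, __QDISC_STATE_SCHED acts as an "already queued for softirq" latch: test_and_set_bit() makes __netif_schedule() idempotent, so a qdisc is linked onto the per-CPU output queue at most once until net_tx_action() (lines 5798+) clears the bit. A stand-alone model with hypothetical types; the kernel additionally disables IRQs around the list append, which this sketch of the latch omits:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct qdisc_model {
        atomic_bool sched;               /* models __QDISC_STATE_SCHED */
        struct qdisc_model *next_sched;
    };

    struct softnet_model {
        struct qdisc_model *output_queue;
        struct qdisc_model **output_queue_tailp;
    };

    static void softnet_init(struct softnet_model *sd)
    {
        sd->output_queue = NULL;
        sd->output_queue_tailp = &sd->output_queue;
    }

    static bool schedule_once(struct softnet_model *sd,
                              struct qdisc_model *q)
    {
        if (atomic_exchange(&q->sched, true))
            return false;                 /* already queued for softirq */
        q->next_sched = NULL;
        *sd->output_queue_tailp = q;             /* append at the tail  */
        sd->output_queue_tailp = &q->next_sched; /* advance the tail    */
        return true;
    }
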
net/core/gen_stats.c
341
const struct gnet_stats_queue __percpu *q)
net/core/gen_stats.c
346
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
net/core/gen_stats.c
358
const struct gnet_stats_queue *q)
net/core/gen_stats.c
363
qstats->qlen += q->qlen;
net/core/gen_stats.c
364
qstats->backlog += q->backlog;
net/core/gen_stats.c
365
qstats->drops += q->drops;
net/core/gen_stats.c
366
qstats->requeues += q->requeues;
net/core/gen_stats.c
367
qstats->overlimits += q->overlimits;
net/core/gen_stats.c
389
struct gnet_stats_queue *q, __u32 qlen)
net/core/gen_stats.c
393
gnet_stats_add_queue(&qstats, cpu_q, q);
net/core/pktgen.c
3442
struct list_head *q, *n;
net/core/pktgen.c
3447
list_for_each_safe(q, n, &t->if_list) {
net/core/pktgen.c
3448
cur = list_entry(q, struct pktgen_dev, list);
net/core/pktgen.c
3464
struct list_head *q, *n;
net/core/pktgen.c
3471
list_for_each_safe(q, n, &t->if_list) {
net/core/pktgen.c
3472
cur = list_entry(q, struct pktgen_dev, list);
net/core/pktgen.c
3971
struct list_head *q, *n;
net/core/pktgen.c
3975
list_for_each_safe(q, n, &t->if_list) {
net/core/pktgen.c
3976
p = list_entry(q, struct pktgen_dev, list);
net/core/pktgen.c
4070
struct list_head *q, *n;
net/core/pktgen.c
4080
list_for_each_safe(q, n, &list) {
net/core/pktgen.c
4081
t = list_entry(q, struct pktgen_thread, th_list);
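
Note: in pktgen, q is just a list cursor. The walks above pair it with n because list_for_each_safe() caches the successor before the body runs, so the body may unlink and free the entry at q. A minimal stand-alone model of the idiom (the kernel's list_del() additionally poisons the dangling pointers):

    struct list_head { struct list_head *next, *prev; };

    #define list_for_each_safe(q, n, head) \
        for ((q) = (head)->next, (n) = (q)->next; \
             (q) != (head); \
             (q) = (n), (n) = (q)->next)

    static void list_del(struct list_head *entry)
    {
        entry->prev->next = entry->next;   /* unlink forward  */
        entry->next->prev = entry->prev;   /* unlink backward */
    }
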
net/core/skbuff.c
1843
struct sk_buff_head *q;
net/core/skbuff.c
1871
q = &sk->sk_error_queue;
net/core/skbuff.c
1872
spin_lock_irqsave(&q->lock, flags);
net/core/skbuff.c
1873
tail = skb_peek_tail(q);
net/core/skbuff.c
1876
__skb_queue_tail(q, skb);
net/core/skbuff.c
1879
spin_unlock_irqrestore(&q->lock, flags);
net/core/skbuff.c
5501
struct sk_buff_head *q = &sk->sk_error_queue;
net/core/skbuff.c
5506
if (skb_queue_empty_lockless(q))
net/core/skbuff.c
5509
spin_lock_irqsave(&q->lock, flags);
net/core/skbuff.c
5510
skb = __skb_dequeue(q);
net/core/skbuff.c
5511
if (skb && (skb_next = skb_peek(q))) {
net/core/skbuff.c
5516
spin_unlock_irqrestore(&q->lock, flags);
net/core/xdp.c
529
bq->q[bq->count++] = skb_frag_netmem(frag);
net/core/xdp.c
534
bq->q[bq->count++] = virt_to_netmem(xdpf->data);
net/ieee802154/6lowpan/6lowpan_i.h
30
struct inet_frag_queue q;
net/ieee802154/6lowpan/reassembly.c
100
if (fq->q.flags & INET_FRAG_COMPLETE)
net/ieee802154/6lowpan/reassembly.c
111
if (end < fq->q.len ||
net/ieee802154/6lowpan/reassembly.c
112
((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
net/ieee802154/6lowpan/reassembly.c
114
fq->q.flags |= INET_FRAG_LAST_IN;
net/ieee802154/6lowpan/reassembly.c
115
fq->q.len = end;
net/ieee802154/6lowpan/reassembly.c
117
if (end > fq->q.len) {
net/ieee802154/6lowpan/reassembly.c
119
if (fq->q.flags & INET_FRAG_LAST_IN)
net/ieee802154/6lowpan/reassembly.c
121
fq->q.len = end;
net/ieee802154/6lowpan/reassembly.c
130
prev_tail = fq->q.fragments_tail;
net/ieee802154/6lowpan/reassembly.c
131
err = inet_frag_queue_insert(&fq->q, skb, offset, end);
net/ieee802154/6lowpan/reassembly.c
135
fq->q.stamp = skb->tstamp;
net/ieee802154/6lowpan/reassembly.c
136
fq->q.tstamp_type = skb->tstamp_type;
net/ieee802154/6lowpan/reassembly.c
138
fq->q.flags |= INET_FRAG_FIRST_IN;
net/ieee802154/6lowpan/reassembly.c
140
fq->q.meat += skb->len;
net/ieee802154/6lowpan/reassembly.c
141
add_frag_mem_limit(fq->q.fqdir, skb->truesize);
net/ieee802154/6lowpan/reassembly.c
143
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
net/ieee802154/6lowpan/reassembly.c
144
fq->q.meat == fq->q.len) {
net/ieee802154/6lowpan/reassembly.c
173
inet_frag_kill(&fq->q, refs);
net/ieee802154/6lowpan/reassembly.c
175
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
net/ieee802154/6lowpan/reassembly.c
178
inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
net/ieee802154/6lowpan/reassembly.c
181
skb->tstamp = fq->q.stamp;
net/ieee802154/6lowpan/reassembly.c
182
fq->q.rb_fragments = RB_ROOT;
net/ieee802154/6lowpan/reassembly.c
183
fq->q.fragments_tail = NULL;
net/ieee802154/6lowpan/reassembly.c
184
fq->q.last_run_head = NULL;
net/ieee802154/6lowpan/reassembly.c
312
spin_lock(&fq->q.lock);
net/ieee802154/6lowpan/reassembly.c
314
spin_unlock(&fq->q.lock);
net/ieee802154/6lowpan/reassembly.c
317
inet_frag_putn(&fq->q, refs);
net/ieee802154/6lowpan/reassembly.c
37
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
net/ieee802154/6lowpan/reassembly.c
41
BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
net/ieee802154/6lowpan/reassembly.c
42
memcpy(&q->key, key, sizeof(*key));
net/ieee802154/6lowpan/reassembly.c
51
fq = container_of(frag, struct frag_queue, q);
net/ieee802154/6lowpan/reassembly.c
53
spin_lock(&fq->q.lock);
net/ieee802154/6lowpan/reassembly.c
55
if (fq->q.flags & INET_FRAG_COMPLETE)
net/ieee802154/6lowpan/reassembly.c
58
inet_frag_kill(&fq->q, &refs);
net/ieee802154/6lowpan/reassembly.c
60
spin_unlock(&fq->q.lock);
net/ieee802154/6lowpan/reassembly.c
61
inet_frag_putn(&fq->q, refs);
net/ieee802154/6lowpan/reassembly.c
72
struct inet_frag_queue *q;
net/ieee802154/6lowpan/reassembly.c
79
q = inet_frag_find(ieee802154_lowpan->fqdir, &key);
net/ieee802154/6lowpan/reassembly.c
80
if (!q)
net/ieee802154/6lowpan/reassembly.c
83
return container_of(q, struct lowpan_frag_queue, q);
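
Note: the 6LoWPAN entries above share one completion test with the IPv4, IPv6, and nf_conntrack reassembly code indexed further below: a datagram is whole once both end fragments have arrived and the bytes accumulated in ->meat cover the advertised total ->len. A helper-shaped restatement (hypothetical function; the kernel open-codes this check at every call site):

    static bool frag_queue_complete(const struct inet_frag_queue *q)
    {
        return q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
               q->meat == q->len;
    }
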
net/ipv4/af_inet.c
1898
struct inet_protosw *q;
net/ipv4/af_inet.c
1964
for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
net/ipv4/af_inet.c
1965
inet_register_protosw(q);
net/ipv4/inet_fragment.c
292
struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
net/ipv4/inet_fragment.c
294
struct inet_frags *f = q->fqdir->f;
net/ipv4/inet_fragment.c
297
f->destructor(q);
net/ipv4/inet_fragment.c
298
kmem_cache_free(f->frags_cachep, q);
net/ipv4/inet_fragment.c
323
void inet_frag_queue_flush(struct inet_frag_queue *q,
net/ipv4/inet_fragment.c
329
sum = inet_frag_rbtree_purge(&q->rb_fragments, reason);
net/ipv4/inet_fragment.c
330
sub_frag_mem_limit(q->fqdir, sum);
net/ipv4/inet_fragment.c
334
void inet_frag_destroy(struct inet_frag_queue *q)
net/ipv4/inet_fragment.c
341
WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
net/ipv4/inet_fragment.c
342
reason = (q->flags & INET_FRAG_DROP) ?
net/ipv4/inet_fragment.c
345
WARN_ON(timer_delete(&q->timer) != 0);
net/ipv4/inet_fragment.c
348
fqdir = q->fqdir;
net/ipv4/inet_fragment.c
350
sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments, reason);
net/ipv4/inet_fragment.c
353
call_rcu(&q->rcu, inet_frag_destroy_rcu);
net/ipv4/inet_fragment.c
363
struct inet_frag_queue *q;
net/ipv4/inet_fragment.c
365
q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
net/ipv4/inet_fragment.c
366
if (!q)
net/ipv4/inet_fragment.c
369
q->fqdir = fqdir;
net/ipv4/inet_fragment.c
370
f->constructor(q, arg);
net/ipv4/inet_fragment.c
373
timer_setup(&q->timer, f->frag_expire, 0);
net/ipv4/inet_fragment.c
374
spin_lock_init(&q->lock);
net/ipv4/inet_fragment.c
378
refcount_set(&q->refcnt, 2);
net/ipv4/inet_fragment.c
380
return q;
net/ipv4/inet_fragment.c
388
struct inet_frag_queue *q;
net/ipv4/inet_fragment.c
390
q = inet_frag_alloc(fqdir, f, arg);
net/ipv4/inet_fragment.c
391
if (!q) {
net/ipv4/inet_fragment.c
395
mod_timer(&q->timer, jiffies + fqdir->timeout);
net/ipv4/inet_fragment.c
397
*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
net/ipv4/inet_fragment.c
398
&q->node, f->rhash_params);
net/ipv4/inet_fragment.c
406
q->flags |= INET_FRAG_COMPLETE;
net/ipv4/inet_fragment.c
407
inet_frag_kill(q, &refs);
net/ipv4/inet_fragment.c
408
inet_frag_putn(q, refs);
net/ipv4/inet_fragment.c
411
return q;
net/ipv4/inet_fragment.c
432
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
net/ipv4/inet_fragment.c
435
struct sk_buff *last = q->fragments_tail;
net/ipv4/inet_fragment.c
447
fragrun_create(q, skb); /* First fragment. */
net/ipv4/inet_fragment.c
454
fragrun_append_to_last(q, skb);
net/ipv4/inet_fragment.c
456
fragrun_create(q, skb);
net/ipv4/inet_fragment.c
463
rbn = &q->rb_fragments.rb_node;
net/ipv4/inet_fragment.c
487
rb_insert_color(&skb->rbnode, &q->rb_fragments);
net/ipv4/inet_fragment.c
498
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
net/ipv4/inet_fragment.c
501
struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
net/ipv4/inet_fragment.c
529
&q->rb_fragments);
net/ipv4/inet_fragment.c
530
if (q->fragments_tail == skb)
net/ipv4/inet_fragment.c
531
q->fragments_tail = fp;
net/ipv4/inet_fragment.c
541
&q->rb_fragments);
net/ipv4/inet_fragment.c
555
add_frag_mem_limit(q->fqdir, delta);
net/ipv4/inet_fragment.c
57
static void fragrun_append_to_last(struct inet_frag_queue *q,
net/ipv4/inet_fragment.c
577
add_frag_mem_limit(q->fqdir, clone->truesize);
net/ipv4/inet_fragment.c
603
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
net/ipv4/inet_fragment.c
618
rb_erase(&head->rbnode, &q->rb_fragments);
net/ipv4/inet_fragment.c
62
FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
net/ipv4/inet_fragment.c
63
FRAG_CB(q->fragments_tail)->next_frag = skb;
net/ipv4/inet_fragment.c
64
q->fragments_tail = skb;
net/ipv4/inet_fragment.c
660
rb_erase(rbn, &q->rb_fragments);
net/ipv4/inet_fragment.c
664
sub_frag_mem_limit(q->fqdir, sum_truesize);
net/ipv4/inet_fragment.c
669
head->tstamp = q->stamp;
net/ipv4/inet_fragment.c
670
head->tstamp_type = q->tstamp_type;
net/ipv4/inet_fragment.c
677
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
net/ipv4/inet_fragment.c
68
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
net/ipv4/inet_fragment.c
681
head = skb_rb_first(&q->rb_fragments);
net/ipv4/inet_fragment.c
687
&q->rb_fragments);
net/ipv4/inet_fragment.c
689
rb_erase(&head->rbnode, &q->rb_fragments);
net/ipv4/inet_fragment.c
693
if (head == q->fragments_tail)
net/ipv4/inet_fragment.c
694
q->fragments_tail = NULL;
net/ipv4/inet_fragment.c
696
sub_frag_mem_limit(q->fqdir, head->truesize);
net/ipv4/inet_fragment.c
73
if (q->last_run_head)
net/ipv4/inet_fragment.c
74
rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
net/ipv4/inet_fragment.c
75
&q->last_run_head->rbnode.rb_right);
net/ipv4/inet_fragment.c
77
rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
net/ipv4/inet_fragment.c
78
rb_insert_color(&skb->rbnode, &q->rb_fragments);
net/ipv4/inet_fragment.c
80
q->fragments_tail = skb;
net/ipv4/inet_fragment.c
81
q->last_run_head = skb;
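
Note: the fragrun_*() lines above maintain "runs" of consecutive fragments: the rbtree holds one node per run, and within a run skbs chain through FRAG_CB(skb)->next_frag while the run head tracks the run's length. The append step, put back together from the indexed lines (a reconstruction; FRAG_CB() is this file's skb->cb accessor, and the kernel version also re-initializes skb->rbnode first):

    static void fragrun_append_to_last(struct inet_frag_queue *q,
                                       struct sk_buff *skb)
    {
        FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
        FRAG_CB(q->fragments_tail)->next_frag = skb;
        q->fragments_tail = skb;
    }
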
net/ipv4/ip_fragment.c
102
static void ip4_frag_free(struct inet_frag_queue *q)
net/ipv4/ip_fragment.c
106
qp = container_of(q, struct ipq, q);
net/ipv4/ip_fragment.c
133
qp = container_of(frag, struct ipq, q);
net/ipv4/ip_fragment.c
134
net = qp->q.fqdir->net;
net/ipv4/ip_fragment.c
137
spin_lock(&qp->q.lock);
net/ipv4/ip_fragment.c
139
if (qp->q.flags & INET_FRAG_COMPLETE)
net/ipv4/ip_fragment.c
142
qp->q.flags |= INET_FRAG_DROP;
net/ipv4/ip_fragment.c
143
inet_frag_kill(&qp->q, &refs);
net/ipv4/ip_fragment.c
146
if (READ_ONCE(qp->q.fqdir->dead)) {
net/ipv4/ip_fragment.c
147
inet_frag_queue_flush(&qp->q, 0);
net/ipv4/ip_fragment.c
154
if (!(qp->q.flags & INET_FRAG_FIRST_IN))
net/ipv4/ip_fragment.c
161
head = inet_frag_pull_head(&qp->q);
net/ipv4/ip_fragment.c
180
if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
net/ipv4/ip_fragment.c
184
spin_unlock(&qp->q.lock);
net/ipv4/ip_fragment.c
189
spin_unlock(&qp->q.lock);
net/ipv4/ip_fragment.c
193
inet_frag_putn(&qp->q, refs);
net/ipv4/ip_fragment.c
210
struct inet_frag_queue *q;
net/ipv4/ip_fragment.c
212
q = inet_frag_find(net->ipv4.fqdir, &key);
net/ipv4/ip_fragment.c
213
if (!q)
net/ipv4/ip_fragment.c
216
return container_of(q, struct ipq, q);
net/ipv4/ip_fragment.c
223
unsigned int max = qp->q.fqdir->max_dist;
net/ipv4/ip_fragment.c
235
rc = qp->q.fragments_tail && (end - start) > max;
net/ipv4/ip_fragment.c
238
__IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS);
net/ipv4/ip_fragment.c
245
if (!mod_timer_pending(&qp->q.timer, jiffies + qp->q.fqdir->timeout))
net/ipv4/ip_fragment.c
248
inet_frag_queue_flush(&qp->q, SKB_DROP_REASON_FRAG_TOO_FAR);
net/ipv4/ip_fragment.c
250
qp->q.flags = 0;
net/ipv4/ip_fragment.c
251
qp->q.len = 0;
net/ipv4/ip_fragment.c
252
qp->q.meat = 0;
net/ipv4/ip_fragment.c
253
qp->q.rb_fragments = RB_ROOT;
net/ipv4/ip_fragment.c
254
qp->q.fragments_tail = NULL;
net/ipv4/ip_fragment.c
255
qp->q.last_run_head = NULL;
net/ipv4/ip_fragment.c
265
struct net *net = qp->q.fqdir->net;
net/ipv4/ip_fragment.c
275
if (qp->q.flags & INET_FRAG_COMPLETE) {
net/ipv4/ip_fragment.c
283
inet_frag_kill(&qp->q, refs);
net/ipv4/ip_fragment.c
303
if (end < qp->q.len ||
net/ipv4/ip_fragment.c
304
((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
net/ipv4/ip_fragment.c
306
qp->q.flags |= INET_FRAG_LAST_IN;
net/ipv4/ip_fragment.c
307
qp->q.len = end;
net/ipv4/ip_fragment.c
314
if (end > qp->q.len) {
net/ipv4/ip_fragment.c
316
if (qp->q.flags & INET_FRAG_LAST_IN)
net/ipv4/ip_fragment.c
318
qp->q.len = end;
net/ipv4/ip_fragment.c
337
prev_tail = qp->q.fragments_tail;
net/ipv4/ip_fragment.c
338
err = inet_frag_queue_insert(&qp->q, skb, offset, end);
net/ipv4/ip_fragment.c
345
qp->q.stamp = skb->tstamp;
net/ipv4/ip_fragment.c
346
qp->q.tstamp_type = skb->tstamp_type;
net/ipv4/ip_fragment.c
347
qp->q.meat += skb->len;
net/ipv4/ip_fragment.c
349
add_frag_mem_limit(qp->q.fqdir, skb->truesize);
net/ipv4/ip_fragment.c
351
qp->q.flags |= INET_FRAG_FIRST_IN;
net/ipv4/ip_fragment.c
355
if (fragsize > qp->q.max_size)
net/ipv4/ip_fragment.c
356
qp->q.max_size = fragsize;
net/ipv4/ip_fragment.c
362
if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
net/ipv4/ip_fragment.c
363
qp->q.meat == qp->q.len) {
net/ipv4/ip_fragment.c
370
inet_frag_kill(&qp->q, refs);
net/ipv4/ip_fragment.c
387
inet_frag_kill(&qp->q, refs);
net/ipv4/ip_fragment.c
396
return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
net/ipv4/ip_fragment.c
404
struct net *net = qp->q.fqdir->net;
net/ipv4/ip_fragment.c
410
inet_frag_kill(&qp->q, refs);
net/ipv4/ip_fragment.c
419
reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
net/ipv4/ip_fragment.c
423
len = ip_hdrlen(skb) + qp->q.len;
net/ipv4/ip_fragment.c
428
inet_frag_reasm_finish(&qp->q, skb, reasm_data,
net/ipv4/ip_fragment.c
432
IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
net/ipv4/ip_fragment.c
446
if (qp->max_df_size == qp->q.max_size) {
net/ipv4/ip_fragment.c
456
qp->q.rb_fragments = RB_ROOT;
net/ipv4/ip_fragment.c
457
qp->q.fragments_tail = NULL;
net/ipv4/ip_fragment.c
458
qp->q.last_run_head = NULL;
net/ipv4/ip_fragment.c
466
net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
net/ipv4/ip_fragment.c
489
spin_lock(&qp->q.lock);
net/ipv4/ip_fragment.c
493
spin_unlock(&qp->q.lock);
net/ipv4/ip_fragment.c
495
inet_frag_putn(&qp->q, refs);
net/ipv4/ip_fragment.c
62
struct inet_frag_queue q;
net/ipv4/ip_fragment.c
83
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
net/ipv4/ip_fragment.c
85
struct ipq *qp = container_of(q, struct ipq, q);
net/ipv4/ip_fragment.c
87
struct net *net = q->fqdir->net;
net/ipv4/ip_fragment.c
90
q->key.v4 = *key;
net/ipv4/ip_fragment.c
92
if (q->fqdir->max_dist) {
net/ipv4/tcp_fastopen.c
149
struct fastopen_queue *q;
net/ipv4/tcp_fastopen.c
169
q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
net/ipv4/tcp_fastopen.c
170
octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx)));
net/ipv4/tcp_output.c
1296
struct list_head *q, *n;
net/ipv4/tcp_output.c
1304
list_for_each_safe(q, n, &list) {
net/ipv4/tcp_output.c
1305
tp = list_entry(q, struct tcp_sock, tsq_node);
net/ipv6/mcast.c
1543
struct sk_buff_head q;
net/ipv6/mcast.c
1548
skb_queue_head_init(&q);
net/ipv6/mcast.c
1552
__skb_queue_tail(&q, skb);
net/ipv6/mcast.c
1562
while ((skb = __skb_dequeue(&q)))
net/ipv6/mcast.c
1647
struct sk_buff_head q;
net/ipv6/mcast.c
1652
skb_queue_head_init(&q);
net/ipv6/mcast.c
1655
__skb_queue_tail(&q, skb);
net/ipv6/mcast.c
1665
while ((skb = __skb_dequeue(&q)))
net/ipv6/netfilter/nf_conntrack_reasm.c
139
fq = container_of(frag, struct frag_queue, q);
net/ipv6/netfilter/nf_conntrack_reasm.c
141
ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
net/ipv6/netfilter/nf_conntrack_reasm.c
156
struct inet_frag_queue *q;
net/ipv6/netfilter/nf_conntrack_reasm.c
162
q = inet_frag_find(nf_frag->fqdir, &key);
net/ipv6/netfilter/nf_conntrack_reasm.c
163
if (!q)
net/ipv6/netfilter/nf_conntrack_reasm.c
166
return container_of(q, struct frag_queue, q);
net/ipv6/netfilter/nf_conntrack_reasm.c
180
if (fq->q.flags & INET_FRAG_COMPLETE) {
net/ipv6/netfilter/nf_conntrack_reasm.c
210
if (end < fq->q.len ||
net/ipv6/netfilter/nf_conntrack_reasm.c
211
((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
net/ipv6/netfilter/nf_conntrack_reasm.c
215
fq->q.flags |= INET_FRAG_LAST_IN;
net/ipv6/netfilter/nf_conntrack_reasm.c
216
fq->q.len = end;
net/ipv6/netfilter/nf_conntrack_reasm.c
226
inet_frag_kill(&fq->q, refs);
net/ipv6/netfilter/nf_conntrack_reasm.c
229
if (end > fq->q.len) {
net/ipv6/netfilter/nf_conntrack_reasm.c
231
if (fq->q.flags & INET_FRAG_LAST_IN) {
net/ipv6/netfilter/nf_conntrack_reasm.c
235
fq->q.len = end;
net/ipv6/netfilter/nf_conntrack_reasm.c
257
prev = fq->q.fragments_tail;
net/ipv6/netfilter/nf_conntrack_reasm.c
258
err = inet_frag_queue_insert(&fq->q, skb, offset, end);
net/ipv6/netfilter/nf_conntrack_reasm.c
271
fq->q.stamp = skb->tstamp;
net/ipv6/netfilter/nf_conntrack_reasm.c
272
fq->q.tstamp_type = skb->tstamp_type;
net/ipv6/netfilter/nf_conntrack_reasm.c
273
fq->q.meat += skb->len;
net/ipv6/netfilter/nf_conntrack_reasm.c
275
if (payload_len > fq->q.max_size)
net/ipv6/netfilter/nf_conntrack_reasm.c
276
fq->q.max_size = payload_len;
net/ipv6/netfilter/nf_conntrack_reasm.c
277
add_frag_mem_limit(fq->q.fqdir, skb->truesize);
net/ipv6/netfilter/nf_conntrack_reasm.c
284
fq->q.flags |= INET_FRAG_FIRST_IN;
net/ipv6/netfilter/nf_conntrack_reasm.c
287
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
net/ipv6/netfilter/nf_conntrack_reasm.c
288
fq->q.meat == fq->q.len) {
net/ipv6/netfilter/nf_conntrack_reasm.c
306
inet_frag_kill(&fq->q, refs);
net/ipv6/netfilter/nf_conntrack_reasm.c
327
inet_frag_kill(&fq->q, refs);
net/ipv6/netfilter/nf_conntrack_reasm.c
333
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
net/ipv6/netfilter/nf_conntrack_reasm.c
338
sizeof(struct ipv6hdr) + fq->q.len -
net/ipv6/netfilter/nf_conntrack_reasm.c
356
inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
net/ipv6/netfilter/nf_conntrack_reasm.c
362
IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
net/ipv6/netfilter/nf_conntrack_reasm.c
371
fq->q.rb_fragments = RB_ROOT;
net/ipv6/netfilter/nf_conntrack_reasm.c
372
fq->q.fragments_tail = NULL;
net/ipv6/netfilter/nf_conntrack_reasm.c
373
fq->q.last_run_head = NULL;
net/ipv6/netfilter/nf_conntrack_reasm.c
378
inet_frag_kill(&fq->q, refs);
net/ipv6/netfilter/nf_conntrack_reasm.c
489
spin_lock_bh(&fq->q.lock);
net/ipv6/netfilter/nf_conntrack_reasm.c
497
spin_unlock_bh(&fq->q.lock);
net/ipv6/netfilter/nf_conntrack_reasm.c
499
inet_frag_putn(&fq->q, refs);
net/ipv6/reassembly.c
100
q = inet_frag_find(net->ipv6.fqdir, &key);
net/ipv6/reassembly.c
101
if (!q)
net/ipv6/reassembly.c
104
return container_of(q, struct frag_queue, q);
net/ipv6/reassembly.c
120
if (fq->q.flags & INET_FRAG_COMPLETE) {
net/ipv6/reassembly.c
152
if (end < fq->q.len ||
net/ipv6/reassembly.c
153
((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
net/ipv6/reassembly.c
155
fq->q.flags |= INET_FRAG_LAST_IN;
net/ipv6/reassembly.c
156
fq->q.len = end;
net/ipv6/reassembly.c
168
if (end > fq->q.len) {
net/ipv6/reassembly.c
170
if (fq->q.flags & INET_FRAG_LAST_IN)
net/ipv6/reassembly.c
172
fq->q.len = end;
net/ipv6/reassembly.c
193
prev_tail = fq->q.fragments_tail;
net/ipv6/reassembly.c
194
err = inet_frag_queue_insert(&fq->q, skb, offset, end);
net/ipv6/reassembly.c
201
fq->q.stamp = skb->tstamp;
net/ipv6/reassembly.c
202
fq->q.tstamp_type = skb->tstamp_type;
net/ipv6/reassembly.c
203
fq->q.meat += skb->len;
net/ipv6/reassembly.c
205
add_frag_mem_limit(fq->q.fqdir, skb->truesize);
net/ipv6/reassembly.c
208
if (fragsize > fq->q.max_size)
net/ipv6/reassembly.c
209
fq->q.max_size = fragsize;
net/ipv6/reassembly.c
216
fq->q.flags |= INET_FRAG_FIRST_IN;
net/ipv6/reassembly.c
219
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
net/ipv6/reassembly.c
220
fq->q.meat == fq->q.len) {
net/ipv6/reassembly.c
242
inet_frag_kill(&fq->q, refs);
net/ipv6/reassembly.c
261
struct net *net = fq->q.fqdir->net;
net/ipv6/reassembly.c
267
inet_frag_kill(&fq->q, refs);
net/ipv6/reassembly.c
273
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
net/ipv6/reassembly.c
278
sizeof(struct ipv6hdr) + fq->q.len -
net/ipv6/reassembly.c
295
inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
net/ipv6/reassembly.c
302
IP6CB(skb)->frag_max_size = fq->q.max_size;
net/ipv6/reassembly.c
309
fq->q.rb_fragments = RB_ROOT;
net/ipv6/reassembly.c
310
fq->q.fragments_tail = NULL;
net/ipv6/reassembly.c
311
fq->q.last_run_head = NULL;
net/ipv6/reassembly.c
321
inet_frag_kill(&fq->q, refs);
net/ipv6/reassembly.c
384
spin_lock(&fq->q.lock);
net/ipv6/reassembly.c
390
spin_unlock(&fq->q.lock);
net/ipv6/reassembly.c
392
inet_frag_putn(&fq->q, refs);
net/ipv6/reassembly.c
79
fq = container_of(frag, struct frag_queue, q);
net/ipv6/reassembly.c
81
ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
net/ipv6/reassembly.c
94
struct inet_frag_queue *q;
net/mac80211/debugfs.c
599
int q, res = 0;
net/mac80211/debugfs.c
602
for (q = 0; q < local->hw.queues; q++)
net/mac80211/debugfs.c
603
res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q,
net/mac80211/debugfs.c
604
local->queue_stop_reasons[q],
net/mac80211/debugfs.c
605
skb_queue_len(&local->pending[q]));
net/mac80211/ethtool.c
171
q = 0;
net/mac80211/ethtool.c
174
if (drv_get_survey(local, q, &survey) != 0) {
net/mac80211/ethtool.c
178
q++;
net/mac80211/ethtool.c
83
int i, q;
net/mac80211/mlme.c
3520
int q;
net/mac80211/mlme.c
3546
for (q = 0; q < local->hw.queues; q++) {
net/mac80211/mlme.c
3547
if (local->queue_stop_reasons[q]) {
net/mac80211/tx.c
1682
int q = info->hw_queue;
net/mac80211/tx.c
1685
if (WARN_ON_ONCE(q >= local->hw.queues)) {
net/mac80211/tx.c
1693
if (local->queue_stop_reasons[q] ||
net/mac80211/tx.c
1694
(!txpending && !skb_queue_empty(&local->pending[q]))) {
net/mac80211/tx.c
1697
if (local->queue_stop_reasons[q] &
net/mac80211/tx.c
1721
&local->pending[q]);
net/mac80211/tx.c
1724
&local->pending[q]);
net/mac80211/tx.c
3826
int q = vif->hw_queue[txq->ac];
net/mac80211/tx.c
3837
q_stopped = local->queue_stop_reasons[q];
net/mac80211/tx.c
4564
int q = info->hw_queue;
net/mac80211/tx.c
4568
if (local->queue_stop_reasons[q] ||
net/mac80211/tx.c
4569
(!txpending && !skb_queue_empty(&local->pending[q]))) {
net/mac80211/tx.c
4571
skb_queue_head(&local->pending[q], skb);
net/mac80211/tx.c
4573
skb_queue_tail(&local->pending[q], skb);
net/netfilter/nfnetlink_queue.c
1082
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
net/netfilter/nfnetlink_queue.c
1085
queue = instance_lookup(q, queuenum);
net/netfilter/nfnetlink_queue.c
1254
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
net/netfilter/nfnetlink_queue.c
1260
struct hlist_head *head = &q->instance_table[i];
net/netfilter/nfnetlink_queue.c
1287
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
net/netfilter/nfnetlink_queue.c
1297
if (!q)
net/netfilter/nfnetlink_queue.c
1302
struct hlist_head *head = &q->instance_table[i];
net/netfilter/nfnetlink_queue.c
1314
struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
net/netfilter/nfnetlink_queue.c
1320
spin_lock(&q->instances_lock);
net/netfilter/nfnetlink_queue.c
1324
struct hlist_head *head = &q->instance_table[i];
net/netfilter/nfnetlink_queue.c
1331
spin_unlock(&q->instances_lock);
net/netfilter/nfnetlink_queue.c
1362
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
net/netfilter/nfnetlink_queue.c
1366
queue = instance_lookup(q, queue_num);
net/netfilter/nfnetlink_queue.c
1401
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
net/netfilter/nfnetlink_queue.c
1409
queue = verdict_instance_lookup(q, queue_num,
net/netfilter/nfnetlink_queue.c
1512
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
net/netfilter/nfnetlink_queue.c
1523
queue = verdict_instance_lookup(q, queue_num,
net/netfilter/nfnetlink_queue.c
1599
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
net/netfilter/nfnetlink_queue.c
161
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
net/netfilter/nfnetlink_queue.c
1658
queue = instance_lookup(q, queue_num);
net/netfilter/nfnetlink_queue.c
166
head = &q->instance_table[instance_hashfn(queue_num)];
net/netfilter/nfnetlink_queue.c
1670
queue = instance_create(q, queue_num, NETLINK_CB(skb).portid);
net/netfilter/nfnetlink_queue.c
1677
instance_destroy(q, queue);
net/netfilter/nfnetlink_queue.c
175
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
net/netfilter/nfnetlink_queue.c
1759
struct nfnl_queue_net *q;
net/netfilter/nfnetlink_queue.c
1765
q = nfnl_queue_pernet(net);
net/netfilter/nfnetlink_queue.c
1767
if (!hlist_empty(&q->instance_table[st->bucket]))
net/netfilter/nfnetlink_queue.c
1768
return q->instance_table[st->bucket].first;
net/netfilter/nfnetlink_queue.c
1780
struct nfnl_queue_net *q;
net/netfilter/nfnetlink_queue.c
1785
q = nfnl_queue_pernet(net);
net/netfilter/nfnetlink_queue.c
1786
h = q->instance_table[st->bucket].first;
net/netfilter/nfnetlink_queue.c
1845
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
net/netfilter/nfnetlink_queue.c
1848
INIT_HLIST_HEAD(&q->instance_table[i]);
net/netfilter/nfnetlink_queue.c
1850
spin_lock_init(&q->instances_lock);
net/netfilter/nfnetlink_queue.c
1862
struct nfnl_queue_net *q = nfnl_queue_pernet(net);
net/netfilter/nfnetlink_queue.c
1869
WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
net/netfilter/nfnetlink_queue.c
193
spin_lock(&q->instances_lock);
net/netfilter/nfnetlink_queue.c
194
if (instance_lookup(q, queue_num)) {
net/netfilter/nfnetlink_queue.c
205
hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
net/netfilter/nfnetlink_queue.c
207
spin_unlock(&q->instances_lock);
net/netfilter/nfnetlink_queue.c
212
spin_unlock(&q->instances_lock);
net/netfilter/nfnetlink_queue.c
241
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
net/netfilter/nfnetlink_queue.c
243
spin_lock(&q->instances_lock);
net/netfilter/nfnetlink_queue.c
245
spin_unlock(&q->instances_lock);
net/netfilter/xt_quota.c
29
struct xt_quota_info *q = (void *)par->matchinfo;
net/netfilter/xt_quota.c
30
struct xt_quota_priv *priv = q->master;
net/netfilter/xt_quota.c
31
bool ret = q->flags & XT_QUOTA_INVERT;
net/netfilter/xt_quota.c
48
struct xt_quota_info *q = par->matchinfo;
net/netfilter/xt_quota.c
50
if (q->flags & ~XT_QUOTA_MASK)
net/netfilter/xt_quota.c
53
q->master = kmalloc_obj(*q->master);
net/netfilter/xt_quota.c
54
if (q->master == NULL)
net/netfilter/xt_quota.c
57
spin_lock_init(&q->master->lock);
net/netfilter/xt_quota.c
58
q->master->quota = q->quota;
net/netfilter/xt_quota.c
64
const struct xt_quota_info *q = par->matchinfo;
net/netfilter/xt_quota.c
66
kfree(q->master);
net/phonet/pep.c
924
struct sk_buff_head *q;
net/phonet/pep.c
930
q = &pn->ctrlreq_queue;
net/phonet/pep.c
931
spin_lock_bh(&q->lock);
net/phonet/pep.c
932
skb = skb_peek(q);
net/phonet/pep.c
937
spin_unlock_bh(&q->lock);
net/phonet/pep.c
941
q = &sk->sk_receive_queue;
net/phonet/pep.c
942
spin_lock_bh(&q->lock);
net/phonet/pep.c
943
skb = skb_peek(q);
net/phonet/pep.c
946
spin_unlock_bh(&q->lock);
net/rds/message.c
105
q = &rs->rs_zcookie_queue;
net/rds/message.c
106
spin_lock_irqsave(&q->lock, flags);
net/rds/message.c
107
head = &q->zcookie_head;
net/rds/message.c
112
spin_unlock_irqrestore(&q->lock, flags);
net/rds/message.c
123
list_add_tail(&info->rs_zcookie_next, &q->zcookie_head);
net/rds/message.c
125
spin_unlock_irqrestore(&q->lock, flags);
net/rds/message.c
77
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
net/rds/message.c
83
spin_lock_irqsave(&q->lock, flags);
net/rds/message.c
84
list_splice(&q->zcookie_head, &copy);
net/rds/message.c
85
INIT_LIST_HEAD(&q->zcookie_head);
net/rds/message.c
86
spin_unlock_irqrestore(&q->lock, flags);
net/rds/message.c
98
struct rds_msg_zcopy_queue *q;
net/rds/rds.h
403
static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
net/rds/rds.h
405
spin_lock_init(&q->lock);
net/rds/rds.h
406
INIT_LIST_HEAD(&q->zcookie_head);
net/rds/recv.c
638
struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
net/rds/recv.c
650
spin_lock_irqsave(&q->lock, flags);
net/rds/recv.c
651
if (!list_empty(&q->zcookie_head)) {
net/rds/recv.c
652
info = list_entry(q->zcookie_head.next,
net/rds/recv.c
656
spin_unlock_irqrestore(&q->lock, flags);
net/rds/recv.c
662
spin_lock_irqsave(&q->lock, flags);
net/rds/recv.c
663
list_add(&info->rs_zcookie_next, &q->zcookie_head);
net/rds/recv.c
664
spin_unlock_irqrestore(&q->lock, flags);
net/rose/rose_in.c
102
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
net/rose/rose_in.c
267
int queued = 0, frametype, ns, nr, q, d, m;
net/rose/rose_in.c
272
frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);
net/rose/rose_in.c
282
queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
net/rose/rose_subr.c
201
int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
net/rose/rose_subr.c
207
*ns = *nr = *q = *d = *m = 0;
net/rose/rose_subr.c
228
*q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
net/rxrpc/rxgk_app.c
107
q = payload + 5 * sizeof(__be32);
net/rxrpc/rxgk_app.c
108
q[0] = htonl(RXRPC_SECURITY_YFS_RXGK);
net/rxrpc/rxgk_app.c
109
q[1] = t[1]; /* begintime - msw */
net/rxrpc/rxgk_app.c
110
q[2] = t[2]; /* - lsw */
net/rxrpc/rxgk_app.c
111
q[3] = t[5]; /* endtime - msw */
net/rxrpc/rxgk_app.c
112
q[4] = t[6]; /* - lsw */
net/rxrpc/rxgk_app.c
113
q[5] = 0; /* level - msw */
net/rxrpc/rxgk_app.c
114
q[6] = t[0]; /* - lsw */
net/rxrpc/rxgk_app.c
115
q[7] = 0; /* lifetime - msw */
net/rxrpc/rxgk_app.c
116
q[8] = t[3]; /* - lsw */
net/rxrpc/rxgk_app.c
117
q[9] = 0; /* bytelife - msw */
net/rxrpc/rxgk_app.c
118
q[10] = t[4]; /* - lsw */
net/rxrpc/rxgk_app.c
119
q[11] = 0; /* enctype - msw */
net/rxrpc/rxgk_app.c
120
q[12] = htonl(enctype); /* - lsw */
net/rxrpc/rxgk_app.c
121
q[13] = htonl(klen); /* Key length */
net/rxrpc/rxgk_app.c
123
q += 14;
net/rxrpc/rxgk_app.c
125
memcpy(q, ticket + sizeof(__be32) * 2, klen);
net/rxrpc/rxgk_app.c
126
q += xdr_round_up(klen) / 4;
net/rxrpc/rxgk_app.c
127
q[0] = htonl(ticket_len);
net/rxrpc/rxgk_app.c
128
q++;
net/rxrpc/rxgk_app.c
129
if (WARN_ON((unsigned long)q != (unsigned long)ticket)) {
net/rxrpc/rxgk_app.c
135
q += xdr_round_up(ticket_len) / 4;
net/rxrpc/rxgk_app.c
136
if (WARN_ON((unsigned long)q - (unsigned long)payload != payload_len)) {
net/rxrpc/rxgk_app.c
52
__be32 *t, *p, *q, tmp[2];
net/rxrpc/rxkad.c
962
u8 *p, *q, *name, *end;
net/rxrpc/rxkad.c
989
q = memchr(p, 0, end - p); \
net/rxrpc/rxkad.c
990
if (!q || q - p > field##_SZ) \
net/rxrpc/rxkad.c
994
for (; p < q; p++) \
net/sched/bpf_qdisc.c
228
struct bpf_sched_data *q = qdisc_priv(sch);
net/sched/bpf_qdisc.c
230
qdisc_watchdog_schedule_range_ns(&q->watchdog, expire, delta_ns);
net/sched/bpf_qdisc.c
237
struct bpf_sched_data *q = qdisc_priv(sch);
net/sched/bpf_qdisc.c
241
qdisc_watchdog_init(&q->watchdog, sch);
net/sched/bpf_qdisc.c
263
struct bpf_sched_data *q = qdisc_priv(sch);
net/sched/bpf_qdisc.c
265
qdisc_watchdog_cancel(&q->watchdog);
net/sched/bpf_qdisc.c
62
case offsetof(struct Qdisc, q) + offsetof(struct qdisc_skb_head, qlen):
net/sched/bpf_qdisc.c
63
*end = offsetof(struct Qdisc, q) + offsetofend(struct qdisc_skb_head, qlen);
net/sched/cls_api.c
1013
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
net/sched/cls_api.c
1039
block->q = q;
net/sched/cls_api.c
1180
static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
net/sched/cls_api.c
1202
*q = rcu_dereference(dev->qdisc);
net/sched/cls_api.c
1203
*parent = (*q)->handle;
net/sched/cls_api.c
1205
*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
net/sched/cls_api.c
1206
if (!*q) {
net/sched/cls_api.c
1213
*q = qdisc_refcount_inc_nz(*q);
net/sched/cls_api.c
1214
if (!*q) {
net/sched/cls_api.c
1221
cops = (*q)->ops->cl_ops;
net/sched/cls_api.c
1247
qdisc_put(*q);
net/sched/cls_api.c
1249
qdisc_put_unlocked(*q);
net/sched/cls_api.c
1250
*q = NULL;
net/sched/cls_api.c
1255
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
net/sched/cls_api.c
1263
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
net/sched/cls_api.c
1265
*cl = cops->find(q, parent);
net/sched/cls_api.c
1275
static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
net/sched/cls_api.c
1289
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
net/sched/cls_api.c
1291
block = cops->tcf_block(q, cl, extack);
net/sched/cls_api.c
1312
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
net/sched/cls_api.c
1328
if (q)
net/sched/cls_api.c
1329
tcf_block_offload_unbind(block, q, ei);
net/sched/cls_api.c
1335
} else if (q) {
net/sched/cls_api.c
1336
tcf_block_offload_unbind(block, q, ei);
net/sched/cls_api.c
1349
static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
net/sched/cls_api.c
1359
err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
net/sched/cls_api.c
1363
err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
net/sched/cls_api.c
1367
block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
net/sched/cls_api.c
1376
if (*q)
net/sched/cls_api.c
1377
qdisc_put(*q);
net/sched/cls_api.c
1379
*q = NULL;
net/sched/cls_api.c
1383
static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
net/sched/cls_api.c
1389
if (q) {
net/sched/cls_api.c
1391
qdisc_put(q);
net/sched/cls_api.c
1393
qdisc_put_unlocked(q);
net/sched/cls_api.c
1399
struct Qdisc *q;
net/sched/cls_api.c
1405
struct Qdisc *q,
net/sched/cls_api.c
1411
netif_keep_dst(qdisc_dev(q));
net/sched/cls_api.c
1420
tcf_block_owner_netif_keep_dst(block, item->q,
net/sched/cls_api.c
1426
struct Qdisc *q,
net/sched/cls_api.c
1434
item->q = q;
net/sched/cls_api.c
1441
struct Qdisc *q,
net/sched/cls_api.c
1447
if (item->q == q && item->binder_type == binder_type) {
net/sched/cls_api.c
1464
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
net/sched/cls_api.c
1468
struct net_device *dev = qdisc_dev(q);
net/sched/cls_api.c
1469
struct net *net = qdisc_net(q);
net/sched/cls_api.c
1478
block = tcf_block_create(net, q, ei->block_index, extack);
net/sched/cls_api.c
1488
err = tcf_block_owner_add(block, q, ei->binder_type);
net/sched/cls_api.c
1492
tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
net/sched/cls_api.c
1498
err = tcf_block_offload_bind(block, q, ei, extack);
net/sched/cls_api.c
1514
tcf_block_offload_unbind(block, q, ei);
net/sched/cls_api.c
1518
tcf_block_owner_del(block, q, ei->binder_type);
net/sched/cls_api.c
1534
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
net/sched/cls_api.c
1543
return tcf_block_get_ext(p_block, q, &ei, extack);
net/sched/cls_api.c
1550
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
net/sched/cls_api.c
1553
struct net_device *dev = qdisc_dev(q);
net/sched/cls_api.c
1560
tcf_block_owner_del(block, q, ei->binder_type);
net/sched/cls_api.c
1562
__tcf_block_put(block, q, ei, true);
net/sched/cls_api.c
1572
tcf_block_put_ext(block, block->q, &ei);
net/sched/cls_api.c
2052
struct Qdisc *q, u32 parent, void *fh,
net/sched/cls_api.c
2069
if (q) {
net/sched/cls_api.c
2070
tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
net/sched/cls_api.c
2118
struct Qdisc *q, u32 parent,
net/sched/cls_api.c
2132
ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
net/sched/cls_api.c
2148
struct tcf_block *block, struct Qdisc *q,
net/sched/cls_api.c
2159
skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
net/sched/cls_api.c
2174
struct tcf_block *block, struct Qdisc *q,
net/sched/cls_api.c
2185
skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
net/sched/cls_api.c
2207
struct tcf_block *block, struct Qdisc *q,
net/sched/cls_api.c
2216
tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
net/sched/cls_api.c
2231
static bool is_ingress_or_clsact(struct tcf_block *block, struct Qdisc *q)
net/sched/cls_api.c
2233
return tcf_block_shared(block) || (q && !!(q->flags & TCQ_F_INGRESS));
net/sched/cls_api.c
2248
struct Qdisc *q;
net/sched/cls_api.c
2276
q = NULL;
net/sched/cls_api.c
2295
err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
net/sched/cls_api.c
2310
(q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
net/sched/cls_api.c
2316
err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
net/sched/cls_api.c
2320
block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
net/sched/cls_api.c
2428
if (is_ingress_or_clsact(block, q))
net/sched/cls_api.c
2433
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
net/sched/cls_api.c
2438
if (q)
net/sched/cls_api.c
2439
q->flags &= ~TCQ_F_CAN_BYPASS;
net/sched/cls_api.c
2452
tcf_block_release(q, block, rtnl_held);
net/sched/cls_api.c
2483
struct Qdisc *q = NULL;
net/sched/cls_api.c
2510
err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
net/sched/cls_api.c
2524
(q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
net/sched/cls_api.c
2530
err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
net/sched/cls_api.c
2534
block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
net/sched/cls_api.c
2562
tfilter_notify_chain(net, skb, block, q, parent, n,
net/sched/cls_api.c
2589
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
net/sched/cls_api.c
2604
err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
net/sched/cls_api.c
2619
tcf_block_release(q, block, rtnl_held);
net/sched/cls_api.c
2642
struct Qdisc *q = NULL;
net/sched/cls_api.c
2669
err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
net/sched/cls_api.c
2682
if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
net/sched/cls_api.c
2688
err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
net/sched/cls_api.c
2692
block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
net/sched/cls_api.c
2735
err = tfilter_notify(net, skb, n, tp, block, q, parent,
net/sched/cls_api.c
2748
tcf_block_release(q, block, rtnl_held);
net/sched/cls_api.c
2761
struct Qdisc *q;
net/sched/cls_api.c
2771
return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
net/sched/cls_api.c
2777
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
net/sched/cls_api.c
2805
if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
net/sched/cls_api.c
2818
arg.q = q;
net/sched/cls_api.c
2849
struct Qdisc *q = NULL;
net/sched/cls_api.c
2895
q = rtnl_dereference(dev->qdisc);
net/sched/cls_api.c
2897
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
net/sched/cls_api.c
2898
if (!q)
net/sched/cls_api.c
2900
cops = q->ops->cl_ops;
net/sched/cls_api.c
2906
cl = cops->find(q, tcm->tcm_parent);
net/sched/cls_api.c
2910
block = cops->tcf_block(q, cl, NULL);
net/sched/cls_api.c
2915
q = NULL;
net/sched/cls_api.c
2929
if (!tcf_chain_dump(chain, q, parent, skb, cb,
net/sched/cls_api.c
2973
if (block->q) {
net/sched/cls_api.c
2974
tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
net/sched/cls_api.c
2975
tcm->tcm_parent = block->q->handle;
net/sched/cls_api.c
3121
struct Qdisc *q;
net/sched/cls_api.c
3128
q = NULL;
net/sched/cls_api.c
3138
block = tcf_block_find(net, &q, &parent, &cl,
net/sched/cls_api.c
3209
tfilter_notify_chain(net, skb, block, q, parent, n,
net/sched/cls_api.c
3233
tcf_block_release(q, block, true);
net/sched/cls_api.c
3249
struct Qdisc *q = NULL;
net/sched/cls_api.c
3279
q = rtnl_dereference(dev->qdisc);
net/sched/cls_api.c
3281
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
net/sched/cls_api.c
3283
if (!q)
net/sched/cls_api.c
3285
cops = q->ops->cl_ops;
net/sched/cls_api.c
3291
cl = cops->find(q, tcm->tcm_parent);
net/sched/cls_api.c
3295
block = cops->tcf_block(q, cl, NULL);
net/sched/cls_api.c
3299
q = NULL;
net/sched/cls_api.c
855
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
net/sched/cls_api.c
859
struct net_device *dev = q->dev_queue->dev;
net/sched/cls_api.c
875
err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
net/sched/cls_api.c
895
static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
net/sched/cls_api.c
898
struct net_device *dev = q->dev_queue->dev;
net/sched/cls_api.c
902
err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
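An aside on the cls_api.c entries above: tcf_block_get_ext() pairs each setup step (tcf_block_owner_add, tcf_block_offload_bind) with a teardown in reverse order on its error paths (tcf_block_offload_unbind, tcf_block_owner_del). A minimal, self-contained sketch of that goto-unwind idiom; setup_a/undo_a and friends are hypothetical names, not kernel APIs:

#include <errno.h>
#include <stdio.h>

/* hypothetical setup/teardown pairs standing in for
 * tcf_block_owner_add()/tcf_block_offload_bind() and their undo calls */
static int setup_a(void) { return 0; }
static void undo_a(void) { }
static int setup_b(void) { return -ENODEV; }

static int get_ext(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;
	err = setup_b();
	if (err)
		goto err_unbind;
	return 0;

err_unbind:		/* unwind in reverse order of setup */
	undo_a();
	return err;
}

int main(void)
{
	printf("get_ext() = %d\n", get_ext());
	return 0;
}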
net/sched/cls_basic.c
261
static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
net/sched/cls_basic.c
266
tc_cls_bind_class(classid, cl, q, &f->res, base);
net/sched/cls_bpf.c
631
void *q, unsigned long base)
net/sched/cls_bpf.c
635
tc_cls_bind_class(classid, cl, q, &prog->res, base);
net/sched/cls_flow.c
507
struct Qdisc *q;
net/sched/cls_flow.c
515
q = tcf_block_q(block);
net/sched/cls_flow.c
516
baseclass = TC_H_MAKE(q->handle, baseclass);
net/sched/cls_flower.c
3813
static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
net/sched/cls_flower.c
3818
tc_cls_bind_class(classid, cl, q, &f->res, base);
net/sched/cls_fw.c
425
static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
net/sched/cls_fw.c
430
tc_cls_bind_class(classid, cl, q, &f->res, base);
net/sched/cls_fw.c
77
struct Qdisc *q = tcf_block_q(tp->chain->block);
net/sched/cls_fw.c
81
!(TC_H_MAJ(id ^ q->handle)))) {
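The cls_flow.c and cls_fw.c entries above combine class IDs with q->handle through the TC_H_* helpers, which pack a 16-bit qdisc major and a 16-bit minor into a single u32. A standalone demonstration, assuming the standard uapi pkt_sched.h definitions:

#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MIN_MASK 0x0000FFFFU
#define TC_H_MAJ(h)         ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)         ((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

int main(void)
{
	uint32_t handle = 0x00010000;              /* qdisc 1: */
	uint32_t baseclass = TC_H_MAKE(handle, 5); /* class 1:5, as cls_flow.c builds it */

	/* cls_fw.c's TC_H_MAJ(id ^ q->handle) is zero exactly when
	 * id carries the same 16-bit major as the qdisc handle */
	printf("baseclass=%#x same_major=%d\n",
	       baseclass, TC_H_MAJ(baseclass ^ handle) == 0);
	return 0;
}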
net/sched/cls_matchall.c
381
static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
net/sched/cls_matchall.c
386
tc_cls_bind_class(classid, cl, q, &head->res, base);
net/sched/cls_route.c
653
static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
net/sched/cls_route.c
658
tc_cls_bind_class(classid, cl, q, &f->res, base);
net/sched/cls_u32.c
1334
static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
net/sched/cls_u32.c
1339
tc_cls_bind_class(classid, cl, q, &n->res, base);
net/sched/cls_u32.c
343
return block->q;
net/sched/sch_api.c
1003
if (!tc_qdisc_dump_ignore(q, false)) {
net/sched/sch_api.c
1004
if (tc_fill_qdisc(skb, q, clid, portid, n->nlmsg_seq, 0,
net/sched/sch_api.c
1092
struct Qdisc *q = old;
net/sched/sch_api.c
1101
if ((q && q->flags & TCQ_F_INGRESS) ||
net/sched/sch_api.c
1110
q = rtnl_dereference(dev_queue->qdisc_sleeping);
net/sched/sch_api.c
1115
if (!qdisc_refcount_dec_if_one(q)) {
net/sched/sch_api.c
135
struct Qdisc_ops *q, **qp;
net/sched/sch_api.c
139
for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
net/sched/sch_api.c
140
if (!strcmp(qops->id, q->id))
net/sched/sch_api.c
1418
static int check_loop_fn(struct Qdisc *q, unsigned long cl,
net/sched/sch_api.c
1421
static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
net/sched/sch_api.c
1425
if (q->ops->cl_ops == NULL)
net/sched/sch_api.c
1432
q->ops->cl_ops->walk(q, &arg.w);
net/sched/sch_api.c
1437
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
net/sched/sch_api.c
1440
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
net/sched/sch_api.c
1443
leaf = cops->leaf(q, cl);
net/sched/sch_api.c
1474
struct Qdisc *q = NULL;
net/sched/sch_api.c
1488
q = qdisc_leaf(p, clid, extack);
net/sched/sch_api.c
1490
q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
net/sched/sch_api.c
1493
q = rtnl_dereference(dev->qdisc);
net/sched/sch_api.c
1495
if (!q) {
net/sched/sch_api.c
1499
if (IS_ERR(q))
net/sched/sch_api.c
1500
return PTR_ERR(q);
net/sched/sch_api.c
1502
if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
net/sched/sch_api.c
1507
q = qdisc_lookup(dev, tcm->tcm_handle);
net/sched/sch_api.c
1508
if (!q) {
net/sched/sch_api.c
1514
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
net/sched/sch_api.c
1524
if (q->handle == 0) {
net/sched/sch_api.c
1528
err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
net/sched/sch_api.c
1532
qdisc_get_notify(net, skb, n, clid, q, NULL);
net/sched/sch_api.c
1587
struct Qdisc *q = NULL;
net/sched/sch_api.c
1607
q = qdisc_leaf(p, clid, extack);
net/sched/sch_api.c
1608
if (IS_ERR(q))
net/sched/sch_api.c
1609
return PTR_ERR(q);
net/sched/sch_api.c
1611
q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
net/sched/sch_api.c
1614
q = rtnl_dereference(dev->qdisc);
net/sched/sch_api.c
1618
if (q && q->handle == 0)
net/sched/sch_api.c
1619
q = NULL;
net/sched/sch_api.c
1621
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
net/sched/sch_api.c
1623
if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
net/sched/sch_api.c
1631
q = qdisc_lookup(dev, tcm->tcm_handle);
net/sched/sch_api.c
1632
if (!q)
net/sched/sch_api.c
1634
if (q->parent != tcm->tcm_parent) {
net/sched/sch_api.c
1643
nla_strcmp(tca[TCA_KIND], q->ops->id)) {
net/sched/sch_api.c
1647
if (q->flags & TCQ_F_INGRESS) {
net/sched/sch_api.c
1652
if (q == p ||
net/sched/sch_api.c
1653
(p && check_loop(q, p, 0))) {
net/sched/sch_api.c
1661
qdisc_refcount_inc(q);
net/sched/sch_api.c
1664
if (!q)
net/sched/sch_api.c
1693
nla_strcmp(tca[TCA_KIND], q->ops->id)) {
net/sched/sch_api.c
1707
q = qdisc_lookup(dev, tcm->tcm_handle);
net/sched/sch_api.c
1711
if (!q) {
net/sched/sch_api.c
1719
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
net/sched/sch_api.c
1723
err = qdisc_change(q, tca, extack);
net/sched/sch_api.c
1725
qdisc_notify(sock_net(skb->sk), skb, n, clid, NULL, q, extack);
net/sched/sch_api.c
1736
q = qdisc_create(dev, dev_ingress_queue(dev),
net/sched/sch_api.c
1753
q = qdisc_create(dev, dev_queue,
net/sched/sch_api.c
1757
if (!q)
net/sched/sch_api.c
1761
err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
net/sched/sch_api.c
1763
if (q)
net/sched/sch_api.c
1764
qdisc_put(q);
net/sched/sch_api.c
179
struct Qdisc_ops *q, **qp;
net/sched/sch_api.c
1829
struct Qdisc *q;
net/sched/sch_api.c
183
for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
net/sched/sch_api.c
1835
q = root;
net/sched/sch_api.c
1839
if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
net/sched/sch_api.c
184
if (q == qops)
net/sched/sch_api.c
1840
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
net/sched/sch_api.c
1856
hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
net/sched/sch_api.c
186
if (q) {
net/sched/sch_api.c
1861
if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
net/sched/sch_api.c
1862
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
net/sched/sch_api.c
187
*qp = q->next;
net/sched/sch_api.c
188
q->next = NULL;
net/sched/sch_api.c
1942
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
net/sched/sch_api.c
1950
const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
net/sched/sch_api.c
1960
tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
net/sched/sch_api.c
1961
tcm->tcm_parent = q->handle;
net/sched/sch_api.c
1962
tcm->tcm_handle = q->handle;
net/sched/sch_api.c
1964
if (nla_put_string(skb, TCA_KIND, q->ops->id))
net/sched/sch_api.c
1966
if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
net/sched/sch_api.c
1973
if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
net/sched/sch_api.c
1994
struct nlmsghdr *n, struct Qdisc *q,
net/sched/sch_api.c
2007
if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
net/sched/sch_api.c
2017
struct nlmsghdr *n, struct Qdisc *q,
net/sched/sch_api.c
2027
if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, RTM_NEWTCLASS,
net/sched/sch_api.c
2040
struct Qdisc *q, unsigned long cl,
net/sched/sch_api.c
2055
if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
net/sched/sch_api.c
2064
err = cops->delete(q, cl, extack);
net/sched/sch_api.c
207
struct Qdisc_ops *q = NULL;
net/sched/sch_api.c
2089
struct Qdisc *q = tcf_block_q(tp->chain->block);
net/sched/sch_api.c
209
for (q = qdisc_base; q; q = q->next) {
net/sched/sch_api.c
2091
sch_tree_lock(q);
net/sched/sch_api.c
2092
tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
net/sched/sch_api.c
2093
sch_tree_unlock(q);
net/sched/sch_api.c
210
if (!strcmp(name, q->id)) {
net/sched/sch_api.c
2105
static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
net/sched/sch_api.c
2109
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
net/sched/sch_api.c
211
if (!bpf_try_module_get(q, q->owner))
net/sched/sch_api.c
2113
block = cops->tcf_block(q, cl, NULL);
net/sched/sch_api.c
212
q = NULL;
net/sched/sch_api.c
2136
static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
net/sched/sch_api.c
2139
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
net/sched/sch_api.c
2148
q->ops->cl_ops->walk(q, &args.w);
net/sched/sch_api.c
2153
static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
net/sched/sch_api.c
2168
struct Qdisc *q = NULL;
net/sched/sch_api.c
217
return q;
net/sched/sch_api.c
2220
q = qdisc_lookup(dev, qid);
net/sched/sch_api.c
2221
if (!q)
net/sched/sch_api.c
2225
cops = q->ops->cl_ops;
net/sched/sch_api.c
2237
cl = cops->find(q, clid);
net/sched/sch_api.c
2252
err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
net/sched/sch_api.c
2254
tc_bind_tclass(q, portid, clid, 0);
net/sched/sch_api.c
2257
err = tclass_get_notify(net, skb, n, q, cl, extack);
net/sched/sch_api.c
2279
err = cops->change(q, clid, portid, tca, &new_cl, extack);
net/sched/sch_api.c
2281
tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
net/sched/sch_api.c
2284
tc_bind_tclass(q, portid, clid, new_cl);
net/sched/sch_api.c
2321
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
net/sched/sch_api.c
2326
return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
net/sched/sch_api.c
2331
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
net/sched/sch_api.c
2337
if (tc_qdisc_dump_ignore(q, false) ||
net/sched/sch_api.c
2338
*t_p < s_t || !q->ops->cl_ops ||
net/sched/sch_api.c
2340
TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
net/sched/sch_api.c
2352
q->ops->cl_ops->walk(q, &arg.w);
net/sched/sch_api.c
2364
struct Qdisc *q;
net/sched/sch_api.c
2377
q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
net/sched/sch_api.c
2378
if (q && q != root &&
net/sched/sch_api.c
2379
tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
net/sched/sch_api.c
2383
hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
net/sched/sch_api.c
2384
if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
net/sched/sch_api.c
265
struct Qdisc *q;
net/sched/sch_api.c
274
hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
net/sched/sch_api.c
276
if (q->handle == handle)
net/sched/sch_api.c
277
return q;
net/sched/sch_api.c
282
void qdisc_hash_add(struct Qdisc *q, bool invisible)
net/sched/sch_api.c
284
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
net/sched/sch_api.c
286
hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
net/sched/sch_api.c
288
q->flags |= TCQ_F_INVISIBLE;
net/sched/sch_api.c
293
void qdisc_hash_del(struct Qdisc *q)
net/sched/sch_api.c
295
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
net/sched/sch_api.c
297
hash_del_rcu(&q->hash);
net/sched/sch_api.c
304
struct Qdisc *q;
net/sched/sch_api.c
308
q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
net/sched/sch_api.c
309
if (q)
net/sched/sch_api.c
313
q = qdisc_match_from_root(
net/sched/sch_api.c
317
return q;
net/sched/sch_api.c
323
struct Qdisc *q;
net/sched/sch_api.c
327
q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
net/sched/sch_api.c
328
if (q)
net/sched/sch_api.c
333
q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
net/sched/sch_api.c
336
return q;
net/sched/sch_api.c
362
struct Qdisc_ops *q = NULL;
net/sched/sch_api.c
366
for (q = qdisc_base; q; q = q->next) {
net/sched/sch_api.c
367
if (nla_strcmp(kind, q->id) == 0) {
net/sched/sch_api.c
368
if (!bpf_try_module_get(q, q->owner))
net/sched/sch_api.c
369
q = NULL;
net/sched/sch_api.c
375
return q;
net/sched/sch_api.c
793
notify = !sch->q.qlen;
net/sched/sch_api.c
808
sch->q.qlen -= n;
net/sched/sch_api.c
898
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
net/sched/sch_api.c
920
tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
net/sched/sch_api.c
922
tcm->tcm_handle = q->handle;
net/sched/sch_api.c
923
tcm->tcm_info = refcount_read(&q->refcnt);
net/sched/sch_api.c
924
if (nla_put_string(skb, TCA_KIND, q->ops->id))
net/sched/sch_api.c
926
if (q->ops->ingress_block_get) {
net/sched/sch_api.c
927
block_index = q->ops->ingress_block_get(q);
net/sched/sch_api.c
932
if (q->ops->egress_block_get) {
net/sched/sch_api.c
933
block_index = q->ops->egress_block_get(q);
net/sched/sch_api.c
938
if (q->ops->dump && q->ops->dump(q, skb) < 0)
net/sched/sch_api.c
940
if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
net/sched/sch_api.c
942
qlen = qdisc_qlen_sum(q);
net/sched/sch_api.c
944
stab = rtnl_dereference(q->stab);
net/sched/sch_api.c
952
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
net/sched/sch_api.c
955
if (qdisc_is_percpu_stats(q)) {
net/sched/sch_api.c
956
cpu_bstats = q->cpu_bstats;
net/sched/sch_api.c
957
cpu_qstats = q->cpu_qstats;
net/sched/sch_api.c
960
if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
net/sched/sch_api.c
961
gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
net/sched/sch_api.c
962
gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
net/sched/sch_api.c
982
static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
net/sched/sch_api.c
984
if (q->flags & TCQ_F_BUILTIN)
net/sched/sch_api.c
986
if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
net/sched/sch_api.c
993
struct nlmsghdr *n, u32 clid, struct Qdisc *q,
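Several sch_api.c entries above (register_qdisc and unregister_qdisc walking qdisc_base, and qdisc_lookup_ops) use the pointer-to-pointer list walk, where qp always addresses the link to be patched, so unlinking needs no special case for the head. A userspace reduction of that idiom; struct ops and the node names are illustrative only:

#include <stdio.h>

/* toy stand-in for struct Qdisc_ops nodes on the qdisc_base list */
struct ops {
	const char *id;
	struct ops *next;
};

static struct ops *base;

static void register_ops(struct ops *n)
{
	n->next = base;
	base = n;
}

/* mirrors "for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)":
 * *qp is the pointer that currently reaches q, so *qp = q->next
 * unlinks q whether it is the head or an interior node */
static int unregister_ops(struct ops *target)
{
	struct ops *q, **qp;

	for (qp = &base; (q = *qp) != NULL; qp = &q->next)
		if (q == target) {
			*qp = q->next;
			q->next = NULL;
			return 0;
		}
	return -1;
}

int main(void)
{
	struct ops a = { .id = "pfifo" }, b = { .id = "cake" };

	register_ops(&a);
	register_ops(&b);
	unregister_ops(&a);
	printf("head after unlink: %s\n", base ? base->id : "(empty)");
	return 0;
}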
net/sched/sch_cake.c
1211
static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
net/sched/sch_cake.c
1214
bool aggressive = q->config->ack_filter == CAKE_ACK_AGGRESSIVE;
net/sched/sch_cake.c
1376
struct cake_sched_config *q = qd->config;
net/sched/sch_cake.c
1378
if (q->rate_flags & CAKE_FLAG_OVERHEAD)
net/sched/sch_cake.c
1386
len += q->rate_overhead;
net/sched/sch_cake.c
1388
if (len < q->rate_mpu)
net/sched/sch_cake.c
1389
len = q->rate_mpu;
net/sched/sch_cake.c
1391
if (q->atm_mode == CAKE_ATM_ATM) {
net/sched/sch_cake.c
1395
} else if (q->atm_mode == CAKE_ATM_PTM) {
net/sched/sch_cake.c
1411
static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
net/sched/sch_cake.c
1419
q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
net/sched/sch_cake.c
1422
return cake_calc_overhead(q, len, off);
net/sched/sch_cake.c
1451
return (cake_calc_overhead(q, len, off) * (segs - 1) +
net/sched/sch_cake.c
1452
cake_calc_overhead(q, last_len, off));
net/sched/sch_cake.c
1455
static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
net/sched/sch_cake.c
1457
struct cake_heap_entry ii = q->overflow_heap[i];
net/sched/sch_cake.c
1458
struct cake_heap_entry jj = q->overflow_heap[j];
net/sched/sch_cake.c
1460
q->overflow_heap[i] = jj;
net/sched/sch_cake.c
1461
q->overflow_heap[j] = ii;
net/sched/sch_cake.c
1463
q->tins[ii.t].overflow_idx[ii.b] = j;
net/sched/sch_cake.c
1464
q->tins[jj.t].overflow_idx[jj.b] = i;
net/sched/sch_cake.c
1467
static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
net/sched/sch_cake.c
1469
struct cake_heap_entry ii = q->overflow_heap[i];
net/sched/sch_cake.c
1471
return q->tins[ii.t].backlogs[ii.b];
net/sched/sch_cake.c
1474
static void cake_heapify(struct cake_sched_data *q, u16 i)
net/sched/sch_cake.c
1477
u32 mb = cake_heap_get_backlog(q, i);
net/sched/sch_cake.c
1485
u32 lb = cake_heap_get_backlog(q, l);
net/sched/sch_cake.c
1494
u32 rb = cake_heap_get_backlog(q, r);
net/sched/sch_cake.c
1503
cake_heap_swap(q, i, m);
net/sched/sch_cake.c
1511
static void cake_heapify_up(struct cake_sched_data *q, u16 i)
net/sched/sch_cake.c
1515
u32 ib = cake_heap_get_backlog(q, i);
net/sched/sch_cake.c
1516
u32 pb = cake_heap_get_backlog(q, p);
net/sched/sch_cake.c
1519
cake_heap_swap(q, i, p);
net/sched/sch_cake.c
1527
static int cake_advance_shaper(struct cake_sched_data *q,
net/sched/sch_cake.c
1537
if (q->rate_ns) {
net/sched/sch_cake.c
1539
u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
net/sched/sch_cake.c
1550
q->time_next_packet = ktime_add_ns(q->time_next_packet,
net/sched/sch_cake.c
1553
q->failsafe_next_packet = \
net/sched/sch_cake.c
1554
ktime_add_ns(q->failsafe_next_packet,
net/sched/sch_cake.c
1562
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
1570
if (!q->overflow_timeout) {
net/sched/sch_cake.c
1574
cake_heapify(q, i);
net/sched/sch_cake.c
1576
q->overflow_timeout = 65535;
net/sched/sch_cake.c
1579
qq = q->overflow_heap[0];
net/sched/sch_cake.c
1583
b = &q->tins[tin];
net/sched/sch_cake.c
1588
q->overflow_timeout = 0;
net/sched/sch_cake.c
1596
q->buffer_used -= skb->truesize;
net/sched/sch_cake.c
1604
if (q->config->rate_flags & CAKE_FLAG_INGRESS)
net/sched/sch_cake.c
1605
cake_advance_shaper(q, b, skb, now, true);
net/sched/sch_cake.c
1608
sch->q.qlen--;
net/sched/sch_cake.c
1610
cake_heapify(q, 0);
net/sched/sch_cake.c
1675
struct cake_sched_config *q = qd->config;
net/sched/sch_cake.c
1684
mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
net/sched/sch_cake.c
1685
wash = !!(q->rate_flags & CAKE_FLAG_WASH);
net/sched/sch_cake.c
1689
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
net/sched/sch_cake.c
1715
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
1721
filter = rcu_dereference_bh(q->filter_list);
net/sched/sch_cake.c
1756
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
1765
idx = cake_classify(sch, &b, skb, q->config->flow_mode, &ret);
net/sched/sch_cake.c
1772
tin = (u32)(b - q->tins);
net/sched/sch_cake.c
1781
if (!sch->q.qlen) {
net/sched/sch_cake.c
1782
if (ktime_before(q->time_next_packet, now)) {
net/sched/sch_cake.c
1783
q->failsafe_next_packet = now;
net/sched/sch_cake.c
1784
q->time_next_packet = now;
net/sched/sch_cake.c
1785
} else if (ktime_after(q->time_next_packet, now) &&
net/sched/sch_cake.c
1786
ktime_after(q->failsafe_next_packet, now)) {
net/sched/sch_cake.c
1788
min(ktime_to_ns(q->time_next_packet),
net/sched/sch_cake.c
1790
q->failsafe_next_packet));
net/sched/sch_cake.c
1792
qdisc_watchdog_schedule_ns(&q->watchdog, next);
net/sched/sch_cake.c
1800
if (qdisc_pkt_segs(skb) > 1 && q->config->rate_flags & CAKE_FLAG_SPLIT_GSO) {
net/sched/sch_cake.c
1814
get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
net/sched/sch_cake.c
1818
sch->q.qlen++;
net/sched/sch_cake.c
1821
q->buffer_used += segs->truesize;
net/sched/sch_cake.c
1830
q->avg_window_bytes += slen;
net/sched/sch_cake.c
1839
get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
net/sched/sch_cake.c
1842
if (q->config->ack_filter)
net/sched/sch_cake.c
1843
ack = cake_ack_filter(q, flow);
net/sched/sch_cake.c
1850
q->buffer_used += skb->truesize - ack->truesize;
net/sched/sch_cake.c
1851
if (q->config->rate_flags & CAKE_FLAG_INGRESS)
net/sched/sch_cake.c
1852
cake_advance_shaper(q, b, ack, now, true);
net/sched/sch_cake.c
1857
sch->q.qlen++;
net/sched/sch_cake.c
1858
q->buffer_used += skb->truesize;
net/sched/sch_cake.c
1867
q->avg_window_bytes += len - ack_pkt_len;
net/sched/sch_cake.c
1870
if (q->overflow_timeout)
net/sched/sch_cake.c
1871
cake_heapify_up(q, b->overflow_idx[idx]);
net/sched/sch_cake.c
1874
if (q->config->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
net/sched/sch_cake.c
1876
ktime_to_ns(ktime_sub(now, q->last_packet_time));
net/sched/sch_cake.c
1882
q->avg_packet_interval = \
net/sched/sch_cake.c
1883
cake_ewma(q->avg_packet_interval,
net/sched/sch_cake.c
1885
(packet_interval > q->avg_packet_interval ?
net/sched/sch_cake.c
1888
q->last_packet_time = now;
net/sched/sch_cake.c
1890
if (packet_interval > q->avg_packet_interval) {
net/sched/sch_cake.c
1893
q->avg_window_begin));
net/sched/sch_cake.c
1894
u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
net/sched/sch_cake.c
1897
q->avg_peak_bandwidth =
net/sched/sch_cake.c
1898
cake_ewma(q->avg_peak_bandwidth, b,
net/sched/sch_cake.c
1899
b > q->avg_peak_bandwidth ? 2 : 8);
net/sched/sch_cake.c
1900
q->avg_window_bytes = 0;
net/sched/sch_cake.c
1901
q->avg_window_begin = now;
net/sched/sch_cake.c
1904
ktime_add_ms(q->last_reconfig_time,
net/sched/sch_cake.c
1906
q->config->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
net/sched/sch_cake.c
1911
q->avg_window_bytes = 0;
net/sched/sch_cake.c
1912
q->last_packet_time = now;
net/sched/sch_cake.c
1926
flow->deficit = cake_get_flow_quantum(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
1935
cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
1936
cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
1939
if (q->buffer_used > q->buffer_max_used)
net/sched/sch_cake.c
1940
q->buffer_max_used = q->buffer_used;
net/sched/sch_cake.c
1942
if (q->buffer_used <= q->buffer_limit)
net/sched/sch_cake.c
1945
prev_qlen = sch->q.qlen;
net/sched/sch_cake.c
1948
while (q->buffer_used > q->buffer_limit) {
net/sched/sch_cake.c
1955
prev_qlen -= sch->q.qlen;
net/sched/sch_cake.c
1970
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
1971
struct cake_tin_data *b = &q->tins[q->cur_tin];
net/sched/sch_cake.c
1972
struct cake_flow *flow = &b->flows[q->cur_flow];
net/sched/sch_cake.c
1979
b->backlogs[q->cur_flow] -= len;
net/sched/sch_cake.c
1982
q->buffer_used -= skb->truesize;
net/sched/sch_cake.c
1983
sch->q.qlen--;
net/sched/sch_cake.c
1985
if (q->overflow_timeout)
net/sched/sch_cake.c
1986
cake_heapify(q, b->overflow_idx[q->cur_flow]);
net/sched/sch_cake.c
1994
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
1997
q->cur_tin = tin;
net/sched/sch_cake.c
1998
for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
net/sched/sch_cake.c
2005
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2006
struct cake_tin_data *b = &q->tins[q->cur_tin];
net/sched/sch_cake.c
2016
if (q->config->is_shared && q->rate_ns &&
net/sched/sch_cake.c
2017
now - q->last_checked_active >= q->config->sync_time) {
net/sched/sch_cake.c
2020
u64 new_rate = q->config->rate_bps;
net/sched/sch_cake.c
2030
if (other_priv == q)
net/sched/sch_cake.c
2033
other_qlen = READ_ONCE(other_sch->q.qlen);
net/sched/sch_cake.c
2036
if (other_qlen || other_last_active > q->last_checked_active)
net/sched/sch_cake.c
2041
new_rate = div64_u64(q->config->rate_bps, num_active_qs);
net/sched/sch_cake.c
2044
q->last_checked_active = now;
net/sched/sch_cake.c
2045
q->active_queues = num_active_qs;
net/sched/sch_cake.c
2049
if (!sch->q.qlen)
net/sched/sch_cake.c
2053
if (ktime_after(q->time_next_packet, now) &&
net/sched/sch_cake.c
2054
ktime_after(q->failsafe_next_packet, now)) {
net/sched/sch_cake.c
2055
u64 next = min(ktime_to_ns(q->time_next_packet),
net/sched/sch_cake.c
2056
ktime_to_ns(q->failsafe_next_packet));
net/sched/sch_cake.c
2059
qdisc_watchdog_schedule_ns(&q->watchdog, next);
net/sched/sch_cake.c
2064
if (!q->rate_ns) {
net/sched/sch_cake.c
2077
q->cur_tin++;
net/sched/sch_cake.c
2079
if (q->cur_tin >= q->tin_cnt) {
net/sched/sch_cake.c
2080
q->cur_tin = 0;
net/sched/sch_cake.c
2081
b = q->tins;
net/sched/sch_cake.c
2103
for (tin = 0; tin < q->tin_cnt; tin++) {
net/sched/sch_cake.c
2104
b = q->tins + tin;
net/sched/sch_cake.c
2118
q->cur_tin = best_tin;
net/sched/sch_cake.c
2119
b = q->tins + best_tin;
net/sched/sch_cake.c
2141
q->cur_flow = flow - b->flows;
net/sched/sch_cake.c
2155
cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
2156
cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
2168
flow->deficit += cake_get_flow_quantum(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
2192
cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
2193
cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
2211
cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
2212
cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode);
net/sched/sch_cake.c
2223
!!(q->config->rate_flags &
net/sched/sch_cake.c
2230
if (q->config->rate_flags & CAKE_FLAG_INGRESS) {
net/sched/sch_cake.c
2231
len = cake_advance_shaper(q, b, skb,
net/sched/sch_cake.c
2241
if (q->config->rate_flags & CAKE_FLAG_INGRESS)
net/sched/sch_cake.c
2247
WRITE_ONCE(q->last_active, now);
net/sched/sch_cake.c
2257
len = cake_advance_shaper(q, b, skb, now, false);
net/sched/sch_cake.c
2261
if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
net/sched/sch_cake.c
2262
u64 next = min(ktime_to_ns(q->time_next_packet),
net/sched/sch_cake.c
2263
ktime_to_ns(q->failsafe_next_packet));
net/sched/sch_cake.c
2265
qdisc_watchdog_schedule_ns(&q->watchdog, next);
net/sched/sch_cake.c
2266
} else if (!sch->q.qlen) {
net/sched/sch_cake.c
2269
for (i = 0; i < q->tin_cnt; i++) {
net/sched/sch_cake.c
2270
if (q->tins[i].decaying_flow_count) {
net/sched/sch_cake.c
2273
q->tins[i].cparams.target);
net/sched/sch_cake.c
2275
qdisc_watchdog_schedule_ns(&q->watchdog,
net/sched/sch_cake.c
2282
if (q->overflow_timeout)
net/sched/sch_cake.c
2283
q->overflow_timeout--;
net/sched/sch_cake.c
2290
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2293
if (!q->tins)
net/sched/sch_cake.c
2364
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2365
struct cake_tin_data *b = &q->tins[0];
net/sched/sch_cake.c
2367
q->tin_cnt = 1;
net/sched/sch_cake.c
2369
q->tin_index = besteffort;
net/sched/sch_cake.c
2370
q->tin_order = normal_order;
net/sched/sch_cake.c
2373
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2382
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2386
q->tin_cnt = 8;
net/sched/sch_cake.c
2387
q->tin_index = precedence;
net/sched/sch_cake.c
2388
q->tin_order = normal_order;
net/sched/sch_cake.c
2390
for (i = 0; i < q->tin_cnt; i++) {
net/sched/sch_cake.c
2391
struct cake_tin_data *b = &q->tins[i];
net/sched/sch_cake.c
2393
cake_set_rate(b, rate, mtu, us_to_ns(q->config->target),
net/sched/sch_cake.c
2394
us_to_ns(q->config->interval));
net/sched/sch_cake.c
2469
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2473
q->tin_cnt = 8;
net/sched/sch_cake.c
2476
q->tin_index = diffserv8;
net/sched/sch_cake.c
2477
q->tin_order = normal_order;
net/sched/sch_cake.c
2480
for (i = 0; i < q->tin_cnt; i++) {
net/sched/sch_cake.c
2481
struct cake_tin_data *b = &q->tins[i];
net/sched/sch_cake.c
2483
cake_set_rate(b, rate, mtu, us_to_ns(q->config->target),
net/sched/sch_cake.c
2484
us_to_ns(q->config->interval));
net/sched/sch_cake.c
2511
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2514
q->tin_cnt = 4;
net/sched/sch_cake.c
2517
q->tin_index = diffserv4;
net/sched/sch_cake.c
2518
q->tin_order = bulk_order;
net/sched/sch_cake.c
2521
cake_set_rate(&q->tins[0], rate, mtu,
net/sched/sch_cake.c
2522
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2523
cake_set_rate(&q->tins[1], rate >> 4, mtu,
net/sched/sch_cake.c
2524
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2525
cake_set_rate(&q->tins[2], rate >> 1, mtu,
net/sched/sch_cake.c
2526
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2527
cake_set_rate(&q->tins[3], rate >> 2, mtu,
net/sched/sch_cake.c
2528
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2531
q->tins[0].tin_quantum = quantum;
net/sched/sch_cake.c
2532
q->tins[1].tin_quantum = quantum >> 4;
net/sched/sch_cake.c
2533
q->tins[2].tin_quantum = quantum >> 1;
net/sched/sch_cake.c
2534
q->tins[3].tin_quantum = quantum >> 2;
net/sched/sch_cake.c
2546
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2549
q->tin_cnt = 3;
net/sched/sch_cake.c
2552
q->tin_index = diffserv3;
net/sched/sch_cake.c
2553
q->tin_order = bulk_order;
net/sched/sch_cake.c
2556
cake_set_rate(&q->tins[0], rate, mtu,
net/sched/sch_cake.c
2557
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2558
cake_set_rate(&q->tins[1], rate >> 4, mtu,
net/sched/sch_cake.c
2559
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2560
cake_set_rate(&q->tins[2], rate >> 2, mtu,
net/sched/sch_cake.c
2561
us_to_ns(q->config->target), us_to_ns(q->config->interval));
net/sched/sch_cake.c
2564
q->tins[0].tin_quantum = quantum;
net/sched/sch_cake.c
2565
q->tins[1].tin_quantum = quantum >> 4;
net/sched/sch_cake.c
2566
q->tins[2].tin_quantum = quantum >> 2;
net/sched/sch_cake.c
2575
struct cake_sched_config *q = qd->config;
net/sched/sch_cake.c
2578
switch (q->tin_mode) {
net/sched/sch_cake.c
2613
struct cake_sched_config *q = qd->config;
net/sched/sch_cake.c
2617
if (q->buffer_config_limit) {
net/sched/sch_cake.c
2618
qd->buffer_limit = q->buffer_config_limit;
net/sched/sch_cake.c
2619
} else if (q->rate_bps) {
net/sched/sch_cake.c
2620
u64 t = q->rate_bps * q->interval;
net/sched/sch_cake.c
2632
q->buffer_config_limit));
net/sched/sch_cake.c
2635
static int cake_config_change(struct cake_sched_config *q, struct nlattr *opt,
net/sched/sch_cake.c
2639
u16 rate_flags = q->rate_flags;
net/sched/sch_cake.c
2640
u8 flow_mode = q->flow_mode;
net/sched/sch_cake.c
2662
if (q->is_shared) {
net/sched/sch_cake.c
2674
WRITE_ONCE(q->rate_bps,
net/sched/sch_cake.c
2678
WRITE_ONCE(q->tin_mode,
net/sched/sch_cake.c
2694
WRITE_ONCE(q->atm_mode,
net/sched/sch_cake.c
2698
WRITE_ONCE(q->rate_overhead,
net/sched/sch_cake.c
2710
WRITE_ONCE(q->rate_mpu,
net/sched/sch_cake.c
2716
WRITE_ONCE(q->interval, max(interval, 1U));
net/sched/sch_cake.c
2722
WRITE_ONCE(q->target, max(target, 1U));
net/sched/sch_cake.c
2733
WRITE_ONCE(q->ack_filter,
net/sched/sch_cake.c
2737
WRITE_ONCE(q->buffer_config_limit,
net/sched/sch_cake.c
2748
WRITE_ONCE(q->fwmark_mask, nla_get_u32(tb[TCA_CAKE_FWMARK]));
net/sched/sch_cake.c
2749
WRITE_ONCE(q->fwmark_shft,
net/sched/sch_cake.c
2750
q->fwmark_mask ? __ffs(q->fwmark_mask) : 0);
net/sched/sch_cake.c
2753
WRITE_ONCE(q->rate_flags, rate_flags);
net/sched/sch_cake.c
2754
WRITE_ONCE(q->flow_mode, flow_mode);
net/sched/sch_cake.c
2763
struct cake_sched_config *q = qd->config;
net/sched/sch_cake.c
2767
if (q->is_shared) {
net/sched/sch_cake.c
2772
ret = cake_config_change(q, opt, extack, &overhead_changed);
net/sched/sch_cake.c
2794
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2796
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_cake.c
2797
tcf_block_put(q->block);
net/sched/sch_cake.c
2798
kvfree(q->tins);
net/sched/sch_cake.c
2801
static void cake_config_init(struct cake_sched_config *q, bool is_shared)
net/sched/sch_cake.c
2803
q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
net/sched/sch_cake.c
2804
q->flow_mode = CAKE_FLOW_TRIPLE;
net/sched/sch_cake.c
2806
q->rate_bps = 0; /* unlimited by default */
net/sched/sch_cake.c
2808
q->interval = 100000; /* 100ms default */
net/sched/sch_cake.c
2809
q->target = 5000; /* 5ms: codel RFC argues
net/sched/sch_cake.c
2812
q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
net/sched/sch_cake.c
2813
q->is_shared = is_shared;
net/sched/sch_cake.c
2814
q->sync_time = 200 * NSEC_PER_USEC;
net/sched/sch_cake.c
2821
struct cake_sched_config *q = &qd->initial_config;
net/sched/sch_cake.c
2824
cake_config_init(q, false);
net/sched/sch_cake.c
2831
qd->config = q;
net/sched/sch_cake.c
2877
qd->avg_peak_bandwidth = q->rate_bps;
net/sched/sch_cake.c
2894
static int cake_config_dump(struct cake_sched_config *q, struct sk_buff *skb)
net/sched/sch_cake.c
2905
READ_ONCE(q->rate_bps), TCA_CAKE_PAD))
net/sched/sch_cake.c
2908
flow_mode = READ_ONCE(q->flow_mode);
net/sched/sch_cake.c
2912
if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval)))
net/sched/sch_cake.c
2915
if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target)))
net/sched/sch_cake.c
2919
READ_ONCE(q->buffer_config_limit)))
net/sched/sch_cake.c
2922
rate_flags = READ_ONCE(q->rate_flags);
net/sched/sch_cake.c
2931
if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter)))
net/sched/sch_cake.c
2938
if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode)))
net/sched/sch_cake.c
2945
if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, READ_ONCE(q->rate_overhead)))
net/sched/sch_cake.c
2952
if (nla_put_u32(skb, TCA_CAKE_ATM, READ_ONCE(q->atm_mode)))
net/sched/sch_cake.c
2955
if (nla_put_u32(skb, TCA_CAKE_MPU, READ_ONCE(q->rate_mpu)))
net/sched/sch_cake.c
2962
if (nla_put_u32(skb, TCA_CAKE_FWMARK, READ_ONCE(q->fwmark_mask)))
net/sched/sch_cake.c
2981
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
2998
PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
net/sched/sch_cake.c
2999
PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
net/sched/sch_cake.c
3000
PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
net/sched/sch_cake.c
3001
PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
net/sched/sch_cake.c
3002
PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
net/sched/sch_cake.c
3003
PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
net/sched/sch_cake.c
3004
PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
net/sched/sch_cake.c
3005
PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
net/sched/sch_cake.c
3006
PUT_STAT_U32(ACTIVE_QUEUES, q->active_queues);
net/sched/sch_cake.c
3025
for (i = 0; i < q->tin_cnt; i++) {
net/sched/sch_cake.c
3026
struct cake_tin_data *b = &q->tins[q->tin_order[i]];
net/sched/sch_cake.c
3094
static void cake_unbind(struct Qdisc *q, unsigned long cl)
net/sched/sch_cake.c
3101
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
3105
return q->block;
net/sched/sch_cake.c
3118
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
3124
if (idx < CAKE_QUEUES * q->tin_cnt) {
net/sched/sch_cake.c
3126
&q->tins[q->tin_order[idx / CAKE_QUEUES]];
net/sched/sch_cake.c
3191
struct cake_sched_data *q = qdisc_priv(sch);
net/sched/sch_cake.c
3197
for (i = 0; i < q->tin_cnt; i++) {
net/sched/sch_cake.c
3198
struct cake_tin_data *b = &q->tins[q->tin_order[i]];
net/sched/sch_cake.c
646
static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
net/sched/sch_cake.c
651
q->hosts[flow->srchost].srchost_bulk_flow_count))
net/sched/sch_cake.c
652
q->hosts[flow->srchost].srchost_bulk_flow_count--;
net/sched/sch_cake.c
655
static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
net/sched/sch_cake.c
660
q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
net/sched/sch_cake.c
661
q->hosts[flow->srchost].srchost_bulk_flow_count++;
net/sched/sch_cake.c
664
static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
net/sched/sch_cake.c
669
q->hosts[flow->dsthost].dsthost_bulk_flow_count))
net/sched/sch_cake.c
670
q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
net/sched/sch_cake.c
673
static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
net/sched/sch_cake.c
678
q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
net/sched/sch_cake.c
679
q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
net/sched/sch_cake.c
682
static u16 cake_get_flow_quantum(struct cake_tin_data *q,
net/sched/sch_cake.c
690
q->hosts[flow->srchost].srchost_bulk_flow_count);
net/sched/sch_cake.c
694
q->hosts[flow->dsthost].dsthost_bulk_flow_count);
net/sched/sch_cake.c
699
return (q->flow_quantum * quantum_div[host_load] +
net/sched/sch_cake.c
703
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
net/sched/sch_cake.c
799
if (likely(q->tags[reduced_hash] == flow_hash &&
net/sched/sch_cake.c
800
q->flows[reduced_hash].set)) {
net/sched/sch_cake.c
801
q->way_directs++;
net/sched/sch_cake.c
814
if (q->tags[outer_hash + k] == flow_hash) {
net/sched/sch_cake.c
816
q->way_hits++;
net/sched/sch_cake.c
818
if (!q->flows[outer_hash + k].set) {
net/sched/sch_cake.c
833
if (!q->flows[outer_hash + k].set) {
net/sched/sch_cake.c
834
q->way_misses++;
net/sched/sch_cake.c
844
q->way_collisions++;
net/sched/sch_cake.c
848
if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
net/sched/sch_cake.c
849
cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
net/sched/sch_cake.c
850
cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
net/sched/sch_cake.c
855
q->tags[reduced_hash] = flow_hash;
net/sched/sch_cake.c
863
if (q->hosts[outer_hash + k].srchost_tag ==
net/sched/sch_cake.c
869
if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
net/sched/sch_cake.c
872
q->hosts[outer_hash + k].srchost_tag = srchost_hash;
net/sched/sch_cake.c
875
q->flows[reduced_hash].srchost = srchost_idx;
net/sched/sch_cake.c
877
if (q->flows[reduced_hash].set == CAKE_SET_BULK)
net/sched/sch_cake.c
878
cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
net/sched/sch_cake.c
887
if (q->hosts[outer_hash + k].dsthost_tag ==
net/sched/sch_cake.c
893
if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
net/sched/sch_cake.c
896
q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
net/sched/sch_cake.c
899
q->flows[reduced_hash].dsthost = dsthost_idx;
net/sched/sch_cake.c
901
if (q->flows[reduced_hash].set == CAKE_SET_BULK)
net/sched/sch_cake.c
902
cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
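sch_cake.c smooths avg_netoff, avg_packet_interval and avg_peak_bandwidth with a shift-based EWMA (the cake_ewma() calls in the entries above). A standalone model, assuming the usual divide-free shift form:

#include <stdio.h>
#include <stdint.h>

/* shift-based EWMA: new = avg - avg/2^shift + sample/2^shift,
 * i.e. alpha = 1/2^shift with no divisions and no floating point */
static uint64_t cake_ewma(uint64_t avg, uint64_t sample, uint32_t shift)
{
	avg -= avg >> shift;
	avg += sample >> shift;
	return avg;
}

int main(void)
{
	uint64_t avg = 0;

	/* converges toward the sample value (up to shift rounding) */
	for (int i = 0; i < 64; i++)
		avg = cake_ewma(avg, 1000, 3);
	printf("avg=%llu\n", (unsigned long long)avg);
	return 0;
}

A smaller shift weights new samples more heavily, which is why the peak-bandwidth estimate above uses shift 2 when the estimate is rising and 8 when it is falling.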
net/sched/sch_cbs.c
100
sch->q.qlen++;
net/sched/sch_cbs.c
108
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
109
struct Qdisc *qdisc = q->qdisc;
net/sched/sch_cbs.c
117
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
118
struct Qdisc *qdisc = q->qdisc;
net/sched/sch_cbs.c
120
if (sch->q.qlen == 0 && q->credits > 0) {
net/sched/sch_cbs.c
124
q->credits = 0;
net/sched/sch_cbs.c
125
q->last = ktime_get_ns();
net/sched/sch_cbs.c
134
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
136
return q->enqueue(skb, sch, to_free);
net/sched/sch_cbs.c
171
sch->q.qlen--;
net/sched/sch_cbs.c
178
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
179
struct Qdisc *qdisc = q->qdisc;
net/sched/sch_cbs.c
186
if (now < q->last) {
net/sched/sch_cbs.c
187
qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
net/sched/sch_cbs.c
190
if (q->credits < 0) {
net/sched/sch_cbs.c
191
credits = timediff_to_credits(now - q->last, q->idleslope);
net/sched/sch_cbs.c
193
credits = q->credits + credits;
net/sched/sch_cbs.c
194
q->credits = min_t(s64, credits, q->hicredit);
net/sched/sch_cbs.c
196
if (q->credits < 0) {
net/sched/sch_cbs.c
199
delay = delay_from_credits(q->credits, q->idleslope);
net/sched/sch_cbs.c
200
qdisc_watchdog_schedule_ns(&q->watchdog, now + delay);
net/sched/sch_cbs.c
202
q->last = now;
net/sched/sch_cbs.c
216
credits = credits_from_len(len, q->sendslope,
net/sched/sch_cbs.c
217
atomic64_read(&q->port_rate));
net/sched/sch_cbs.c
218
credits += q->credits;
net/sched/sch_cbs.c
220
q->credits = max_t(s64, credits, q->locredit);
net/sched/sch_cbs.c
222
if (unlikely(atomic64_read(&q->port_rate) == 0))
net/sched/sch_cbs.c
223
q->last = now;
net/sched/sch_cbs.c
225
q->last = now + div64_s64(len * NSEC_PER_SEC,
net/sched/sch_cbs.c
226
atomic64_read(&q->port_rate));
net/sched/sch_cbs.c
233
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
234
struct Qdisc *qdisc = q->qdisc;
net/sched/sch_cbs.c
241
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
243
return q->dequeue(sch);
net/sched/sch_cbs.c
251
struct cbs_sched_data *q)
net/sched/sch_cbs.c
257
if (!q->offload)
net/sched/sch_cbs.c
260
q->enqueue = cbs_enqueue_soft;
net/sched/sch_cbs.c
261
q->dequeue = cbs_dequeue_soft;
net/sched/sch_cbs.c
267
cbs.queue = q->queue;
net/sched/sch_cbs.c
276
static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
net/sched/sch_cbs.c
289
cbs.queue = q->queue;
net/sched/sch_cbs.c
303
q->enqueue = cbs_enqueue_offload;
net/sched/sch_cbs.c
304
q->dequeue = cbs_dequeue_offload;
net/sched/sch_cbs.c
309
static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
net/sched/sch_cbs.c
326
atomic64_set(&q->port_rate, port_rate);
net/sched/sch_cbs.c
328
dev->name, (long long)atomic64_read(&q->port_rate),
net/sched/sch_cbs.c
336
struct cbs_sched_data *q;
net/sched/sch_cbs.c
346
list_for_each_entry(q, &cbs_list, cbs_list) {
net/sched/sch_cbs.c
347
qdev = qdisc_dev(q->qdisc);
net/sched/sch_cbs.c
356
cbs_set_port_rate(dev, q);
net/sched/sch_cbs.c
364
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
383
cbs_set_port_rate(dev, q);
net/sched/sch_cbs.c
384
cbs_disable_offload(dev, q);
net/sched/sch_cbs.c
386
err = cbs_enable_offload(dev, q, qopt, extack);
net/sched/sch_cbs.c
392
WRITE_ONCE(q->hicredit, qopt->hicredit);
net/sched/sch_cbs.c
393
WRITE_ONCE(q->locredit, qopt->locredit);
net/sched/sch_cbs.c
394
WRITE_ONCE(q->idleslope, qopt->idleslope * BYTES_PER_KBIT);
net/sched/sch_cbs.c
395
WRITE_ONCE(q->sendslope, qopt->sendslope * BYTES_PER_KBIT);
net/sched/sch_cbs.c
396
WRITE_ONCE(q->offload, qopt->offload);
net/sched/sch_cbs.c
404
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
412
q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
net/sched/sch_cbs.c
414
if (!q->qdisc)
net/sched/sch_cbs.c
418
list_add(&q->cbs_list, &cbs_list);
net/sched/sch_cbs.c
421
qdisc_hash_add(q->qdisc, false);
net/sched/sch_cbs.c
423
q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
net/sched/sch_cbs.c
425
q->enqueue = cbs_enqueue_soft;
net/sched/sch_cbs.c
426
q->dequeue = cbs_dequeue_soft;
net/sched/sch_cbs.c
428
qdisc_watchdog_init(&q->watchdog, sch);
net/sched/sch_cbs.c
435
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
439
if (!q->qdisc)
net/sched/sch_cbs.c
442
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_cbs.c
443
cbs_disable_offload(dev, q);
net/sched/sch_cbs.c
446
list_del(&q->cbs_list);
net/sched/sch_cbs.c
449
qdisc_put(q->qdisc);
net/sched/sch_cbs.c
454
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
462
opt.hicredit = READ_ONCE(q->hicredit);
net/sched/sch_cbs.c
463
opt.locredit = READ_ONCE(q->locredit);
net/sched/sch_cbs.c
464
opt.sendslope = div64_s64(READ_ONCE(q->sendslope), BYTES_PER_KBIT);
net/sched/sch_cbs.c
465
opt.idleslope = div64_s64(READ_ONCE(q->idleslope), BYTES_PER_KBIT);
net/sched/sch_cbs.c
466
opt.offload = READ_ONCE(q->offload);
net/sched/sch_cbs.c
481
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
483
if (cl != 1 || !q->qdisc) /* only one class */
net/sched/sch_cbs.c
487
tcm->tcm_info = q->qdisc->handle;
net/sched/sch_cbs.c
495
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
504
*old = qdisc_replace(sch, new, &q->qdisc);
net/sched/sch_cbs.c
510
struct cbs_sched_data *q = qdisc_priv(sch);
net/sched/sch_cbs.c
512
return q->qdisc;
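The sch_cbs.c entries trace the 802.1Qav credit-based shaper: credits accrue at idleslope while the queue waits, are clamped to hicredit, drained by each transmitted frame's wire time at the (negative) sendslope, floored at locredit, and a negative balance becomes a watchdog delay. A simplified userspace model of that arithmetic, with slopes in bytes/sec as the qdisc stores them after the BYTES_PER_KBIT conversion; the example picks sendslope via the Qav relation sendslope = idleslope - port_rate:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/* credit gained idling for timediff ns at idleslope bytes/sec */
static int64_t timediff_to_credits(int64_t timediff, int64_t idleslope)
{
	return timediff * idleslope / NSEC_PER_SEC;
}

/* credit change for sending len bytes at port_rate bytes/sec:
 * wire time charged at the (negative) sendslope */
static int64_t credits_from_len(int64_t len, int64_t sendslope,
				int64_t port_rate)
{
	return len * sendslope / port_rate;
}

/* how long a negative balance takes to refill to zero */
static int64_t delay_from_credits(int64_t credits, int64_t idleslope)
{
	return -credits * NSEC_PER_SEC / idleslope;
}

int main(void)
{
	int64_t port_rate = 125000000;             /* 1 Gb/s in bytes/sec */
	int64_t idleslope = 12500000;              /* 100 Mb/s reserved   */
	int64_t sendslope = idleslope - port_rate; /* always negative     */
	int64_t credits = 0, locredit = -1500;

	credits += credits_from_len(1500, sendslope, port_rate);
	if (credits < locredit)
		credits = locredit;
	printf("after one frame: credits=%lld, wait %lld ns\n",
	       (long long)credits,
	       (long long)delay_from_credits(credits, idleslope));
	return 0;
}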
net/sched/sch_choke.c
103
static void choke_zap_tail_holes(struct choke_sched_data *q)
net/sched/sch_choke.c
106
q->tail = (q->tail - 1) & q->tab_mask;
net/sched/sch_choke.c
107
if (q->head == q->tail)
net/sched/sch_choke.c
109
} while (q->tab[q->tail] == NULL);
net/sched/sch_choke.c
116
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
117
struct sk_buff *skb = q->tab[idx];
net/sched/sch_choke.c
119
q->tab[idx] = NULL;
net/sched/sch_choke.c
121
if (idx == q->head)
net/sched/sch_choke.c
122
choke_zap_head_holes(q);
net/sched/sch_choke.c
123
if (idx == q->tail)
net/sched/sch_choke.c
124
choke_zap_tail_holes(q);
net/sched/sch_choke.c
126
--sch->q.qlen;
net/sched/sch_choke.c
179
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
net/sched/sch_choke.c
186
*pidx = (q->head + get_random_u32_below(choke_len(q))) & q->tab_mask;
net/sched/sch_choke.c
187
skb = q->tab[*pidx];
net/sched/sch_choke.c
192
return q->tab[*pidx = q->head];
net/sched/sch_choke.c
199
static bool choke_match_random(const struct choke_sched_data *q,
net/sched/sch_choke.c
205
if (q->head == q->tail)
net/sched/sch_choke.c
208
oskb = choke_peek_random(q, pidx);
net/sched/sch_choke.c
215
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
216
const struct red_parms *p = &q->parms;
net/sched/sch_choke.c
220
q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
net/sched/sch_choke.c
221
if (red_is_idling(&q->vars))
net/sched/sch_choke.c
222
red_end_of_idle_period(&q->vars);
net/sched/sch_choke.c
225
if (q->vars.qavg <= p->qth_min)
net/sched/sch_choke.c
226
q->vars.qcount = -1;
net/sched/sch_choke.c
231
if (choke_match_random(q, skb, &idx)) {
net/sched/sch_choke.c
232
q->stats.matched++;
net/sched/sch_choke.c
238
if (q->vars.qavg > p->qth_max) {
net/sched/sch_choke.c
239
q->vars.qcount = -1;
net/sched/sch_choke.c
242
if (use_harddrop(q) || !use_ecn(q) ||
net/sched/sch_choke.c
244
q->stats.forced_drop++;
net/sched/sch_choke.c
248
q->stats.forced_mark++;
net/sched/sch_choke.c
249
} else if (++q->vars.qcount) {
net/sched/sch_choke.c
250
if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
net/sched/sch_choke.c
251
q->vars.qcount = 0;
net/sched/sch_choke.c
252
q->vars.qR = red_random(p);
net/sched/sch_choke.c
255
if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
net/sched/sch_choke.c
256
q->stats.prob_drop++;
net/sched/sch_choke.c
260
q->stats.prob_mark++;
net/sched/sch_choke.c
263
q->vars.qR = red_random(p);
net/sched/sch_choke.c
267
if (sch->q.qlen < q->limit) {
net/sched/sch_choke.c
268
q->tab[q->tail] = skb;
net/sched/sch_choke.c
269
q->tail = (q->tail + 1) & q->tab_mask;
net/sched/sch_choke.c
270
++sch->q.qlen;
net/sched/sch_choke.c
275
q->stats.pdrop++;
net/sched/sch_choke.c
285
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
288
if (q->head == q->tail) {
net/sched/sch_choke.c
289
if (!red_is_idling(&q->vars))
net/sched/sch_choke.c
290
red_start_of_idle_period(&q->vars);
net/sched/sch_choke.c
294
skb = q->tab[q->head];
net/sched/sch_choke.c
295
q->tab[q->head] = NULL;
net/sched/sch_choke.c
296
choke_zap_head_holes(q);
net/sched/sch_choke.c
297
--sch->q.qlen;
net/sched/sch_choke.c
306
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
308
while (q->head != q->tail) {
net/sched/sch_choke.c
309
struct sk_buff *skb = q->tab[q->head];
net/sched/sch_choke.c
311
q->head = (q->head + 1) & q->tab_mask;
net/sched/sch_choke.c
317
if (q->tab)
net/sched/sch_choke.c
318
memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
net/sched/sch_choke.c
319
q->head = q->tail = 0;
net/sched/sch_choke.c
320
red_restart(&q->vars);
net/sched/sch_choke.c
338
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
370
if (mask != q->tab_mask) {
net/sched/sch_choke.c
378
old = q->tab;
net/sched/sch_choke.c
380
unsigned int oqlen = sch->q.qlen, tail = 0;
net/sched/sch_choke.c
383
while (q->head != q->tail) {
net/sched/sch_choke.c
384
struct sk_buff *skb = q->tab[q->head];
net/sched/sch_choke.c
386
q->head = (q->head + 1) & q->tab_mask;
net/sched/sch_choke.c
395
--sch->q.qlen;
net/sched/sch_choke.c
398
qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
net/sched/sch_choke.c
399
q->head = 0;
net/sched/sch_choke.c
400
q->tail = tail;
net/sched/sch_choke.c
403
q->tab_mask = mask;
net/sched/sch_choke.c
404
q->tab = ntab;
net/sched/sch_choke.c
408
WRITE_ONCE(q->flags, ctl->flags);
net/sched/sch_choke.c
409
WRITE_ONCE(q->limit, ctl->limit);
net/sched/sch_choke.c
411
red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
net/sched/sch_choke.c
415
red_set_vars(&q->vars);
net/sched/sch_choke.c
417
if (q->head == q->tail)
net/sched/sch_choke.c
418
red_end_of_idle_period(&q->vars);
net/sched/sch_choke.c
433
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
434
u8 Wlog = READ_ONCE(q->parms.Wlog);
net/sched/sch_choke.c
437
.limit = READ_ONCE(q->limit),
net/sched/sch_choke.c
438
.flags = READ_ONCE(q->flags),
net/sched/sch_choke.c
439
.qth_min = READ_ONCE(q->parms.qth_min) >> Wlog,
net/sched/sch_choke.c
440
.qth_max = READ_ONCE(q->parms.qth_max) >> Wlog,
net/sched/sch_choke.c
442
.Plog = READ_ONCE(q->parms.Plog),
net/sched/sch_choke.c
443
.Scell_log = READ_ONCE(q->parms.Scell_log),
net/sched/sch_choke.c
451
nla_put_u32(skb, TCA_CHOKE_MAX_P, READ_ONCE(q->parms.max_P)))
net/sched/sch_choke.c
462
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
464
.early = q->stats.prob_drop + q->stats.forced_drop,
net/sched/sch_choke.c
465
.marked = q->stats.prob_mark + q->stats.forced_mark,
net/sched/sch_choke.c
466
.pdrop = q->stats.pdrop,
net/sched/sch_choke.c
467
.matched = q->stats.matched,
net/sched/sch_choke.c
475
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
477
choke_free(q->tab);
net/sched/sch_choke.c
482
struct choke_sched_data *q = qdisc_priv(sch);
net/sched/sch_choke.c
484
return (q->head != q->tail) ? q->tab[q->head] : NULL;
net/sched/sch_choke.c
75
static unsigned int choke_len(const struct choke_sched_data *q)
net/sched/sch_choke.c
77
return (q->tail - q->head) & q->tab_mask;
net/sched/sch_choke.c
81
static int use_ecn(const struct choke_sched_data *q)
net/sched/sch_choke.c
83
return q->flags & TC_RED_ECN;
net/sched/sch_choke.c
87
static int use_harddrop(const struct choke_sched_data *q)
net/sched/sch_choke.c
89
return q->flags & TC_RED_HARDDROP;
net/sched/sch_choke.c
93
static void choke_zap_head_holes(struct choke_sched_data *q)
net/sched/sch_choke.c
96
q->head = (q->head + 1) & q->tab_mask;
net/sched/sch_choke.c
97
if (q->head == q->tail)
net/sched/sch_choke.c
99
} while (q->tab[q->head] == NULL);
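sch_choke.c keeps its FIFO in a power-of-two array indexed through q->tab_mask, so both occupancy and random-victim selection reduce to masked arithmetic (choke_len and choke_peek_random above). A standalone sketch of those two operations; the rand()-based pick stands in for the kernel's get_random_u32_below():

#include <stdio.h>
#include <stdlib.h>

#define TAB_SIZE 8			/* must be a power of two */
#define TAB_MASK (TAB_SIZE - 1)

/* sch_choke.c stores head/tail already masked; (tail - head) & mask
 * still yields the occupancy, including across wraparound */
static unsigned int ring_len(unsigned int head, unsigned int tail)
{
	return (tail - head) & TAB_MASK;
}

/* pick a uniformly random occupied slot, as CHOKe does when choosing
 * a queued packet to compare against the arriving one */
static unsigned int peek_random(unsigned int head, unsigned int tail)
{
	return (head + (unsigned int)rand() % ring_len(head, tail)) & TAB_MASK;
}

int main(void)
{
	unsigned int head = 6, tail = 2;  /* wrapped: slots 6,7,0,1 in use */

	printf("len=%u victim=%u\n", ring_len(head, tail),
	       peek_random(head, tail));
	return 0;
}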
net/sched/sch_codel.c
105
struct codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_codel.c
119
WRITE_ONCE(q->params.target,
net/sched/sch_codel.c
126
WRITE_ONCE(q->params.ce_threshold,
net/sched/sch_codel.c
133
WRITE_ONCE(q->params.interval,
net/sched/sch_codel.c
142
WRITE_ONCE(q->params.ecn,
net/sched/sch_codel.c
145
while (sch->q.qlen > sch->limit) {
net/sched/sch_codel.c
164
struct codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_codel.c
168
codel_params_init(&q->params);
net/sched/sch_codel.c
169
codel_vars_init(&q->vars);
net/sched/sch_codel.c
170
codel_stats_init(&q->stats);
net/sched/sch_codel.c
171
q->params.mtu = psched_mtu(qdisc_dev(sch));
net/sched/sch_codel.c
192
struct codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_codel.c
201
codel_time_to_us(READ_ONCE(q->params.target))) ||
net/sched/sch_codel.c
205
codel_time_to_us(READ_ONCE(q->params.interval))) ||
net/sched/sch_codel.c
207
READ_ONCE(q->params.ecn)))
net/sched/sch_codel.c
209
ce_threshold = READ_ONCE(q->params.ce_threshold);
net/sched/sch_codel.c
223
const struct codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_codel.c
225
.maxpacket = q->stats.maxpacket,
net/sched/sch_codel.c
226
.count = q->vars.count,
net/sched/sch_codel.c
227
.lastcount = q->vars.lastcount,
net/sched/sch_codel.c
228
.drop_overlimit = q->drop_overlimit,
net/sched/sch_codel.c
229
.ldelay = codel_time_to_us(q->vars.ldelay),
net/sched/sch_codel.c
230
.dropping = q->vars.dropping,
net/sched/sch_codel.c
231
.ecn_mark = q->stats.ecn_mark,
net/sched/sch_codel.c
232
.ce_mark = q->stats.ce_mark,
net/sched/sch_codel.c
235
if (q->vars.dropping) {
net/sched/sch_codel.c
236
codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
net/sched/sch_codel.c
249
struct codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_codel.c
252
codel_vars_init(&q->vars);
net/sched/sch_codel.c
42
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
net/sched/sch_codel.c
61
struct codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_codel.c
64
skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
net/sched/sch_codel.c
65
&q->stats, qdisc_pkt_len, codel_get_enqueue_time,
net/sched/sch_codel.c
68
if (q->stats.drop_count) {
net/sched/sch_codel.c
69
qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
net/sched/sch_codel.c
70
q->stats.drop_count = 0;
net/sched/sch_codel.c
71
q->stats.drop_len = 0;
net/sched/sch_codel.c
81
struct codel_sched_data *q;
net/sched/sch_codel.c
87
q = qdisc_priv(sch);
net/sched/sch_codel.c
88
q->drop_overlimit++;
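The sch_codel.c entries drive codel_dequeue() with a target/interval pair; CoDel's control law spaces successive drops at interval/sqrt(count), so the drop rate rises until the sojourn time falls back under target. The kernel computes this with a fixed-point Newton-iterated reciprocal square root (include/net/codel_impl.h); a floating-point model of the same law:

#include <math.h>
#include <stdio.h>
#include <stdint.h>

/* next drop time after a drop at t, with `count` drops so far in
 * the current dropping episode */
static uint64_t control_law(uint64_t t, uint64_t interval_ns, uint32_t count)
{
	return t + (uint64_t)(interval_ns / sqrt((double)count));
}

int main(void)
{
	uint64_t t = 0, interval = 100 * 1000 * 1000;	/* 100 ms default */

	for (uint32_t count = 1; count <= 5; count++) {
		t = control_law(t, interval, count);
		printf("count=%u next drop at %.1f ms\n", count, t / 1e6);
	}
	return 0;	/* build with -lm */
}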
net/sched/sch_drr.c
136
qdisc_class_hash_insert(&q->clhash, &cl->common);
net/sched/sch_drr.c
139
qdisc_class_hash_grow(sch, &q->clhash);
net/sched/sch_drr.c
155
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
166
qdisc_class_hash_remove(&q->clhash, &cl->common);
net/sched/sch_drr.c
182
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
189
return q->block;
net/sched/sch_drr.c
285
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
292
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_drr.c
293
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
net/sched/sch_drr.c
303
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
316
fl = rcu_dereference_bh(q->filter_list);
net/sched/sch_drr.c
342
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
364
list_add_tail(&cl->alist, &q->active);
net/sched/sch_drr.c
369
sch->q.qlen++;
net/sched/sch_drr.c
375
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
380
if (list_empty(&q->active))
net/sched/sch_drr.c
383
cl = list_first_entry(&q->active, struct drr_class, alist);
net/sched/sch_drr.c
396
if (cl->qdisc->q.qlen == 0)
net/sched/sch_drr.c
402
sch->q.qlen--;
net/sched/sch_drr.c
407
list_move_tail(&cl->alist, &q->active);
net/sched/sch_drr.c
416
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
419
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_drr.c
422
err = qdisc_class_hash_init(&q->clhash);
net/sched/sch_drr.c
425
INIT_LIST_HEAD(&q->active);
net/sched/sch_drr.c
431
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
435
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_drr.c
436
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
net/sched/sch_drr.c
437
if (cl->qdisc->q.qlen)
net/sched/sch_drr.c
446
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
45
struct drr_sched *q = qdisc_priv(sch);
net/sched/sch_drr.c
451
tcf_block_put(q->block);
net/sched/sch_drr.c
453
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_drr.c
454
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
net/sched/sch_drr.c
458
qdisc_class_hash_destroy(&q->clhash);
net/sched/sch_drr.c
48
clc = qdisc_class_find(&q->clhash, classid);
net/sched/sch_drr.c
62
struct drr_sched *q = qdisc_priv(sch);
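The sch_drr.c entries show the three moving parts of deficit round robin: a quantum per class, a deficit counter, and an active list rotated with list_move_tail(). A hypothetical round-based sketch of the same fairness idea follows; the kernel instead rotates a single active list and tops up one flow per visit, but the byte accounting is the same.

#include <stdio.h>

/* Hypothetical DRR sketch: each active flow earns `quantum` bytes of
 * credit per round and sends head-of-line packets while the credit
 * covers them. */
struct flow {
    int quantum;
    int deficit;
    int pkts[4];    /* packet lengths, head..tail */
    int head, tail;
};

int main(void)
{
    struct flow flows[2] = {
        { .quantum = 600, .pkts = { 1500, 200 }, .tail = 2 },
        { .quantum = 300, .pkts = { 100, 100, 100 }, .tail = 3 },
    };
    int remaining = 5;

    while (remaining > 0) {
        for (int i = 0; i < 2; i++) {
            struct flow *f = &flows[i];

            if (f->head == f->tail)
                continue;               /* inactive flow: earns nothing */
            f->deficit += f->quantum;
            while (f->head < f->tail && f->pkts[f->head] <= f->deficit) {
                int len = f->pkts[f->head++];

                f->deficit -= len;
                remaining--;
                printf("flow %d sent %d bytes, deficit now %d\n",
                       i, len, f->deficit);
            }
            if (f->head == f->tail)
                f->deficit = 0;         /* classic DRR: reset when empty */
        }
    }
    return 0;
}

Over many rounds each flow's throughput converges to the ratio of the quanta, regardless of packet sizes, which is the property the active-list bookkeeping above exists to preserve.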
net/sched/sch_dualpi2.c
1002
READ_ONCE(q->min_qlen_step)) ||
net/sched/sch_dualpi2.c
1004
READ_ONCE(q->coupling_factor)) ||
net/sched/sch_dualpi2.c
1006
READ_ONCE(q->drop_overload)) ||
net/sched/sch_dualpi2.c
1008
READ_ONCE(q->drop_early)) ||
net/sched/sch_dualpi2.c
1010
READ_ONCE(q->c_protection_wc)) ||
net/sched/sch_dualpi2.c
1011
nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
net/sched/sch_dualpi2.c
1012
nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
net/sched/sch_dualpi2.c
1024
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
1026
.prob = READ_ONCE(q->pi2_prob),
net/sched/sch_dualpi2.c
1027
.packets_in_c = q->packets_in_c,
net/sched/sch_dualpi2.c
1028
.packets_in_l = q->packets_in_l,
net/sched/sch_dualpi2.c
1029
.maxq = q->maxq,
net/sched/sch_dualpi2.c
1030
.ecn_mark = q->ecn_mark,
net/sched/sch_dualpi2.c
1031
.credit = q->c_protection_credit,
net/sched/sch_dualpi2.c
1032
.step_marks = q->step_marks,
net/sched/sch_dualpi2.c
1033
.memory_used = q->memory_used,
net/sched/sch_dualpi2.c
1034
.max_memory_used = q->max_memory_used,
net/sched/sch_dualpi2.c
1035
.memory_limit = q->memory_limit,
net/sched/sch_dualpi2.c
1039
get_queue_delays(q, &qc, &ql);
net/sched/sch_dualpi2.c
1051
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
1054
qdisc_reset_queue(q->l_queue);
net/sched/sch_dualpi2.c
1055
q->c_head_ts = 0;
net/sched/sch_dualpi2.c
1056
q->l_head_ts = 0;
net/sched/sch_dualpi2.c
1057
q->pi2_prob = 0;
net/sched/sch_dualpi2.c
1058
q->packets_in_c = 0;
net/sched/sch_dualpi2.c
1059
q->packets_in_l = 0;
net/sched/sch_dualpi2.c
1060
q->maxq = 0;
net/sched/sch_dualpi2.c
1061
q->ecn_mark = 0;
net/sched/sch_dualpi2.c
1062
q->step_marks = 0;
net/sched/sch_dualpi2.c
1063
q->memory_used = 0;
net/sched/sch_dualpi2.c
1064
q->max_memory_used = 0;
net/sched/sch_dualpi2.c
1065
dualpi2_reset_c_protection(q);
net/sched/sch_dualpi2.c
1070
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
1072
q->pi2_tupdate = 0;
net/sched/sch_dualpi2.c
1073
hrtimer_cancel(&q->pi2_timer);
net/sched/sch_dualpi2.c
1074
if (q->l_queue)
net/sched/sch_dualpi2.c
1075
qdisc_put(q->l_queue);
net/sched/sch_dualpi2.c
1076
tcf_block_put(q->tcf_block);
net/sched/sch_dualpi2.c
1095
static void dualpi2_unbind(struct Qdisc *q, unsigned long cl)
net/sched/sch_dualpi2.c
1102
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
1106
return q->tcf_block;
net/sched/sch_dualpi2.c
147
static u64 head_enqueue_time(struct Qdisc *q)
net/sched/sch_dualpi2.c
149
struct sk_buff *skb = qdisc_peek_head(q);
net/sched/sch_dualpi2.c
170
static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
net/sched/sch_dualpi2.c
172
return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
net/sched/sch_dualpi2.c
185
static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q)
net/sched/sch_dualpi2.c
187
return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step;
net/sched/sch_dualpi2.c
190
static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb)
net/sched/sch_dualpi2.c
193
q->ecn_mark++;
net/sched/sch_dualpi2.c
199
static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q)
net/sched/sch_dualpi2.c
201
q->c_protection_credit = q->c_protection_init;
net/sched/sch_dualpi2.c
209
struct dualpi2_sched_data *q, u32 wc)
net/sched/sch_dualpi2.c
211
q->c_protection_wc = wc;
net/sched/sch_dualpi2.c
212
q->c_protection_wl = MAX_WC - wc;
net/sched/sch_dualpi2.c
213
q->c_protection_init = (s32)psched_mtu(qdisc_dev(sch)) *
net/sched/sch_dualpi2.c
214
((int)q->c_protection_wc - (int)q->c_protection_wl);
net/sched/sch_dualpi2.c
215
dualpi2_reset_c_protection(q);
net/sched/sch_dualpi2.c
230
static bool dualpi2_classic_marking(struct dualpi2_sched_data *q,
net/sched/sch_dualpi2.c
237
dualpi2_mark(q, skb);
net/sched/sch_dualpi2.c
253
static bool dualpi2_scalable_marking(struct dualpi2_sched_data *q,
net/sched/sch_dualpi2.c
260
if (!q->drop_overload ||
net/sched/sch_dualpi2.c
272
dualpi2_mark(q, skb);
net/sched/sch_dualpi2.c
282
static bool must_drop(struct Qdisc *sch, struct dualpi2_sched_data *q,
net/sched/sch_dualpi2.c
292
prob = READ_ONCE(q->pi2_prob);
net/sched/sch_dualpi2.c
293
local_l_prob = (u64)prob * q->coupling_factor;
net/sched/sch_dualpi2.c
298
return dualpi2_classic_marking(q, skb, prob, overload);
net/sched/sch_dualpi2.c
300
return dualpi2_scalable_marking(q, skb, local_l_prob, prob,
net/sched/sch_dualpi2.c
341
static int dualpi2_skb_classify(struct dualpi2_sched_data *q,
net/sched/sch_dualpi2.c
350
if (cb->ect & q->ecn_mask) {
net/sched/sch_dualpi2.c
355
if (TC_H_MAJ(skb->priority) == q->sch->handle &&
net/sched/sch_dualpi2.c
361
fl = rcu_dereference_bh(q->tcf_filters);
net/sched/sch_dualpi2.c
388
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
392
unlikely((u64)q->memory_used + skb->truesize > q->memory_limit)) {
net/sched/sch_dualpi2.c
395
qdisc_qstats_overlimit(q->l_queue);
net/sched/sch_dualpi2.c
400
if (q->drop_early && must_drop(sch, q, skb)) {
net/sched/sch_dualpi2.c
408
q->memory_used += skb->truesize;
net/sched/sch_dualpi2.c
409
if (q->memory_used > q->max_memory_used)
net/sched/sch_dualpi2.c
410
q->max_memory_used = q->memory_used;
net/sched/sch_dualpi2.c
412
if (qdisc_qlen(sch) > q->maxq)
net/sched/sch_dualpi2.c
413
q->maxq = qdisc_qlen(sch);
net/sched/sch_dualpi2.c
417
dualpi2_skb_cb(skb)->apply_step = skb_apply_step(skb, q);
net/sched/sch_dualpi2.c
420
++sch->q.qlen;
net/sched/sch_dualpi2.c
422
++q->packets_in_l;
net/sched/sch_dualpi2.c
423
if (!q->l_head_ts)
net/sched/sch_dualpi2.c
424
q->l_head_ts = cb->ts;
net/sched/sch_dualpi2.c
425
return qdisc_enqueue_tail(skb, q->l_queue);
net/sched/sch_dualpi2.c
427
++q->packets_in_c;
net/sched/sch_dualpi2.c
428
if (!q->c_head_ts)
net/sched/sch_dualpi2.c
429
q->c_head_ts = cb->ts;
net/sched/sch_dualpi2.c
444
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
447
err = dualpi2_skb_classify(q, skb);
net/sched/sch_dualpi2.c
455
if (q->split_gso && skb_is_gso(skb)) {
net/sched/sch_dualpi2.c
518
struct dualpi2_sched_data *q,
net/sched/sch_dualpi2.c
526
c_len = qdisc_qlen(sch) - qdisc_qlen(q->l_queue);
net/sched/sch_dualpi2.c
527
if (qdisc_qlen(q->l_queue) && (!c_len || q->c_protection_credit <= 0)) {
net/sched/sch_dualpi2.c
528
skb = __qdisc_dequeue_head(&q->l_queue->q);
net/sched/sch_dualpi2.c
529
WRITE_ONCE(q->l_head_ts, head_enqueue_time(q->l_queue));
net/sched/sch_dualpi2.c
531
*credit_change = q->c_protection_wc;
net/sched/sch_dualpi2.c
532
qdisc_qstats_backlog_dec(q->l_queue, skb);
net/sched/sch_dualpi2.c
535
--sch->q.qlen;
net/sched/sch_dualpi2.c
536
q->memory_used -= skb->truesize;
net/sched/sch_dualpi2.c
538
skb = __qdisc_dequeue_head(&sch->q);
net/sched/sch_dualpi2.c
539
WRITE_ONCE(q->c_head_ts, head_enqueue_time(sch));
net/sched/sch_dualpi2.c
540
if (qdisc_qlen(q->l_queue))
net/sched/sch_dualpi2.c
541
*credit_change = ~((s32)q->c_protection_wl) + 1;
net/sched/sch_dualpi2.c
542
q->memory_used -= skb->truesize;
net/sched/sch_dualpi2.c
544
dualpi2_reset_c_protection(q);
net/sched/sch_dualpi2.c
552
static int do_step_aqm(struct dualpi2_sched_data *q, struct sk_buff *skb,
net/sched/sch_dualpi2.c
557
if (q->step_in_packets)
net/sched/sch_dualpi2.c
558
qdelay = qdisc_qlen(q->l_queue);
net/sched/sch_dualpi2.c
562
if (dualpi2_skb_cb(skb)->apply_step && qdelay > q->step_thresh) {
net/sched/sch_dualpi2.c
568
if (dualpi2_mark(q, skb))
net/sched/sch_dualpi2.c
569
++q->step_marks;
net/sched/sch_dualpi2.c
571
qdisc_bstats_update(q->l_queue, skb);
net/sched/sch_dualpi2.c
575
static void drop_and_retry(struct dualpi2_sched_data *q, struct sk_buff *skb,
net/sched/sch_dualpi2.c
578
++q->deferred_drops_cnt;
net/sched/sch_dualpi2.c
579
q->deferred_drops_len += qdisc_pkt_len(skb);
net/sched/sch_dualpi2.c
586
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
593
while ((skb = dequeue_packet(sch, q, &credit_change, now))) {
net/sched/sch_dualpi2.c
594
if (!q->drop_early && must_drop(sch, q, skb)) {
net/sched/sch_dualpi2.c
595
drop_and_retry(q, skb, sch,
net/sched/sch_dualpi2.c
600
if (skb_in_l_queue(skb) && do_step_aqm(q, skb, now)) {
net/sched/sch_dualpi2.c
601
qdisc_qstats_drop(q->l_queue);
net/sched/sch_dualpi2.c
602
drop_and_retry(q, skb, sch,
net/sched/sch_dualpi2.c
607
q->c_protection_credit += credit_change;
net/sched/sch_dualpi2.c
612
if (q->deferred_drops_cnt) {
net/sched/sch_dualpi2.c
613
qdisc_tree_reduce_backlog(sch, q->deferred_drops_cnt,
net/sched/sch_dualpi2.c
614
q->deferred_drops_len);
net/sched/sch_dualpi2.c
615
q->deferred_drops_cnt = 0;
net/sched/sch_dualpi2.c
616
q->deferred_drops_len = 0;
net/sched/sch_dualpi2.c
627
static void get_queue_delays(struct dualpi2_sched_data *q, u64 *qdelay_c,
net/sched/sch_dualpi2.c
633
qc = READ_ONCE(q->c_head_ts);
net/sched/sch_dualpi2.c
634
ql = READ_ONCE(q->l_head_ts);
net/sched/sch_dualpi2.c
642
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
649
get_queue_delays(q, &qdelay_c, &qdelay_l);
net/sched/sch_dualpi2.c
655
delta = ((s64)qdelay - (s64)q->pi2_target) * q->pi2_alpha;
net/sched/sch_dualpi2.c
656
delta += ((s64)qdelay - (s64)q->last_qdelay) * q->pi2_beta;
net/sched/sch_dualpi2.c
657
q->last_qdelay = qdelay;
net/sched/sch_dualpi2.c
661
new_prob = __scale_delta(delta) + q->pi2_prob;
net/sched/sch_dualpi2.c
662
if (new_prob < q->pi2_prob)
net/sched/sch_dualpi2.c
665
new_prob = q->pi2_prob - __scale_delta(~delta + 1);
net/sched/sch_dualpi2.c
666
if (new_prob > q->pi2_prob)
net/sched/sch_dualpi2.c
673
if (!q->drop_overload)
net/sched/sch_dualpi2.c
674
return min_t(u32, new_prob, MAX_PROB / q->coupling_factor);
net/sched/sch_dualpi2.c
712
struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer);
net/sched/sch_dualpi2.c
713
struct Qdisc *sch = q->sch;
net/sched/sch_dualpi2.c
720
WRITE_ONCE(q->pi2_prob, calculate_probability(sch));
net/sched/sch_dualpi2.c
721
hrtimer_set_expires(&q->pi2_timer, next_pi2_timeout(q));
net/sched/sch_dualpi2.c
763
struct dualpi2_sched_data *q;
net/sched/sch_dualpi2.c
781
q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
788
WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit));
net/sched/sch_dualpi2.c
792
WRITE_ONCE(q->memory_limit,
net/sched/sch_dualpi2.c
798
WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC);
net/sched/sch_dualpi2.c
804
WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate));
net/sched/sch_dualpi2.c
810
WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha));
net/sched/sch_dualpi2.c
816
WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta));
net/sched/sch_dualpi2.c
822
WRITE_ONCE(q->step_in_packets, true);
net/sched/sch_dualpi2.c
823
WRITE_ONCE(q->step_thresh, step_th);
net/sched/sch_dualpi2.c
827
WRITE_ONCE(q->step_in_packets, false);
net/sched/sch_dualpi2.c
828
WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th));
net/sched/sch_dualpi2.c
832
WRITE_ONCE(q->min_qlen_step,
net/sched/sch_dualpi2.c
838
WRITE_ONCE(q->coupling_factor, coupling);
net/sched/sch_dualpi2.c
844
WRITE_ONCE(q->drop_overload, (bool)drop_overload);
net/sched/sch_dualpi2.c
850
WRITE_ONCE(q->drop_early, (bool)drop_early);
net/sched/sch_dualpi2.c
856
dualpi2_calculate_c_protection(sch, q, wc);
net/sched/sch_dualpi2.c
862
WRITE_ONCE(q->ecn_mask, ecn_mask);
net/sched/sch_dualpi2.c
868
WRITE_ONCE(q->split_gso, (bool)split_gso);
net/sched/sch_dualpi2.c
874
q->memory_used > q->memory_limit) {
net/sched/sch_dualpi2.c
877
q->memory_used -= skb->truesize;
net/sched/sch_dualpi2.c
891
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
893
q->sch->limit = 10000; /* Max 125ms at 1Gbps */
net/sched/sch_dualpi2.c
894
q->memory_limit = get_memory_limit(sch, q->sch->limit);
net/sched/sch_dualpi2.c
896
q->pi2_target = 15 * NSEC_PER_MSEC;
net/sched/sch_dualpi2.c
897
q->pi2_tupdate = 16 * NSEC_PER_MSEC;
net/sched/sch_dualpi2.c
898
q->pi2_alpha = dualpi2_scale_alpha_beta(41); /* ~0.16 Hz * 256 */
net/sched/sch_dualpi2.c
899
q->pi2_beta = dualpi2_scale_alpha_beta(819); /* ~3.20 Hz * 256 */
net/sched/sch_dualpi2.c
901
q->step_thresh = 1 * NSEC_PER_MSEC;
net/sched/sch_dualpi2.c
902
q->step_in_packets = false;
net/sched/sch_dualpi2.c
904
dualpi2_calculate_c_protection(q->sch, q, 10); /* wc=10%, wl=90% */
net/sched/sch_dualpi2.c
906
q->ecn_mask = TC_DUALPI2_ECN_MASK_L4S_ECT; /* INET_ECN_ECT_1 */
net/sched/sch_dualpi2.c
907
q->min_qlen_step = 0; /* Always apply step mark in L-queue */
net/sched/sch_dualpi2.c
908
q->coupling_factor = 2; /* window fairness for equal RTTs */
net/sched/sch_dualpi2.c
909
q->drop_overload = TC_DUALPI2_DROP_OVERLOAD_DROP; /* Drop overload */
net/sched/sch_dualpi2.c
910
q->drop_early = TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE; /* Drop dequeue */
net/sched/sch_dualpi2.c
911
q->split_gso = TC_DUALPI2_SPLIT_GSO_SPLIT_GSO; /* Split GSO */
net/sched/sch_dualpi2.c
917
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
920
q->l_queue = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
net/sched/sch_dualpi2.c
922
if (!q->l_queue)
net/sched/sch_dualpi2.c
925
err = tcf_block_get(&q->tcf_block, &q->tcf_filters, sch, extack);
net/sched/sch_dualpi2.c
929
q->sch = sch;
net/sched/sch_dualpi2.c
931
hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC,
net/sched/sch_dualpi2.c
941
hrtimer_start(&q->pi2_timer, next_pi2_timeout(q),
net/sched/sch_dualpi2.c
948
struct dualpi2_sched_data *q = qdisc_priv(sch);
net/sched/sch_dualpi2.c
953
step_in_pkts = READ_ONCE(q->step_in_packets);
net/sched/sch_dualpi2.c
954
step_th = READ_ONCE(q->step_thresh);
net/sched/sch_dualpi2.c
963
READ_ONCE(q->memory_limit)) ||
net/sched/sch_dualpi2.c
965
convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
net/sched/sch_dualpi2.c
967
convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
net/sched/sch_dualpi2.c
969
dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
net/sched/sch_dualpi2.c
971
dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
net/sched/sch_dualpi2.c
974
READ_ONCE(q->min_qlen_step)) ||
net/sched/sch_dualpi2.c
976
READ_ONCE(q->coupling_factor)) ||
net/sched/sch_dualpi2.c
978
READ_ONCE(q->drop_overload)) ||
net/sched/sch_dualpi2.c
980
READ_ONCE(q->drop_early)) ||
net/sched/sch_dualpi2.c
982
READ_ONCE(q->c_protection_wc)) ||
net/sched/sch_dualpi2.c
983
nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
net/sched/sch_dualpi2.c
984
nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
net/sched/sch_dualpi2.c
990
READ_ONCE(q->memory_limit)) ||
net/sched/sch_dualpi2.c
992
convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
net/sched/sch_dualpi2.c
994
convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
net/sched/sch_dualpi2.c
996
dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
net/sched/sch_dualpi2.c
998
dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
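The calculate_probability() lines in the sch_dualpi2.c group (source lines 655-666) show a PI controller: a proportional term on the distance from the delay target, a term on the delay trend, and wrap-around clamping of the u32 probability. A hypothetical userspace sketch of that update follows; the kernel additionally rescales delta via __scale_delta(), which is omitted here, and the sketch assumes |delta| stays well below 2^31 so the wrap tests are meaningful.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical PI(2) probability update: modular add, then clamp
 * if the addition wrapped, mirroring the new_prob checks above. */
static uint32_t pi2_update(uint32_t prob, int64_t qdelay_ns,
                           int64_t target_ns, int64_t last_qdelay_ns,
                           int64_t alpha, int64_t beta)
{
    int64_t delta = (qdelay_ns - target_ns) * alpha +
                    (qdelay_ns - last_qdelay_ns) * beta;
    uint32_t next = prob + (uint32_t)delta; /* wraps modularly, like a u32 */

    if (delta > 0 && next < prob)
        next = UINT32_MAX;  /* clamp instead of wrapping upward */
    else if (delta < 0 && next > prob)
        next = 0;           /* clamp instead of wrapping downward */
    return next;
}

int main(void)
{
    uint32_t prob = 0;
    int64_t last = 0;

    /* queue delay stuck at 20 ms against a 15 ms target */
    for (int i = 0; i < 3; i++) {
        prob = pi2_update(prob, 20000000, 15000000, last, 2, 40);
        last = 20000000;
        printf("tick %d: prob = %u\n", i, prob);
    }
    return 0;
}

The L4S coupling visible elsewhere in the group (local_l_prob = prob * coupling_factor) then derives the scalable-marking probability from this same base probability.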
net/sched/sch_etf.c
101
now = q->get_time();
net/sched/sch_etf.c
102
if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
net/sched/sch_etf.c
110
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
113
p = rb_first_cached(&q->head);
net/sched/sch_etf.c
122
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
127
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_etf.c
131
next = ktime_sub_ns(skb->tstamp, q->delta);
net/sched/sch_etf.c
132
qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
net/sched/sch_etf.c
165
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
166
struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
net/sched/sch_etf.c
189
rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);
net/sched/sch_etf.c
192
sch->q.qlen++;
net/sched/sch_etf.c
203
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
211
rb_erase_cached(&skb->rbnode, &q->head);
net/sched/sch_etf.c
225
sch->q.qlen--;
net/sched/sch_etf.c
233
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
235
rb_erase_cached(&skb->rbnode, &q->head);
net/sched/sch_etf.c
248
q->last = skb->tstamp;
net/sched/sch_etf.c
250
sch->q.qlen--;
net/sched/sch_etf.c
255
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
263
now = q->get_time();
net/sched/sch_etf.c
275
if (q->deadline_mode) {
net/sched/sch_etf.c
281
next = ktime_sub_ns(skb->tstamp, q->delta);
net/sched/sch_etf.c
297
struct etf_sched_data *q)
net/sched/sch_etf.c
303
if (!q->offload)
net/sched/sch_etf.c
310
etf.queue = q->queue;
net/sched/sch_etf.c
319
static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
net/sched/sch_etf.c
331
etf.queue = q->queue;
net/sched/sch_etf.c
346
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
379
q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
net/sched/sch_etf.c
382
err = etf_enable_offload(dev, q, extack);
net/sched/sch_etf.c
388
q->delta = qopt->delta;
net/sched/sch_etf.c
389
q->clockid = qopt->clockid;
net/sched/sch_etf.c
390
q->offload = OFFLOAD_IS_ON(qopt);
net/sched/sch_etf.c
391
q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);
net/sched/sch_etf.c
392
q->skip_sock_check = SKIP_SOCK_CHECK_IS_SET(qopt);
net/sched/sch_etf.c
394
switch (q->clockid) {
net/sched/sch_etf.c
396
q->get_time = ktime_get_real;
net/sched/sch_etf.c
399
q->get_time = ktime_get;
net/sched/sch_etf.c
402
q->get_time = ktime_get_boottime;
net/sched/sch_etf.c
405
q->get_time = ktime_get_clocktai;
net/sched/sch_etf.c
412
qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid);
net/sched/sch_etf.c
419
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
420
struct rb_node *p = rb_first_cached(&q->head);
net/sched/sch_etf.c
427
rb_erase_cached(&skb->rbnode, &q->head);
net/sched/sch_etf.c
429
sch->q.qlen--;
net/sched/sch_etf.c
435
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
438
if (q->watchdog.qdisc == sch)
net/sched/sch_etf.c
439
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_etf.c
443
__qdisc_reset_queue(&sch->q);
net/sched/sch_etf.c
445
q->last = 0;
net/sched/sch_etf.c
450
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
454
if (q->watchdog.qdisc == sch)
net/sched/sch_etf.c
455
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_etf.c
457
etf_disable_offload(dev, q);
net/sched/sch_etf.c
462
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
470
opt.delta = READ_ONCE(q->delta);
net/sched/sch_etf.c
471
opt.clockid = READ_ONCE(q->clockid);
net/sched/sch_etf.c
472
if (READ_ONCE(q->offload))
net/sched/sch_etf.c
475
if (READ_ONCE(q->deadline_mode))
net/sched/sch_etf.c
478
if (READ_ONCE(q->skip_sock_check))
net/sched/sch_etf.c
77
struct etf_sched_data *q = qdisc_priv(sch);
net/sched/sch_etf.c
82
if (q->skip_sock_check)
net/sched/sch_etf.c
94
if (sk->sk_clockid != q->clockid)
net/sched/sch_etf.c
97
if (sk->sk_txtime_deadline_mode != q->deadline_mode)
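The sch_etf.c entries revolve around two rules: packets sit in an rbtree ordered by requested transmit time, a txtime already in the past is invalid (the ktime_before() check at source line 102), and the watchdog is armed `delta` ns before the head packet's txtime. A hypothetical sketch using a plain sort in place of the rbtree:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical earliest-txtime-first sketch: order by txtime, skip
 * packets whose deadline already passed, wake up delta ns early. */
struct pkt { uint64_t txtime; };

static int cmp_txtime(const void *a, const void *b)
{
    uint64_t ta = ((const struct pkt *)a)->txtime;
    uint64_t tb = ((const struct pkt *)b)->txtime;
    return (ta > tb) - (ta < tb);
}

int main(void)
{
    struct pkt q[3] = { { 900 }, { 300 }, { 600 } };
    const uint64_t now = 350, delta = 50;

    qsort(q, 3, sizeof(q[0]), cmp_txtime);
    for (int i = 0; i < 3; i++) {
        if (q[i].txtime < now) {    /* analog of the ktime_before() test */
            printf("drop pkt, txtime %llu is in the past\n",
                   (unsigned long long)q[i].txtime);
            continue;
        }
        printf("arm watchdog at %llu for txtime %llu\n",
               (unsigned long long)(q[i].txtime - delta),
               (unsigned long long)q[i].txtime);
        break;
    }
    return 0;
}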
net/sched/sch_ets.c
101
return &q->classes[arg - 1];
net/sched/sch_ets.c
106
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
107
int band = cl - q->classes;
net/sched/sch_ets.c
115
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
131
qopt.replace_params.bands = q->nbands;
net/sched/sch_ets.c
134
q->prio2band, sizeof(q->prio2band));
net/sched/sch_ets.c
136
for (i = 0; i < q->nbands; i++)
net/sched/sch_ets.c
137
q_sum += q->classes[i].quantum;
net/sched/sch_ets.c
139
for (i = 0; i < q->nbands; i++) {
net/sched/sch_ets.c
140
quantum = q->classes[i].quantum;
net/sched/sch_ets.c
201
static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
net/sched/sch_ets.c
203
unsigned int band = cl - q->classes;
net/sched/sch_ets.c
205
return band < q->nstrict;
net/sched/sch_ets.c
213
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
240
if (ets_class_is_strict(q, cl)) {
net/sched/sch_ets.c
288
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
290
if (band - 1 >= q->nbands)
net/sched/sch_ets.c
298
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
304
if (!ets_class_is_strict(q, cl) && sch->q.qlen)
net/sched/sch_ets.c
312
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
322
if (!ets_class_is_strict(q, cl)) {
net/sched/sch_ets.c
348
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
354
for (i = 0; i < q->nbands; i++) {
net/sched/sch_ets.c
364
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
371
return q->block;
net/sched/sch_ets.c
387
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
395
fl = rcu_dereference_bh(q->filter_list);
net/sched/sch_ets.c
411
return &q->classes[q->prio2band[band & TC_PRIO_MAX]];
net/sched/sch_ets.c
416
if (band >= q->nbands)
net/sched/sch_ets.c
417
return &q->classes[q->prio2band[0]];
net/sched/sch_ets.c
418
return &q->classes[band];
net/sched/sch_ets.c
425
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
446
if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
net/sched/sch_ets.c
447
list_add_tail(&cl->alist, &q->active);
net/sched/sch_ets.c
452
sch->q.qlen++;
net/sched/sch_ets.c
461
sch->q.qlen--;
net/sched/sch_ets.c
467
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
474
for (band = 0; band < q->nstrict; band++) {
net/sched/sch_ets.c
475
cl = &q->classes[band];
net/sched/sch_ets.c
481
if (list_empty(&q->active))
net/sched/sch_ets.c
484
cl = list_first_entry(&q->active, struct ets_class, alist);
net/sched/sch_ets.c
497
if (cl->qdisc->q.qlen == 0)
net/sched/sch_ets.c
503
list_move_tail(&cl->alist, &q->active);
net/sched/sch_ets.c
590
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
592
unsigned int oldbands = q->nbands;
net/sched/sch_ets.c
647
ets_class_id(sch, &q->classes[i]),
net/sched/sch_ets.c
659
if (cl_is_active(&q->classes[i]))
net/sched/sch_ets.c
660
list_del_init(&q->classes[i].alist);
net/sched/sch_ets.c
661
qdisc_purge_queue(q->classes[i].qdisc);
net/sched/sch_ets.c
664
WRITE_ONCE(q->nbands, nbands);
net/sched/sch_ets.c
665
for (i = nstrict; i < q->nstrict; i++) {
net/sched/sch_ets.c
666
if (q->classes[i].qdisc->q.qlen) {
net/sched/sch_ets.c
667
list_add_tail(&q->classes[i].alist, &q->active);
net/sched/sch_ets.c
668
q->classes[i].deficit = quanta[i];
net/sched/sch_ets.c
671
for (i = q->nstrict; i < nstrict; i++) {
net/sched/sch_ets.c
672
if (cl_is_active(&q->classes[i]))
net/sched/sch_ets.c
673
list_del_init(&q->classes[i].alist);
net/sched/sch_ets.c
675
WRITE_ONCE(q->nstrict, nstrict);
net/sched/sch_ets.c
676
memcpy(q->prio2band, priomap, sizeof(priomap));
net/sched/sch_ets.c
678
for (i = 0; i < q->nbands; i++)
net/sched/sch_ets.c
679
WRITE_ONCE(q->classes[i].quantum, quanta[i]);
net/sched/sch_ets.c
681
for (i = oldbands; i < q->nbands; i++) {
net/sched/sch_ets.c
682
q->classes[i].qdisc = queues[i];
net/sched/sch_ets.c
683
if (q->classes[i].qdisc != &noop_qdisc)
net/sched/sch_ets.c
684
qdisc_hash_add(q->classes[i].qdisc, true);
net/sched/sch_ets.c
690
for (i = q->nbands; i < oldbands; i++) {
net/sched/sch_ets.c
691
qdisc_put(q->classes[i].qdisc);
net/sched/sch_ets.c
692
q->classes[i].qdisc = NULL;
net/sched/sch_ets.c
693
WRITE_ONCE(q->classes[i].quantum, 0);
net/sched/sch_ets.c
694
q->classes[i].deficit = 0;
net/sched/sch_ets.c
695
gnet_stats_basic_sync_init(&q->classes[i].bstats);
net/sched/sch_ets.c
696
memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
net/sched/sch_ets.c
704
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
710
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_ets.c
714
INIT_LIST_HEAD(&q->active);
net/sched/sch_ets.c
716
INIT_LIST_HEAD(&q->classes[i].alist);
net/sched/sch_ets.c
723
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
726
for (band = q->nstrict; band < q->nbands; band++) {
net/sched/sch_ets.c
727
if (q->classes[band].qdisc->q.qlen)
net/sched/sch_ets.c
728
list_del_init(&q->classes[band].alist);
net/sched/sch_ets.c
730
for (band = 0; band < q->nbands; band++)
net/sched/sch_ets.c
731
qdisc_reset(q->classes[band].qdisc);
net/sched/sch_ets.c
736
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
740
tcf_block_put(q->block);
net/sched/sch_ets.c
741
for (band = 0; band < q->nbands; band++)
net/sched/sch_ets.c
742
qdisc_put(q->classes[band].qdisc);
net/sched/sch_ets.c
747
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
763
nbands = READ_ONCE(q->nbands);
net/sched/sch_ets.c
767
nstrict = READ_ONCE(q->nstrict);
net/sched/sch_ets.c
778
READ_ONCE(q->classes[band].quantum)))
net/sched/sch_ets.c
791
READ_ONCE(q->prio2band[prio])))
net/sched/sch_ets.c
97
struct ets_sched *q = qdisc_priv(sch);
net/sched/sch_ets.c
99
if (arg == 0 || arg > q->nbands)
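The sch_ets.c group combines both disciplines seen so far: bands below nstrict are served in strict priority, and only when all of them are empty does the DRR rotation over the remaining bands run (source lines 474-503). A hypothetical one-dequeue sketch of that ordering, with the deficit bookkeeping elided since it matches the sch_drr.c sketch above:

#include <stdio.h>

#define NBANDS  4
#define NSTRICT 2

int main(void)
{
    int qlen[NBANDS] = { 0, 3, 5, 2 };  /* packets queued per band */

    /* strict part: the first non-empty high-priority band wins outright */
    for (int b = 0; b < NSTRICT; b++) {
        if (qlen[b]) {
            printf("dequeue from strict band %d\n", b);
            return 0;
        }
    }
    /* DRR part: bands NSTRICT..NBANDS-1 share bandwidth by quantum
     * (active-list rotation and deficits as in the sch_drr.c sketch) */
    for (int b = NSTRICT; b < NBANDS; b++) {
        if (qlen[b]) {
            printf("dequeue from DRR band %d\n", b);
            return 0;
        }
    }
    printf("all bands empty\n");
    return 0;
}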
net/sched/sch_fifo.c
231
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
net/sched/sch_fifo.c
237
if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
net/sched/sch_fifo.c
240
if (!q->ops->change)
net/sched/sch_fifo.c
249
ret = q->ops->change(q, nla, NULL);
net/sched/sch_fifo.c
260
struct Qdisc *q;
net/sched/sch_fifo.c
263
q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
net/sched/sch_fifo.c
265
if (q) {
net/sched/sch_fifo.c
266
err = fifo_set_limit(q, limit);
net/sched/sch_fifo.c
268
qdisc_put(q);
net/sched/sch_fifo.c
269
q = NULL;
net/sched/sch_fifo.c
273
return q ? : ERR_PTR(err);
net/sched/sch_fifo.c
32
if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
net/sched/sch_fifo.c
46
if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
net/sched/sch_fifo.c
51
__qdisc_queue_drop_head(sch, &sch->q, to_free);
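The three sch_fifo.c enqueue lines above (source lines 32, 46 and 51) contrast the two fifo policies: plain pfifo/bfifo refuse the arriving packet once qlen reaches the limit (tail drop), while pfifo_head_drop evicts the oldest packet to admit the new one. A hypothetical sketch of both decisions on a full queue:

#include <stdio.h>

#define LIMIT 3

int main(void)
{
    int qlen = LIMIT;          /* queue already at its limit */

    /* pfifo/bfifo: drop the arriving packet */
    if (qlen < LIMIT)
        printf("tail-drop fifo: enqueued\n");
    else
        printf("tail-drop fifo: new packet dropped\n");

    /* pfifo_head_drop: make room by dropping the head, then enqueue */
    if (qlen >= LIMIT) {
        qlen--;                /* analog of __qdisc_queue_drop_head() */
        printf("head-drop fifo: oldest packet dropped\n");
    }
    qlen++;
    printf("head-drop fifo: enqueued, qlen=%d\n", qlen);
    return 0;
}

Head drop keeps the freshest packets, which is why it pairs naturally with latency-sensitive traffic, at the cost of one extra dequeue per overflow.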
net/sched/sch_fq.c
1011
fq_prio2band_compress_crumb(map->priomap, q->prio2band);
net/sched/sch_fq.c
1019
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
1031
fq_log = q->fq_trees_log;
net/sched/sch_fq.c
1046
WRITE_ONCE(q->flow_plimit,
net/sched/sch_fq.c
1053
WRITE_ONCE(q->quantum, quantum);
net/sched/sch_fq.c
1061
WRITE_ONCE(q->initial_quantum,
net/sched/sch_fq.c
1071
WRITE_ONCE(q->flow_max_rate,
net/sched/sch_fq.c
1075
WRITE_ONCE(q->low_rate_threshold,
net/sched/sch_fq.c
1082
WRITE_ONCE(q->rate_enable,
net/sched/sch_fq.c
1091
WRITE_ONCE(q->flow_refill_delay,
net/sched/sch_fq.c
1096
err = fq_load_priomap(q, tb[TCA_FQ_PRIOMAP], extack);
net/sched/sch_fq.c
1099
err = fq_load_weights(q, tb[TCA_FQ_WEIGHTS], extack);
net/sched/sch_fq.c
1102
WRITE_ONCE(q->orphan_mask,
net/sched/sch_fq.c
1106
WRITE_ONCE(q->ce_threshold,
net/sched/sch_fq.c
1111
WRITE_ONCE(q->timer_slack,
net/sched/sch_fq.c
1115
WRITE_ONCE(q->horizon,
net/sched/sch_fq.c
1120
WRITE_ONCE(q->horizon_drop,
net/sched/sch_fq.c
1128
WRITE_ONCE(q->offload_horizon, offload_horizon);
net/sched/sch_fq.c
1141
while (sch->q.qlen > sch->limit) {
net/sched/sch_fq.c
1159
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
1162
fq_free(q->fq_root);
net/sched/sch_fq.c
1163
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_fq.c
1169
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
1173
q->flow_plimit = 100;
net/sched/sch_fq.c
1174
q->quantum = 2 * psched_mtu(qdisc_dev(sch));
net/sched/sch_fq.c
1175
q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
net/sched/sch_fq.c
1176
q->flow_refill_delay = msecs_to_jiffies(40);
net/sched/sch_fq.c
1177
q->flow_max_rate = ~0UL;
net/sched/sch_fq.c
1178
q->time_next_delayed_flow = ~0ULL;
net/sched/sch_fq.c
1179
q->rate_enable = 1;
net/sched/sch_fq.c
1181
q->band_flows[i].new_flows.first = NULL;
net/sched/sch_fq.c
1182
q->band_flows[i].old_flows.first = NULL;
net/sched/sch_fq.c
1184
q->band_flows[0].quantum = 9 << 16;
net/sched/sch_fq.c
1185
q->band_flows[1].quantum = 3 << 16;
net/sched/sch_fq.c
1186
q->band_flows[2].quantum = 1 << 16;
net/sched/sch_fq.c
1187
q->delayed = RB_ROOT;
net/sched/sch_fq.c
1188
q->fq_root = NULL;
net/sched/sch_fq.c
1189
q->fq_trees_log = ilog2(1024);
net/sched/sch_fq.c
1190
q->orphan_mask = 1024 - 1;
net/sched/sch_fq.c
1191
q->low_rate_threshold = 550000 / 8;
net/sched/sch_fq.c
1193
q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */
net/sched/sch_fq.c
1195
q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
net/sched/sch_fq.c
1196
q->horizon_drop = 1; /* by default, drop packets beyond horizon */
net/sched/sch_fq.c
1199
q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
net/sched/sch_fq.c
1201
fq_prio2band_compress_crumb(sch_default_prio2band, q->prio2band);
net/sched/sch_fq.c
1202
qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
net/sched/sch_fq.c
1207
err = fq_resize(sch, q->fq_trees_log);
net/sched/sch_fq.c
1214
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
1230
ce_threshold = READ_ONCE(q->ce_threshold);
net/sched/sch_fq.c
1233
horizon = READ_ONCE(q->horizon);
net/sched/sch_fq.c
1236
offload_horizon = READ_ONCE(q->offload_horizon);
net/sched/sch_fq.c
1242
READ_ONCE(q->flow_plimit)) ||
net/sched/sch_fq.c
1244
READ_ONCE(q->quantum)) ||
net/sched/sch_fq.c
1246
READ_ONCE(q->initial_quantum)) ||
net/sched/sch_fq.c
1248
READ_ONCE(q->rate_enable)) ||
net/sched/sch_fq.c
1251
READ_ONCE(q->flow_max_rate), ~0U)) ||
net/sched/sch_fq.c
1253
jiffies_to_usecs(READ_ONCE(q->flow_refill_delay))) ||
net/sched/sch_fq.c
1255
READ_ONCE(q->orphan_mask)) ||
net/sched/sch_fq.c
1257
READ_ONCE(q->low_rate_threshold)) ||
net/sched/sch_fq.c
1260
READ_ONCE(q->fq_trees_log)) ||
net/sched/sch_fq.c
1262
READ_ONCE(q->timer_slack)) ||
net/sched/sch_fq.c
1266
READ_ONCE(q->horizon_drop)))
net/sched/sch_fq.c
1269
fq_prio2band_decompress_crumb(q->prio2band, prio.priomap);
net/sched/sch_fq.c
1273
weights[0] = READ_ONCE(q->band_flows[0].quantum);
net/sched/sch_fq.c
1274
weights[1] = READ_ONCE(q->band_flows[1].quantum);
net/sched/sch_fq.c
1275
weights[2] = READ_ONCE(q->band_flows[2].quantum);
net/sched/sch_fq.c
1287
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
1295
st.gc_flows = q->stat_gc_flows;
net/sched/sch_fq.c
1297
st.fastpath_packets = q->internal.stat_fastpath_packets;
net/sched/sch_fq.c
1299
st.throttled = q->stat_throttled;
net/sched/sch_fq.c
1300
st.flows_plimit = q->stat_flows_plimit;
net/sched/sch_fq.c
1301
st.pkts_too_long = q->stat_pkts_too_long;
net/sched/sch_fq.c
1302
st.allocation_errors = q->stat_allocation_errors;
net/sched/sch_fq.c
1303
st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
net/sched/sch_fq.c
1305
st.flows = q->flows;
net/sched/sch_fq.c
1306
st.inactive_flows = q->inactive_flows;
net/sched/sch_fq.c
1307
st.throttled_flows = q->throttled_flows;
net/sched/sch_fq.c
1309
q->unthrottle_latency_ns, ~0U);
net/sched/sch_fq.c
1310
st.ce_mark = q->stat_ce_mark;
net/sched/sch_fq.c
1311
st.horizon_drops = q->stat_horizon_drops;
net/sched/sch_fq.c
1312
st.horizon_caps = q->stat_horizon_caps;
net/sched/sch_fq.c
1314
st.band_drops[i] = q->stat_band_drops[i];
net/sched/sch_fq.c
1315
st.band_pkt_count[i] = q->band_pkt_count[i];
net/sched/sch_fq.c
197
static void fq_flow_add_tail(struct fq_sched_data *q, struct fq_flow *flow,
net/sched/sch_fq.c
200
struct fq_perband_flows *pband = &q->band_flows[flow->band];
net/sched/sch_fq.c
213
static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
net/sched/sch_fq.c
215
rb_erase(&f->rate_node, &q->delayed);
net/sched/sch_fq.c
216
q->throttled_flows--;
net/sched/sch_fq.c
217
fq_flow_add_tail(q, f, OLD_FLOW);
net/sched/sch_fq.c
220
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
net/sched/sch_fq.c
222
struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
net/sched/sch_fq.c
235
rb_insert_color(&f->rate_node, &q->delayed);
net/sched/sch_fq.c
236
q->throttled_flows++;
net/sched/sch_fq.c
237
q->stat_throttled++;
net/sched/sch_fq.c
240
if (q->time_next_delayed_flow > f->time_next_packet)
net/sched/sch_fq.c
241
q->time_next_delayed_flow = f->time_next_packet;
net/sched/sch_fq.c
256
static void fq_gc(struct fq_sched_data *q,
net/sched/sch_fq.c
295
q->flows -= fcnt;
net/sched/sch_fq.c
296
q->inactive_flows -= fcnt;
net/sched/sch_fq.c
297
q->stat_gc_flows += fcnt;
net/sched/sch_fq.c
313
const struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
316
if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
net/sched/sch_fq.c
319
if (sch->q.qlen != 0) {
net/sched/sch_fq.c
325
if (q->flows != q->inactive_flows + q->throttled_flows)
net/sched/sch_fq.c
331
if (q->internal.qlen >= 8)
net/sched/sch_fq.c
337
if (q->time_next_delayed_flow <= now + q->offload_horizon)
net/sched/sch_fq.c
346
if (q->flow_max_rate != ~0UL)
net/sched/sch_fq.c
355
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
372
unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
net/sched/sch_fq.c
380
unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
net/sched/sch_fq.c
393
q->internal.stat_fastpath_packets++;
net/sched/sch_fq.c
394
if (skb->sk == sk && q->rate_enable &&
net/sched/sch_fq.c
398
return &q->internal;
net/sched/sch_fq.c
401
root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
net/sched/sch_fq.c
403
fq_gc(q, root, sk);
net/sched/sch_fq.c
419
f->credit = q->initial_quantum;
net/sched/sch_fq.c
421
if (q->rate_enable)
net/sched/sch_fq.c
425
fq_flow_unset_throttled(q, f);
net/sched/sch_fq.c
438
q->stat_allocation_errors++;
net/sched/sch_fq.c
439
return &q->internal;
net/sched/sch_fq.c
447
if (q->rate_enable)
net/sched/sch_fq.c
451
f->credit = q->initial_quantum;
net/sched/sch_fq.c
456
q->flows++;
net/sched/sch_fq.c
457
q->inactive_flows++;
net/sched/sch_fq.c
500
sch->q.qlen--;
net/sched/sch_fq.c
537
const struct fq_sched_data *q, u64 now)
net/sched/sch_fq.c
539
return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
net/sched/sch_fq.c
547
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
552
band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
net/sched/sch_fq.c
553
if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
net/sched/sch_fq.c
554
q->stat_band_drops[band]++;
net/sched/sch_fq.c
564
if (fq_packet_beyond_horizon(skb, q, now)) {
net/sched/sch_fq.c
565
if (q->horizon_drop) {
net/sched/sch_fq.c
566
q->stat_horizon_drops++;
net/sched/sch_fq.c
570
q->stat_horizon_caps++;
net/sched/sch_fq.c
571
skb->tstamp = now + q->horizon;
net/sched/sch_fq.c
578
if (f != &q->internal) {
net/sched/sch_fq.c
579
if (unlikely(f->qlen >= q->flow_plimit)) {
net/sched/sch_fq.c
580
q->stat_flows_plimit++;
net/sched/sch_fq.c
586
fq_flow_add_tail(q, f, NEW_FLOW);
net/sched/sch_fq.c
587
if (time_after(jiffies, f->age + q->flow_refill_delay))
net/sched/sch_fq.c
588
f->credit = max_t(u32, f->credit, q->quantum);
net/sched/sch_fq.c
592
q->band_pkt_count[band]++;
net/sched/sch_fq.c
595
q->inactive_flows--;
net/sched/sch_fq.c
603
sch->q.qlen++;
net/sched/sch_fq.c
609
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
net/sched/sch_fq.c
614
if (q->time_next_delayed_flow > now + q->offload_horizon)
net/sched/sch_fq.c
620
sample = (unsigned long)(now - q->time_next_delayed_flow);
net/sched/sch_fq.c
622
q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
net/sched/sch_fq.c
623
q->unthrottle_latency_ns += sample >> 3;
net/sched/sch_fq.c
625
now += q->offload_horizon;
net/sched/sch_fq.c
627
q->time_next_delayed_flow = ~0ULL;
net/sched/sch_fq.c
628
while ((p = rb_first(&q->delayed)) != NULL) {
net/sched/sch_fq.c
632
q->time_next_delayed_flow = f->time_next_packet;
net/sched/sch_fq.c
635
fq_flow_unset_throttled(q, f);
net/sched/sch_fq.c
652
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
662
if (!sch->q.qlen)
net/sched/sch_fq.c
665
skb = fq_peek(&q->internal);
net/sched/sch_fq.c
667
q->internal.qlen--;
net/sched/sch_fq.c
668
fq_dequeue_skb(sch, &q->internal, skb);
net/sched/sch_fq.c
673
fq_check_throttled(q, now);
net/sched/sch_fq.c
675
pband = &q->band_flows[q->band_nr];
net/sched/sch_fq.c
680
if (++q->band_nr == FQ_BANDS)
net/sched/sch_fq.c
681
q->band_nr = 0;
net/sched/sch_fq.c
682
pband = &q->band_flows[q->band_nr];
net/sched/sch_fq.c
689
if (q->time_next_delayed_flow != ~0ULL)
net/sched/sch_fq.c
690
qdisc_watchdog_schedule_range_ns(&q->watchdog,
net/sched/sch_fq.c
691
q->time_next_delayed_flow,
net/sched/sch_fq.c
692
q->timer_slack);
net/sched/sch_fq.c
698
f->credit += q->quantum;
net/sched/sch_fq.c
700
fq_flow_add_tail(q, f, OLD_FLOW);
net/sched/sch_fq.c
709
if (now + q->offload_horizon < time_next_packet) {
net/sched/sch_fq.c
712
fq_flow_set_throttled(q, f);
net/sched/sch_fq.c
717
if (unlikely((s64)(now - time_next_packet - q->ce_threshold) > 0)) {
net/sched/sch_fq.c
719
q->stat_ce_mark++;
net/sched/sch_fq.c
722
q->inactive_flows++;
net/sched/sch_fq.c
723
q->band_pkt_count[fq_skb_cb(skb)->band]--;
net/sched/sch_fq.c
728
fq_flow_add_tail(q, f, OLD_FLOW);
net/sched/sch_fq.c
738
if (!q->rate_enable)
net/sched/sch_fq.c
741
rate = q->flow_max_rate;
net/sched/sch_fq.c
751
if (rate <= q->low_rate_threshold) {
net/sched/sch_fq.c
754
plen = max(plen, q->quantum);
net/sched/sch_fq.c
770
q->stat_pkts_too_long++;
net/sched/sch_fq.c
802
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
808
sch->q.qlen = 0;
net/sched/sch_fq.c
811
fq_flow_purge(&q->internal);
net/sched/sch_fq.c
813
if (!q->fq_root)
net/sched/sch_fq.c
816
for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
net/sched/sch_fq.c
817
root = &q->fq_root[idx];
net/sched/sch_fq.c
828
q->band_flows[idx].new_flows.first = NULL;
net/sched/sch_fq.c
829
q->band_flows[idx].old_flows.first = NULL;
net/sched/sch_fq.c
830
q->band_pkt_count[idx] = 0;
net/sched/sch_fq.c
832
q->delayed = RB_ROOT;
net/sched/sch_fq.c
833
q->flows = 0;
net/sched/sch_fq.c
834
q->inactive_flows = 0;
net/sched/sch_fq.c
835
q->throttled_flows = 0;
net/sched/sch_fq.c
838
static void fq_rehash(struct fq_sched_data *q,
net/sched/sch_fq.c
878
q->flows -= fcnt;
net/sched/sch_fq.c
879
q->inactive_flows -= fcnt;
net/sched/sch_fq.c
880
q->stat_gc_flows += fcnt;
net/sched/sch_fq.c
890
struct fq_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq.c
895
if (q->fq_root && log == q->fq_trees_log)
net/sched/sch_fq.c
909
old_fq_root = q->fq_root;
net/sched/sch_fq.c
911
fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
net/sched/sch_fq.c
913
q->fq_root = array;
net/sched/sch_fq.c
914
WRITE_ONCE(q->fq_trees_log, log);
net/sched/sch_fq.c
974
static int fq_load_weights(struct fq_sched_data *q,
net/sched/sch_fq.c
989
WRITE_ONCE(q->band_flows[i].quantum, weights[i]);
net/sched/sch_fq.c
993
static int fq_load_priomap(struct fq_sched_data *q,
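Much of the sch_fq.c group is pacing bookkeeping: a flow that transmits is charged for the packet, and its time_next_packet is pushed out so that its long-run rate never exceeds flow_max_rate; flows whose time has not yet come are parked in the time-ordered `delayed` rbtree via fq_flow_set_throttled(). A hypothetical sketch of just the pacing arithmetic, with the rbtree elided:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Hypothetical fq pacing step: after sending `len` bytes at `rate`
 * bytes/sec, the flow may not send again before
 * now + len * NSEC_PER_SEC / rate. */
static uint64_t next_tx_time(uint64_t now_ns, uint32_t len, uint64_t rate)
{
    return now_ns + len * NSEC_PER_SEC / rate;
}

int main(void)
{
    uint64_t now = 0;
    const uint64_t rate = 1250000;   /* 10 Mbit/s in bytes per second */

    for (int i = 0; i < 3; i++) {
        now = next_tx_time(now, 1500, rate);
        printf("packet %d: next transmit at %.3f ms\n", i, now / 1e6);
    }
    return 0;
}

The watchdog lines above (qdisc_watchdog_schedule_range_ns with timer_slack) are what turn these computed times back into dequeue events.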
net/sched/sch_fq_codel.c
107
if (TC_H_MIN(res.classid) <= q->flows_cnt)
net/sched/sch_fq_codel.c
140
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
154
for (i = 0; i < q->flows_cnt; i++) {
net/sched/sch_fq_codel.c
155
if (q->backlogs[i] > maxbacklog) {
net/sched/sch_fq_codel.c
156
maxbacklog = q->backlogs[i];
net/sched/sch_fq_codel.c
164
flow = &q->flows[idx];
net/sched/sch_fq_codel.c
177
q->backlogs[idx] -= len;
net/sched/sch_fq_codel.c
178
q->memory_usage -= mem;
net/sched/sch_fq_codel.c
181
sch->q.qlen -= i;
net/sched/sch_fq_codel.c
188
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
205
flow = &q->flows[idx];
net/sched/sch_fq_codel.c
207
q->backlogs[idx] += qdisc_pkt_len(skb);
net/sched/sch_fq_codel.c
211
list_add_tail(&flow->flowchain, &q->new_flows);
net/sched/sch_fq_codel.c
212
q->new_flow_count++;
net/sched/sch_fq_codel.c
213
flow->deficit = q->quantum;
net/sched/sch_fq_codel.c
216
q->memory_usage += get_codel_cb(skb)->mem_usage;
net/sched/sch_fq_codel.c
217
memory_limited = q->memory_usage > q->memory_limit;
net/sched/sch_fq_codel.c
218
if (++sch->q.qlen <= sch->limit && !memory_limited)
net/sched/sch_fq_codel.c
222
prev_qlen = sch->q.qlen;
net/sched/sch_fq_codel.c
231
ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
net/sched/sch_fq_codel.c
233
prev_qlen -= sch->q.qlen;
net/sched/sch_fq_codel.c
235
q->drop_overlimit += prev_qlen;
net/sched/sch_fq_codel.c
237
q->drop_overmemory += prev_qlen;
net/sched/sch_fq_codel.c
259
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
266
q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
net/sched/sch_fq_codel.c
267
q->memory_usage -= get_codel_cb(skb)->mem_usage;
net/sched/sch_fq_codel.c
268
sch->q.qlen--;
net/sched/sch_fq_codel.c
284
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
290
head = &q->new_flows;
net/sched/sch_fq_codel.c
292
head = &q->old_flows;
net/sched/sch_fq_codel.c
299
flow->deficit += q->quantum;
net/sched/sch_fq_codel.c
300
list_move_tail(&flow->flowchain, &q->old_flows);
net/sched/sch_fq_codel.c
304
skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
net/sched/sch_fq_codel.c
305
&flow->cvars, &q->cstats, qdisc_pkt_len,
net/sched/sch_fq_codel.c
310
if ((head == &q->new_flows) && !list_empty(&q->old_flows))
net/sched/sch_fq_codel.c
311
list_move_tail(&flow->flowchain, &q->old_flows);
net/sched/sch_fq_codel.c
319
if (q->cstats.drop_count) {
net/sched/sch_fq_codel.c
320
qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
net/sched/sch_fq_codel.c
321
q->cstats.drop_len);
net/sched/sch_fq_codel.c
322
q->cstats.drop_count = 0;
net/sched/sch_fq_codel.c
323
q->cstats.drop_len = 0;
net/sched/sch_fq_codel.c
336
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
339
INIT_LIST_HEAD(&q->new_flows);
net/sched/sch_fq_codel.c
340
INIT_LIST_HEAD(&q->old_flows);
net/sched/sch_fq_codel.c
341
for (i = 0; i < q->flows_cnt; i++) {
net/sched/sch_fq_codel.c
342
struct fq_codel_flow *flow = q->flows + i;
net/sched/sch_fq_codel.c
348
memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
net/sched/sch_fq_codel.c
349
q->memory_usage = 0;
net/sched/sch_fq_codel.c
370
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
380
if (q->flows)
net/sched/sch_fq_codel.c
382
q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
net/sched/sch_fq_codel.c
383
if (!q->flows_cnt ||
net/sched/sch_fq_codel.c
384
q->flows_cnt > 65536)
net/sched/sch_fq_codel.c
399
WRITE_ONCE(q->cparams.target,
net/sched/sch_fq_codel.c
406
WRITE_ONCE(q->cparams.ce_threshold,
net/sched/sch_fq_codel.c
411
WRITE_ONCE(q->cparams.ce_threshold_selector,
net/sched/sch_fq_codel.c
414
WRITE_ONCE(q->cparams.ce_threshold_mask,
net/sched/sch_fq_codel.c
420
WRITE_ONCE(q->cparams.interval,
net/sched/sch_fq_codel.c
429
WRITE_ONCE(q->cparams.ecn,
net/sched/sch_fq_codel.c
433
WRITE_ONCE(q->quantum, quantum);
net/sched/sch_fq_codel.c
436
WRITE_ONCE(q->drop_batch_size,
net/sched/sch_fq_codel.c
440
WRITE_ONCE(q->memory_limit,
net/sched/sch_fq_codel.c
443
while (sch->q.qlen > sch->limit ||
net/sched/sch_fq_codel.c
444
q->memory_usage > q->memory_limit) {
net/sched/sch_fq_codel.c
462
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
464
tcf_block_put(q->block);
net/sched/sch_fq_codel.c
465
kvfree(q->backlogs);
net/sched/sch_fq_codel.c
466
kvfree(q->flows);
net/sched/sch_fq_codel.c
472
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
477
q->flows_cnt = 1024;
net/sched/sch_fq_codel.c
478
q->memory_limit = 32 << 20; /* 32 MBytes */
net/sched/sch_fq_codel.c
479
q->drop_batch_size = 64;
net/sched/sch_fq_codel.c
480
q->quantum = psched_mtu(qdisc_dev(sch));
net/sched/sch_fq_codel.c
481
INIT_LIST_HEAD(&q->new_flows);
net/sched/sch_fq_codel.c
482
INIT_LIST_HEAD(&q->old_flows);
net/sched/sch_fq_codel.c
483
codel_params_init(&q->cparams);
net/sched/sch_fq_codel.c
484
codel_stats_init(&q->cstats);
net/sched/sch_fq_codel.c
485
q->cparams.ecn = true;
net/sched/sch_fq_codel.c
486
q->cparams.mtu = psched_mtu(qdisc_dev(sch));
net/sched/sch_fq_codel.c
494
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_fq_codel.c
498
if (!q->flows) {
net/sched/sch_fq_codel.c
499
q->flows = kvzalloc_objs(struct fq_codel_flow, q->flows_cnt);
net/sched/sch_fq_codel.c
500
if (!q->flows) {
net/sched/sch_fq_codel.c
504
q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
net/sched/sch_fq_codel.c
505
if (!q->backlogs) {
net/sched/sch_fq_codel.c
509
for (i = 0; i < q->flows_cnt; i++) {
net/sched/sch_fq_codel.c
510
struct fq_codel_flow *flow = q->flows + i;
net/sched/sch_fq_codel.c
526
kvfree(q->flows);
net/sched/sch_fq_codel.c
527
q->flows = NULL;
net/sched/sch_fq_codel.c
529
q->flows_cnt = 0;
net/sched/sch_fq_codel.c
535
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
544
codel_time_to_us(READ_ONCE(q->cparams.target))) ||
net/sched/sch_fq_codel.c
548
codel_time_to_us(READ_ONCE(q->cparams.interval))) ||
net/sched/sch_fq_codel.c
550
READ_ONCE(q->cparams.ecn)) ||
net/sched/sch_fq_codel.c
552
READ_ONCE(q->quantum)) ||
net/sched/sch_fq_codel.c
554
READ_ONCE(q->drop_batch_size)) ||
net/sched/sch_fq_codel.c
556
READ_ONCE(q->memory_limit)) ||
net/sched/sch_fq_codel.c
558
READ_ONCE(q->flows_cnt)))
net/sched/sch_fq_codel.c
561
ce_threshold = READ_ONCE(q->cparams.ce_threshold);
net/sched/sch_fq_codel.c
567
READ_ONCE(q->cparams.ce_threshold_selector)))
net/sched/sch_fq_codel.c
570
READ_ONCE(q->cparams.ce_threshold_mask)))
net/sched/sch_fq_codel.c
582
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
588
st.qdisc_stats.maxpacket = q->cstats.maxpacket;
net/sched/sch_fq_codel.c
589
st.qdisc_stats.drop_overlimit = q->drop_overlimit;
net/sched/sch_fq_codel.c
590
st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
net/sched/sch_fq_codel.c
591
st.qdisc_stats.new_flow_count = q->new_flow_count;
net/sched/sch_fq_codel.c
592
st.qdisc_stats.ce_mark = q->cstats.ce_mark;
net/sched/sch_fq_codel.c
593
st.qdisc_stats.memory_usage = q->memory_usage;
net/sched/sch_fq_codel.c
594
st.qdisc_stats.drop_overmemory = q->drop_overmemory;
net/sched/sch_fq_codel.c
597
list_for_each(pos, &q->new_flows)
net/sched/sch_fq_codel.c
600
list_for_each(pos, &q->old_flows)
net/sched/sch_fq_codel.c
623
static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
net/sched/sch_fq_codel.c
630
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
634
return q->block;
net/sched/sch_fq_codel.c
647
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
652
if (idx < q->flows_cnt) {
net/sched/sch_fq_codel.c
653
const struct fq_codel_flow *flow = &q->flows[idx];
net/sched/sch_fq_codel.c
681
qs.backlog = q->backlogs[idx];
net/sched/sch_fq_codel.c
686
if (idx < q->flows_cnt)
net/sched/sch_fq_codel.c
693
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
699
for (i = 0; i < q->flows_cnt; i++) {
net/sched/sch_fq_codel.c
70
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
net/sched/sch_fq_codel.c
700
if (list_empty(&q->flows[i].flowchain)) {
net/sched/sch_fq_codel.c
73
return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
net/sched/sch_fq_codel.c
79
struct fq_codel_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_codel.c
86
TC_H_MIN(skb->priority) <= q->flows_cnt)
net/sched/sch_fq_codel.c
89
filter = rcu_dereference_bh(q->filter_list);
net/sched/sch_fq_codel.c
91
return fq_codel_hash(q, skb) + 1;
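fq_codel_hash() above maps a 32-bit flow hash into flows_cnt buckets with reciprocal_scale() rather than a modulo. The multiply-shift below is the kernel's actual definition of that helper; the sample hash values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* reciprocal_scale(): map val uniformly into [0, ep_ro) with one
 * multiply and one shift, no division. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
    return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
    const uint32_t flows_cnt = 1024;  /* fq_codel's default flow count */
    uint32_t hashes[3] = { 0x12345678u, 0xdeadbeefu, 0xffffffffu };

    for (int i = 0; i < 3; i++)
        printf("hash %08x -> bucket %u\n", hashes[i],
               reciprocal_scale(hashes[i], flows_cnt));
    return 0;
}

Because the mapping needs no division, flows_cnt need not be a power of two, which is why the change path above only bounds it to 1..65536.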
net/sched/sch_fq_pie.c
112
if (TC_H_MIN(res.classid) <= q->flows_cnt)
net/sched/sch_fq_pie.c
134
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
152
sel_flow = &q->flows[idx];
net/sched/sch_fq_pie.c
155
memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
net/sched/sch_fq_pie.c
159
q->stats.overlimit++;
net/sched/sch_fq_pie.c
162
q->overmemory++;
net/sched/sch_fq_pie.c
167
if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
net/sched/sch_fq_pie.c
170
} else if (q->p_params.ecn &&
net/sched/sch_fq_pie.c
171
sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
net/sched/sch_fq_pie.c
176
q->stats.ecn_mark++;
net/sched/sch_fq_pie.c
181
if (!q->p_params.dq_rate_estimator)
net/sched/sch_fq_pie.c
185
q->stats.packets_in++;
net/sched/sch_fq_pie.c
186
q->memory_usage += skb->truesize;
net/sched/sch_fq_pie.c
188
sch->q.qlen++;
net/sched/sch_fq_pie.c
191
list_add_tail(&sel_flow->flowchain, &q->new_flows);
net/sched/sch_fq_pie.c
192
q->new_flow_count++;
net/sched/sch_fq_pie.c
193
sel_flow->deficit = q->quantum;
net/sched/sch_fq_pie.c
202
q->stats.dropped++;
net/sched/sch_fq_pie.c
240
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
247
head = &q->new_flows;
net/sched/sch_fq_pie.c
249
head = &q->old_flows;
net/sched/sch_fq_pie.c
257
flow->deficit += q->quantum;
net/sched/sch_fq_pie.c
258
list_move_tail(&flow->flowchain, &q->old_flows);
net/sched/sch_fq_pie.c
266
sch->q.qlen--;
net/sched/sch_fq_pie.c
272
if (head == &q->new_flows && !list_empty(&q->old_flows))
net/sched/sch_fq_pie.c
273
list_move_tail(&flow->flowchain, &q->old_flows);
net/sched/sch_fq_pie.c
282
q->memory_usage -= get_pie_cb(skb)->mem_usage;
net/sched/sch_fq_pie.c
283
pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
net/sched/sch_fq_pie.c
291
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
303
WRITE_ONCE(q->p_params.limit, limit);
net/sched/sch_fq_pie.c
307
if (q->flows) {
net/sched/sch_fq_pie.c
312
q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
net/sched/sch_fq_pie.c
313
if (!q->flows_cnt || q->flows_cnt > 65536) {
net/sched/sch_fq_pie.c
326
WRITE_ONCE(q->p_params.target,
net/sched/sch_fq_pie.c
332
WRITE_ONCE(q->p_params.tupdate,
net/sched/sch_fq_pie.c
336
WRITE_ONCE(q->p_params.alpha,
net/sched/sch_fq_pie.c
340
WRITE_ONCE(q->p_params.beta,
net/sched/sch_fq_pie.c
344
WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));
net/sched/sch_fq_pie.c
347
WRITE_ONCE(q->memory_limit,
net/sched/sch_fq_pie.c
351
WRITE_ONCE(q->ecn_prob,
net/sched/sch_fq_pie.c
355
WRITE_ONCE(q->p_params.ecn,
net/sched/sch_fq_pie.c
359
WRITE_ONCE(q->p_params.bytemode,
net/sched/sch_fq_pie.c
363
WRITE_ONCE(q->p_params.dq_rate_estimator,
net/sched/sch_fq_pie.c
367
while (sch->q.qlen > sch->limit) {
net/sched/sch_fq_pie.c
389
struct fq_pie_sched_data *q = timer_container_of(q, t, adapt_timer);
net/sched/sch_fq_pie.c
391
struct Qdisc *sch = q->sch;
net/sched/sch_fq_pie.c
400
max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
net/sched/sch_fq_pie.c
402
pie_calculate_probability(&q->p_params,
net/sched/sch_fq_pie.c
403
&q->flows[q->flows_cursor].vars,
net/sched/sch_fq_pie.c
404
q->flows[q->flows_cursor].backlog);
net/sched/sch_fq_pie.c
405
q->flows_cursor++;
net/sched/sch_fq_pie.c
408
tupdate = q->p_params.tupdate;
net/sched/sch_fq_pie.c
410
if (q->flows_cursor >= q->flows_cnt) {
net/sched/sch_fq_pie.c
411
q->flows_cursor = 0;
net/sched/sch_fq_pie.c
415
mod_timer(&q->adapt_timer, jiffies + next);
net/sched/sch_fq_pie.c
423
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
427
pie_params_init(&q->p_params);
net/sched/sch_fq_pie.c
429
q->p_params.limit = sch->limit;
net/sched/sch_fq_pie.c
430
q->quantum = psched_mtu(qdisc_dev(sch));
net/sched/sch_fq_pie.c
431
q->sch = sch;
net/sched/sch_fq_pie.c
432
q->ecn_prob = 10;
net/sched/sch_fq_pie.c
433
q->flows_cnt = 1024;
net/sched/sch_fq_pie.c
434
q->memory_limit = SZ_32M;
net/sched/sch_fq_pie.c
436
INIT_LIST_HEAD(&q->new_flows);
net/sched/sch_fq_pie.c
437
INIT_LIST_HEAD(&q->old_flows);
net/sched/sch_fq_pie.c
438
timer_setup(&q->adapt_timer, fq_pie_timer, 0);
net/sched/sch_fq_pie.c
447
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_fq_pie.c
451
q->flows = kvzalloc_objs(struct fq_pie_flow, q->flows_cnt);
net/sched/sch_fq_pie.c
452
if (!q->flows) {
net/sched/sch_fq_pie.c
456
for (idx = 0; idx < q->flows_cnt; idx++) {
net/sched/sch_fq_pie.c
457
struct fq_pie_flow *flow = q->flows + idx;
net/sched/sch_fq_pie.c
463
mod_timer(&q->adapt_timer, jiffies + HZ / 2);
net/sched/sch_fq_pie.c
468
q->flows_cnt = 0;
net/sched/sch_fq_pie.c
475
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
484
nla_put_u32(skb, TCA_FQ_PIE_FLOWS, READ_ONCE(q->flows_cnt)) ||
net/sched/sch_fq_pie.c
486
((u32)PSCHED_TICKS2NS(READ_ONCE(q->p_params.target))) /
net/sched/sch_fq_pie.c
489
jiffies_to_usecs(READ_ONCE(q->p_params.tupdate))) ||
net/sched/sch_fq_pie.c
490
nla_put_u32(skb, TCA_FQ_PIE_ALPHA, READ_ONCE(q->p_params.alpha)) ||
net/sched/sch_fq_pie.c
491
nla_put_u32(skb, TCA_FQ_PIE_BETA, READ_ONCE(q->p_params.beta)) ||
net/sched/sch_fq_pie.c
492
nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, READ_ONCE(q->quantum)) ||
net/sched/sch_fq_pie.c
494
READ_ONCE(q->memory_limit)) ||
net/sched/sch_fq_pie.c
495
nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, READ_ONCE(q->ecn_prob)) ||
net/sched/sch_fq_pie.c
496
nla_put_u32(skb, TCA_FQ_PIE_ECN, READ_ONCE(q->p_params.ecn)) ||
net/sched/sch_fq_pie.c
497
nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, READ_ONCE(q->p_params.bytemode)) ||
net/sched/sch_fq_pie.c
499
READ_ONCE(q->p_params.dq_rate_estimator)))
net/sched/sch_fq_pie.c
511
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
513
.packets_in = q->stats.packets_in,
net/sched/sch_fq_pie.c
514
.overlimit = q->stats.overlimit,
net/sched/sch_fq_pie.c
515
.overmemory = q->overmemory,
net/sched/sch_fq_pie.c
516
.dropped = q->stats.dropped,
net/sched/sch_fq_pie.c
517
.ecn_mark = q->stats.ecn_mark,
net/sched/sch_fq_pie.c
518
.new_flow_count = q->new_flow_count,
net/sched/sch_fq_pie.c
519
.memory_usage = q->memory_usage,
net/sched/sch_fq_pie.c
524
list_for_each(pos, &q->new_flows)
net/sched/sch_fq_pie.c
527
list_for_each(pos, &q->old_flows)
net/sched/sch_fq_pie.c
536
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
539
INIT_LIST_HEAD(&q->new_flows);
net/sched/sch_fq_pie.c
540
INIT_LIST_HEAD(&q->old_flows);
net/sched/sch_fq_pie.c
541
for (idx = 0; idx < q->flows_cnt; idx++) {
net/sched/sch_fq_pie.c
542
struct fq_pie_flow *flow = q->flows + idx;
net/sched/sch_fq_pie.c
555
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
557
tcf_block_put(q->block);
net/sched/sch_fq_pie.c
558
q->p_params.tupdate = 0;
net/sched/sch_fq_pie.c
559
timer_delete_sync(&q->adapt_timer);
net/sched/sch_fq_pie.c
560
kvfree(q->flows);
net/sched/sch_fq_pie.c
75
static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
net/sched/sch_fq_pie.c
78
return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
net/sched/sch_fq_pie.c
84
struct fq_pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_fq_pie.c
91
TC_H_MIN(skb->priority) <= q->flows_cnt)
net/sched/sch_fq_pie.c
94
filter = rcu_dereference_bh(q->filter_list);
net/sched/sch_fq_pie.c
96
return fq_pie_hash(q, skb) + 1;
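The fq_pie_qdisc_enqueue() lines above show the enqueue-time decision: drop early with the PIE probability, but ECN-mark instead of dropping while the probability is still below ecn_prob percent of the maximum (source line 171). A rough userspace sketch follows; MAX_PROB as the full u64 range is an assumption of this sketch, and the rand()-based coin is only approximately uniform, which is good enough for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_PROB UINT64_MAX  /* assumption for this sketch only */

int main(void)
{
    uint64_t prob = MAX_PROB / 20;   /* 5% marking probability */
    uint32_t ecn_prob = 10;          /* mark rather than drop below 10% */

    srand(1);
    for (int i = 0; i < 5; i++) {
        /* crude ~uniform coin over most of the u64 range */
        uint64_t coin = ((uint64_t)rand() << 32) | (uint64_t)rand();

        if (coin >= prob)
            printf("pkt %d: enqueued\n", i);
        else if (prob <= MAX_PROB / 100 * ecn_prob)
            printf("pkt %d: ECN marked\n", i);    /* below the ECN cutoff */
        else
            printf("pkt %d: dropped early\n", i);
    }
    return 0;
}

The adapt_timer lines in the same group then recompute prob incrementally, walking at most 2048 flows per timer tick via flows_cursor so one pass never stalls the timer.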
net/sched/sch_generic.c
100
qdisc_maybe_clear_missed(q, txq);
net/sched/sch_generic.c
1037
qdisc->q.qlen = 0;
net/sched/sch_generic.c
1054
struct Qdisc *q = container_of(head, struct Qdisc, rcu);
net/sched/sch_generic.c
1056
qdisc_free(q);
net/sched/sch_generic.c
110
static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
net/sched/sch_generic.c
112
struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
net/sched/sch_generic.c
115
skb = __skb_dequeue_bad_txq(q);
net/sched/sch_generic.c
120
static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
net/sched/sch_generic.c
125
if (q->flags & TCQ_F_NOLOCK) {
net/sched/sch_generic.c
126
lock = qdisc_lock(q);
net/sched/sch_generic.c
1298
struct Qdisc *q;
net/sched/sch_generic.c
130
__skb_queue_tail(&q->skb_bad_txq, skb);
net/sched/sch_generic.c
1302
q = rtnl_dereference(dev_queue->qdisc_sleeping);
net/sched/sch_generic.c
1304
root_lock = qdisc_lock(q);
net/sched/sch_generic.c
1307
val = (qdisc_is_running(q) ||
net/sched/sch_generic.c
1308
test_bit(__QDISC_STATE_SCHED, &q->state));
net/sched/sch_generic.c
132
if (qdisc_is_percpu_stats(q)) {
net/sched/sch_generic.c
133
qdisc_qstats_cpu_backlog_inc(q, skb);
net/sched/sch_generic.c
134
qdisc_qstats_cpu_qlen_inc(q);
net/sched/sch_generic.c
136
qdisc_qstats_backlog_inc(q, skb);
net/sched/sch_generic.c
137
q->q.qlen++;
net/sched/sch_generic.c
144
static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
net/sched/sch_generic.c
148
if (q->flags & TCQ_F_NOLOCK) {
net/sched/sch_generic.c
149
lock = qdisc_lock(q);
net/sched/sch_generic.c
156
__skb_queue_tail(&q->gso_skb, skb);
net/sched/sch_generic.c
159
if (qdisc_is_percpu_stats(q)) {
net/sched/sch_generic.c
160
qdisc_qstats_cpu_requeues_inc(q);
net/sched/sch_generic.c
161
qdisc_qstats_cpu_backlog_inc(q, skb);
net/sched/sch_generic.c
162
qdisc_qstats_cpu_qlen_inc(q);
net/sched/sch_generic.c
164
q->qstats.requeues++;
net/sched/sch_generic.c
165
qdisc_qstats_backlog_inc(q, skb);
net/sched/sch_generic.c
166
q->q.qlen++;
net/sched/sch_generic.c
174
set_bit(__QDISC_STATE_MISSED, &q->state);
net/sched/sch_generic.c
176
__netif_schedule(q);
net/sched/sch_generic.c
180
static void try_bulk_dequeue_skb(struct Qdisc *q,
net/sched/sch_generic.c
189
struct sk_buff *nskb = q->dequeue(q);
net/sched/sch_generic.c
207
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
net/sched/sch_generic.c
216
nskb = q->dequeue(q);
net/sched/sch_generic.c
220
qdisc_enqueue_skb_bad_txq(q, nskb);
net/sched/sch_generic.c
233
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
net/sched/sch_generic.c
236
const struct netdev_queue *txq = q->dev_queue;
net/sched/sch_generic.c
240
if (unlikely(!skb_queue_empty(&q->gso_skb))) {
net/sched/sch_generic.c
243
if (q->flags & TCQ_F_NOLOCK) {
net/sched/sch_generic.c
244
lock = qdisc_lock(q);
net/sched/sch_generic.c
248
skb = skb_peek(&q->gso_skb);
net/sched/sch_generic.c
266
skb = __skb_dequeue(&q->gso_skb);
net/sched/sch_generic.c
267
if (qdisc_is_percpu_stats(q)) {
net/sched/sch_generic.c
268
qdisc_qstats_cpu_backlog_dec(q, skb);
net/sched/sch_generic.c
269
qdisc_qstats_cpu_qlen_dec(q);
net/sched/sch_generic.c
271
qdisc_qstats_backlog_dec(q, skb);
net/sched/sch_generic.c
272
q->q.qlen--;
net/sched/sch_generic.c
276
qdisc_maybe_clear_missed(q, txq);
net/sched/sch_generic.c
285
if ((q->flags & TCQ_F_ONETXQUEUE) &&
net/sched/sch_generic.c
287
qdisc_maybe_clear_missed(q, txq);
net/sched/sch_generic.c
291
skb = qdisc_dequeue_skb_bad_txq(q);
net/sched/sch_generic.c
297
skb = q->dequeue(q);
net/sched/sch_generic.c
300
if (qdisc_may_bulk(q))
net/sched/sch_generic.c
301
try_bulk_dequeue_skb(q, skb, txq, packets, budget);
net/sched/sch_generic.c
303
try_bulk_dequeue_skb_slow(q, skb, packets);
net/sched/sch_generic.c
306
trace_qdisc_dequeue(q, txq, *packets, skb);
net/sched/sch_generic.c
319
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
net/sched/sch_generic.c
339
dev_requeue_skb(skb, q);
net/sched/sch_generic.c
349
qdisc_maybe_clear_missed(q, txq);
net/sched/sch_generic.c
365
dev->name, ret, q->q.qlen);
net/sched/sch_generic.c
367
dev_requeue_skb(skb, q);
net/sched/sch_generic.c
393
static inline bool qdisc_restart(struct Qdisc *q, int *packets, int budget)
net/sched/sch_generic.c
40
static void qdisc_maybe_clear_missed(struct Qdisc *q,
net/sched/sch_generic.c
402
skb = dequeue_skb(q, &validate, packets, budget);
net/sched/sch_generic.c
406
if (!(q->flags & TCQ_F_NOLOCK))
net/sched/sch_generic.c
407
root_lock = qdisc_lock(q);
net/sched/sch_generic.c
409
dev = qdisc_dev(q);
net/sched/sch_generic.c
412
return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
net/sched/sch_generic.c
415
void __qdisc_run(struct Qdisc *q)
net/sched/sch_generic.c
420
while (qdisc_restart(q, &packets, quota)) {
net/sched/sch_generic.c
423
if (q->flags & TCQ_F_NOLOCK)
net/sched/sch_generic.c
424
set_bit(__QDISC_STATE_MISSED, &q->state);
net/sched/sch_generic.c
426
__netif_schedule(q);
net/sched/sch_generic.c
43
clear_bit(__QDISC_STATE_MISSED, &q->state);
net/sched/sch_generic.c
56
set_bit(__QDISC_STATE_MISSED, &q->state);
net/sched/sch_generic.c
58
set_bit(__QDISC_STATE_DRAINING, &q->state);
net/sched/sch_generic.c
670
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
net/sched/sch_generic.c
723
struct skb_array q[PFIFO_FAST_BANDS];
net/sched/sch_generic.c
729
return &priv->q[band];
net/sched/sch_generic.c
737
struct skb_array *q = band2list(priv, band);
net/sched/sch_generic.c
74
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
net/sched/sch_generic.c
741
err = skb_array_produce(q, skb);
net/sched/sch_generic.c
76
const struct netdev_queue *txq = q->dev_queue;
net/sched/sch_generic.c
765
struct skb_array *q = band2list(priv, band);
net/sched/sch_generic.c
767
if (__skb_array_empty(q))
net/sched/sch_generic.c
770
skb = __skb_array_consume(q);
net/sched/sch_generic.c
80
if (q->flags & TCQ_F_NOLOCK) {
net/sched/sch_generic.c
804
struct skb_array *q = band2list(priv, band);
net/sched/sch_generic.c
806
skb = __skb_array_peek(q);
net/sched/sch_generic.c
81
lock = qdisc_lock(q);
net/sched/sch_generic.c
818
struct skb_array *q = band2list(priv, band);
net/sched/sch_generic.c
824
if (!q->ring.queue)
net/sched/sch_generic.c
827
while ((skb = __skb_array_consume(q)) != NULL)
net/sched/sch_generic.c
833
struct gnet_stats_queue *q;
net/sched/sch_generic.c
835
q = per_cpu_ptr(qdisc->cpu_qstats, i);
net/sched/sch_generic.c
836
q->backlog = 0;
net/sched/sch_generic.c
837
q->qlen = 0;
net/sched/sch_generic.c
85
skb = skb_peek(&q->skb_bad_txq);
net/sched/sch_generic.c
867
struct skb_array *q = band2list(priv, prio);
net/sched/sch_generic.c
870
err = skb_array_init(q, qlen, GFP_KERNEL);
net/sched/sch_generic.c
886
struct skb_array *q = band2list(priv, prio);
net/sched/sch_generic.c
891
if (!q->ring.queue)
net/sched/sch_generic.c
896
ptr_ring_cleanup(&q->ring, NULL);
net/sched/sch_generic.c
90
skb = __skb_dequeue(&q->skb_bad_txq);
net/sched/sch_generic.c
908
struct skb_array *q = band2list(priv, prio);
net/sched/sch_generic.c
91
if (qdisc_is_percpu_stats(q)) {
net/sched/sch_generic.c
910
bands[prio] = q;
net/sched/sch_generic.c
92
qdisc_qstats_cpu_backlog_dec(q, skb);
net/sched/sch_generic.c
93
qdisc_qstats_cpu_qlen_dec(q);
net/sched/sch_generic.c
95
qdisc_qstats_backlog_dec(q, skb);
net/sched/sch_generic.c
96
q->q.qlen--;
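
Several sch_generic.c hits above branch on qdisc_is_percpu_stats(q): lockless (TCQ_F_NOLOCK) qdiscs bump per-CPU qlen/backlog counters, while classic qdiscs bump the shared qstats and q->q.qlen under the qdisc lock. A minimal sketch of that split, assuming invented stand-in types (a userspace model, not the kernel helpers):

#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

struct stats { unsigned qlen, backlog; };

struct model_qdisc {
	bool percpu;			/* stands in for qdisc_is_percpu_stats() */
	struct stats cpu[NCPUS];	/* stands in for q->cpu_qstats */
	struct stats shared;		/* stands in for q->qstats + q->q.qlen */
};

static void enqueue_account(struct model_qdisc *q, int cpu, unsigned len)
{
	if (q->percpu) {		/* mirrors qdisc_qstats_cpu_*_inc() */
		q->cpu[cpu].qlen++;
		q->cpu[cpu].backlog += len;
	} else {			/* mirrors qdisc_qstats_backlog_inc() + q->q.qlen++ */
		q->shared.qlen++;
		q->shared.backlog += len;
	}
}

int main(void)
{
	struct model_qdisc q = { .percpu = true };

	enqueue_account(&q, 1, 1500);
	printf("cpu1 qlen=%u backlog=%u\n", q.cpu[1].qlen, q.cpu[1].backlog);
	return 0;
}
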
net/sched/sch_gred.c
102
if (q == NULL)
net/sched/sch_gred.c
106
if (table->tab[n] && table->tab[n]->prio == q->prio)
net/sched/sch_gred.c
114
struct gred_sched_data *q,
net/sched/sch_gred.c
120
return q->backlog;
net/sched/sch_gred.c
129
struct gred_sched_data *q)
net/sched/sch_gred.c
131
q->vars.qavg = table->wred_set.qavg;
net/sched/sch_gred.c
132
q->vars.qidlestart = table->wred_set.qidlestart;
net/sched/sch_gred.c
136
struct gred_sched_data *q)
net/sched/sch_gred.c
138
table->wred_set.qavg = q->vars.qavg;
net/sched/sch_gred.c
139
table->wred_set.qidlestart = q->vars.qidlestart;
net/sched/sch_gred.c
142
static int gred_use_ecn(struct gred_sched_data *q)
net/sched/sch_gred.c
144
return q->red_flags & TC_RED_ECN;
net/sched/sch_gred.c
147
static int gred_use_harddrop(struct gred_sched_data *q)
net/sched/sch_gred.c
149
return q->red_flags & TC_RED_HARDDROP;
net/sched/sch_gred.c
168
struct gred_sched_data *q = NULL;
net/sched/sch_gred.c
173
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
net/sched/sch_gred.c
176
q = t->tab[dp];
net/sched/sch_gred.c
177
if (!q) {
net/sched/sch_gred.c
199
if (t->tab[i] && t->tab[i]->prio < q->prio &&
net/sched/sch_gred.c
206
q->packetsin++;
net/sched/sch_gred.c
207
q->bytesin += qdisc_pkt_len(skb);
net/sched/sch_gred.c
210
gred_load_wred_set(t, q);
net/sched/sch_gred.c
212
q->vars.qavg = red_calc_qavg(&q->parms,
net/sched/sch_gred.c
213
&q->vars,
net/sched/sch_gred.c
214
gred_backlog(t, q, sch));
net/sched/sch_gred.c
216
if (red_is_idling(&q->vars))
net/sched/sch_gred.c
217
red_end_of_idle_period(&q->vars);
net/sched/sch_gred.c
220
gred_store_wred_set(t, q);
net/sched/sch_gred.c
222
switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
net/sched/sch_gred.c
228
if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
net/sched/sch_gred.c
229
q->stats.prob_drop++;
net/sched/sch_gred.c
233
q->stats.prob_mark++;
net/sched/sch_gred.c
238
if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
net/sched/sch_gred.c
240
q->stats.forced_drop++;
net/sched/sch_gred.c
243
q->stats.forced_mark++;
net/sched/sch_gred.c
247
if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
net/sched/sch_gred.c
248
q->backlog += qdisc_pkt_len(skb);
net/sched/sch_gred.c
252
q->stats.pdrop++;
net/sched/sch_gred.c
269
struct gred_sched_data *q;
net/sched/sch_gred.c
272
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
net/sched/sch_gred.c
276
q->backlog -= qdisc_pkt_len(skb);
net/sched/sch_gred.c
282
if (!q->backlog)
net/sched/sch_gred.c
283
red_start_of_idle_period(&q->vars);
net/sched/sch_gred.c
301
struct gred_sched_data *q = t->tab[i];
net/sched/sch_gred.c
303
if (!q)
net/sched/sch_gred.c
306
red_restart(&q->vars);
net/sched/sch_gred.c
307
q->backlog = 0;
net/sched/sch_gred.c
334
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
336
if (!q)
net/sched/sch_gred.c
339
opt->set.tab[i].limit = q->limit;
net/sched/sch_gred.c
340
opt->set.tab[i].prio = q->prio;
net/sched/sch_gred.c
341
opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
net/sched/sch_gred.c
342
opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
net/sched/sch_gred.c
343
opt->set.tab[i].is_ecn = gred_use_ecn(q);
net/sched/sch_gred.c
344
opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
net/sched/sch_gred.c
345
opt->set.tab[i].probability = q->parms.max_P;
net/sched/sch_gred.c
346
opt->set.tab[i].backlog = &q->backlog;
net/sched/sch_gred.c
403
static inline void gred_destroy_vq(struct gred_sched_data *q)
net/sched/sch_gred.c
405
kfree(q);
net/sched/sch_gred.c
488
struct gred_sched_data *q = table->tab[dp];
net/sched/sch_gred.c
495
if (!q) {
net/sched/sch_gred.c
496
table->tab[dp] = q = *prealloc;
net/sched/sch_gred.c
498
if (!q)
net/sched/sch_gred.c
500
q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
net/sched/sch_gred.c
503
q->DP = dp;
net/sched/sch_gred.c
504
q->prio = prio;
net/sched/sch_gred.c
506
q->limit = sch->limit;
net/sched/sch_gred.c
508
q->limit = ctl->limit;
net/sched/sch_gred.c
510
if (q->backlog == 0)
net/sched/sch_gred.c
511
red_end_of_idle_period(&q->vars);
net/sched/sch_gred.c
513
red_set_parms(&q->parms,
net/sched/sch_gred.c
516
red_set_vars(&q->vars);
net/sched/sch_gred.c
791
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
793
max_p[i] = q ? q->parms.max_P : 0;
net/sched/sch_gred.c
807
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
813
if (!q) {
net/sched/sch_gred.c
822
opt.limit = q->limit;
net/sched/sch_gred.c
823
opt.DP = q->DP;
net/sched/sch_gred.c
824
opt.backlog = gred_backlog(table, q, sch);
net/sched/sch_gred.c
825
opt.prio = q->prio;
net/sched/sch_gred.c
826
opt.qth_min = q->parms.qth_min >> q->parms.Wlog;
net/sched/sch_gred.c
827
opt.qth_max = q->parms.qth_max >> q->parms.Wlog;
net/sched/sch_gred.c
828
opt.Wlog = q->parms.Wlog;
net/sched/sch_gred.c
829
opt.Plog = q->parms.Plog;
net/sched/sch_gred.c
830
opt.Scell_log = q->parms.Scell_log;
net/sched/sch_gred.c
831
opt.early = q->stats.prob_drop;
net/sched/sch_gred.c
832
opt.forced = q->stats.forced_drop;
net/sched/sch_gred.c
833
opt.pdrop = q->stats.pdrop;
net/sched/sch_gred.c
834
opt.packets = q->packetsin;
net/sched/sch_gred.c
835
opt.bytesin = q->bytesin;
net/sched/sch_gred.c
838
gred_load_wred_set(table, q);
net/sched/sch_gred.c
840
qavg = red_calc_qavg(&q->parms, &q->vars,
net/sched/sch_gred.c
841
q->vars.qavg >> q->parms.Wlog);
net/sched/sch_gred.c
842
opt.qave = qavg >> q->parms.Wlog;
net/sched/sch_gred.c
857
struct gred_sched_data *q = table->tab[i];
net/sched/sch_gred.c
860
if (!q)
net/sched/sch_gred.c
867
if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
net/sched/sch_gred.c
870
if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
net/sched/sch_gred.c
874
if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
net/sched/sch_gred.c
877
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
net/sched/sch_gred.c
880
gred_backlog(table, q, sch)))
net/sched/sch_gred.c
883
q->stats.prob_drop))
net/sched/sch_gred.c
886
q->stats.prob_mark))
net/sched/sch_gred.c
889
q->stats.forced_drop))
net/sched/sch_gred.c
892
q->stats.forced_mark))
net/sched/sch_gred.c
894
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
net/sched/sch_gred.c
99
struct gred_sched_data *q = table->tab[i];
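
The sch_gred.c hits above revolve around one pattern: pick a per-drop-parameter virtual queue out of t->tab[dp] with a bounds check, then test bit flags in q->red_flags (gred_use_ecn()/gred_use_harddrop() are plain bit tests). A standalone sketch under those assumptions, with simplified stand-in structs:

#include <stddef.h>
#include <stdio.h>

#define TC_RED_ECN	1
#define TC_RED_HARDDROP	2
#define MAX_DPs		16

struct vq { unsigned red_flags; };

struct gred_table { unsigned DPs; struct vq *tab[MAX_DPs]; };

/* mirrors "if (dp >= t->DPs || (q = t->tab[dp]) == NULL)" */
static struct vq *lookup_vq(struct gred_table *t, unsigned dp)
{
	if (dp >= t->DPs)
		return NULL;
	return t->tab[dp];
}

static int use_ecn(const struct vq *q)      { return q->red_flags & TC_RED_ECN; }
static int use_harddrop(const struct vq *q) { return q->red_flags & TC_RED_HARDDROP; }

int main(void)
{
	struct vq v = { .red_flags = TC_RED_ECN };
	struct gred_table t = { .DPs = 4 };
	struct vq *q;

	t.tab[2] = &v;
	q = lookup_vq(&t, 2);
	if (q)
		printf("dp 2: ecn=%d harddrop=%d\n", use_ecn(q), use_harddrop(q));
	return 0;
}
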
net/sched/sch_hfsc.c
1013
parent = &q->root;
net/sched/sch_hfsc.c
1058
cl->sched = q;
net/sched/sch_hfsc.c
1072
if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
net/sched/sch_hfsc.c
1077
qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
net/sched/sch_hfsc.c
1084
qdisc_class_hash_grow(sch, &q->clhash);
net/sched/sch_hfsc.c
1093
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1098
if (cl != &q->root)
net/sched/sch_hfsc.c
1106
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1110
cl == &q->root) {
net/sched/sch_hfsc.c
1121
qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
net/sched/sch_hfsc.c
1132
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1144
head = &q->root;
net/sched/sch_hfsc.c
1145
tcf = rcu_dereference_bh(q->root.filter_list);
net/sched/sch_hfsc.c
1177
READ_ONCE(q->defcls)), sch);
net/sched/sch_hfsc.c
1260
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1264
cl = &q->root;
net/sched/sch_hfsc.c
1359
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1366
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_hfsc.c
1367
hlist_for_each_entry(cl, &q->clhash.hash[i],
net/sched/sch_hfsc.c
1378
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1382
cl = eltree_get_minel(q);
net/sched/sch_hfsc.c
1385
if (q->root.cl_cfmin != 0) {
net/sched/sch_hfsc.c
1386
if (next_time == 0 || next_time > q->root.cl_cfmin)
net/sched/sch_hfsc.c
1387
next_time = q->root.cl_cfmin;
net/sched/sch_hfsc.c
1390
qdisc_watchdog_schedule(&q->watchdog, next_time);
net/sched/sch_hfsc.c
1397
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1401
qdisc_watchdog_init(&q->watchdog, sch);
net/sched/sch_hfsc.c
1407
q->defcls = qopt->defcls;
net/sched/sch_hfsc.c
1408
err = qdisc_class_hash_init(&q->clhash);
net/sched/sch_hfsc.c
1411
q->eligible = RB_ROOT;
net/sched/sch_hfsc.c
1413
err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
net/sched/sch_hfsc.c
1417
gnet_stats_basic_sync_init(&q->root.bstats);
net/sched/sch_hfsc.c
1418
q->root.cl_common.classid = sch->handle;
net/sched/sch_hfsc.c
1419
q->root.sched = q;
net/sched/sch_hfsc.c
1420
q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
net/sched/sch_hfsc.c
1422
if (q->root.qdisc == NULL)
net/sched/sch_hfsc.c
1423
q->root.qdisc = &noop_qdisc;
net/sched/sch_hfsc.c
1425
qdisc_hash_add(q->root.qdisc, true);
net/sched/sch_hfsc.c
1426
INIT_LIST_HEAD(&q->root.children);
net/sched/sch_hfsc.c
1427
q->root.vt_tree = RB_ROOT;
net/sched/sch_hfsc.c
1428
q->root.cf_tree = RB_ROOT;
net/sched/sch_hfsc.c
1430
qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
net/sched/sch_hfsc.c
1431
qdisc_class_hash_grow(sch, &q->clhash);
net/sched/sch_hfsc.c
1440
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1447
WRITE_ONCE(q->defcls, qopt->defcls);
net/sched/sch_hfsc.c
1485
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1489
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_hfsc.c
1490
hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
net/sched/sch_hfsc.c
1493
q->eligible = RB_ROOT;
net/sched/sch_hfsc.c
1494
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_hfsc.c
1500
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1505
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_hfsc.c
1506
hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
net/sched/sch_hfsc.c
1511
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_hfsc.c
1512
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
net/sched/sch_hfsc.c
1516
qdisc_class_hash_destroy(&q->clhash);
net/sched/sch_hfsc.c
1517
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_hfsc.c
1523
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1527
qopt.defcls = READ_ONCE(q->defcls);
net/sched/sch_hfsc.c
1553
first = !cl->qdisc->q.qlen;
net/sched/sch_hfsc.c
1564
sch->q.qlen++;
net/sched/sch_hfsc.c
1587
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
1594
if (sch->q.qlen == 0)
net/sched/sch_hfsc.c
1604
cl = eltree_get_mindl(q, cur_time);
net/sched/sch_hfsc.c
1612
cl = vttree_get_minvt(&q->root, cur_time);
net/sched/sch_hfsc.c
1632
if (cl->qdisc->q.qlen != 0) {
net/sched/sch_hfsc.c
1639
if (cl->qdisc->q.qlen != 0) {
net/sched/sch_hfsc.c
1653
sch->q.qlen--;
net/sched/sch_hfsc.c
226
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
net/sched/sch_hfsc.c
231
for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
net/sched/sch_hfsc.c
243
eltree_get_minel(struct hfsc_sched *q)
net/sched/sch_hfsc.c
247
n = rb_first(&q->eligible);
net/sched/sch_hfsc.c
756
if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
net/sched/sch_hfsc.c
857
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
860
clc = qdisc_class_find(&q->clhash, classid);
net/sched/sch_hfsc.c
916
struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_hfsc.c
984
if (cl->qdisc->q.qlen != 0)
net/sched/sch_hfsc.c
990
if (cl->qdisc->q.qlen != 0) {
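
The sch_hfsc.c hits above include the eligible-tree walk (eltree_get_mindl(): rb_first()/rb_next() over q->eligible): among classes whose eligible time has passed, pick the one with the smallest deadline. A minimal sketch that substitutes a sorted array for the rbtree (field names cl_e/cl_d kept, everything else invented):

#include <stdint.h>
#include <stdio.h>

struct cls { uint64_t cl_e, cl_d; const char *name; };

static struct cls *get_mindl(struct cls *v, int n, uint64_t cur_time)
{
	struct cls *best = NULL;

	for (int i = 0; i < n; i++) {	/* v[] sorted by cl_e, like rb_first()/rb_next() */
		if (v[i].cl_e > cur_time)	/* not yet eligible: tree order lets us stop */
			break;
		if (!best || v[i].cl_d < best->cl_d)
			best = &v[i];
	}
	return best;
}

int main(void)
{
	struct cls v[] = { { 10, 50, "a" }, { 20, 30, "b" }, { 90, 5, "c" } };
	struct cls *cl = get_mindl(v, 3, 40);

	printf("picked %s\n", cl ? cl->name : "none");	/* "b": eligible, min deadline */
	return 0;
}
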
net/sched/sch_hhf.c
182
struct hhf_sched_data *q)
net/sched/sch_hhf.c
191
u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;
net/sched/sch_hhf.c
201
q->hh_flows_current_cnt--;
net/sched/sch_hhf.c
213
struct hhf_sched_data *q)
net/sched/sch_hhf.c
221
u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;
net/sched/sch_hhf.c
228
if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
net/sched/sch_hhf.c
229
q->hh_flows_overlimit++;
net/sched/sch_hhf.c
237
q->hh_flows_current_cnt++;
net/sched/sch_hhf.c
249
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
259
prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
net/sched/sch_hhf.c
262
bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
net/sched/sch_hhf.c
263
q->hhf_arrays_reset_timestamp = now;
net/sched/sch_hhf.c
267
hash = skb_get_hash_perturb(skb, &q->perturbation);
net/sched/sch_hhf.c
271
flow = seek_list(hash, &q->hh_flows[flow_pos], q);
net/sched/sch_hhf.c
294
if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
net/sched/sch_hhf.c
295
q->hhf_arrays[i][filter_pos[i]] = 0;
net/sched/sch_hhf.c
296
__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
net/sched/sch_hhf.c
299
val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
net/sched/sch_hhf.c
305
if (min_hhf_val > q->hhf_admit_bytes) {
net/sched/sch_hhf.c
307
flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
net/sched/sch_hhf.c
312
q->hh_flows_total_cnt++;
net/sched/sch_hhf.c
322
if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
net/sched/sch_hhf.c
323
q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
net/sched/sch_hhf.c
351
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
355
bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
net/sched/sch_hhf.c
357
bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];
net/sched/sch_hhf.c
362
sch->q.qlen--;
net/sched/sch_hhf.c
368
return bucket - q->buckets;
net/sched/sch_hhf.c
374
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
381
bucket = &q->buckets[idx];
net/sched/sch_hhf.c
395
list_add_tail(&bucket->bucketchain, &q->old_buckets);
net/sched/sch_hhf.c
397
weight = q->hhf_non_hh_weight;
net/sched/sch_hhf.c
398
list_add_tail(&bucket->bucketchain, &q->new_buckets);
net/sched/sch_hhf.c
400
bucket->deficit = weight * q->quantum;
net/sched/sch_hhf.c
402
if (++sch->q.qlen <= sch->limit)
net/sched/sch_hhf.c
406
q->drop_overlimit++;
net/sched/sch_hhf.c
420
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
426
head = &q->new_buckets;
net/sched/sch_hhf.c
428
head = &q->old_buckets;
net/sched/sch_hhf.c
435
int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
net/sched/sch_hhf.c
436
1 : q->hhf_non_hh_weight;
net/sched/sch_hhf.c
438
bucket->deficit += weight * q->quantum;
net/sched/sch_hhf.c
439
list_move_tail(&bucket->bucketchain, &q->old_buckets);
net/sched/sch_hhf.c
445
sch->q.qlen--;
net/sched/sch_hhf.c
451
if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
net/sched/sch_hhf.c
452
list_move_tail(&bucket->bucketchain, &q->old_buckets);
net/sched/sch_hhf.c
474
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
477
kvfree(q->hhf_arrays[i]);
net/sched/sch_hhf.c
478
kvfree(q->hhf_valid_bits[i]);
net/sched/sch_hhf.c
481
if (!q->hh_flows)
net/sched/sch_hhf.c
486
struct list_head *head = &q->hh_flows[i];
net/sched/sch_hhf.c
495
kvfree(q->hh_flows);
net/sched/sch_hhf.c
512
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
516
u32 new_quantum = q->quantum;
net/sched/sch_hhf.c
517
u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;
net/sched/sch_hhf.c
539
WRITE_ONCE(q->quantum, new_quantum);
net/sched/sch_hhf.c
540
WRITE_ONCE(q->hhf_non_hh_weight, new_hhf_non_hh_weight);
net/sched/sch_hhf.c
543
WRITE_ONCE(q->hh_flows_limit,
net/sched/sch_hhf.c
549
WRITE_ONCE(q->hhf_reset_timeout,
net/sched/sch_hhf.c
554
WRITE_ONCE(q->hhf_admit_bytes,
net/sched/sch_hhf.c
560
WRITE_ONCE(q->hhf_evict_timeout,
net/sched/sch_hhf.c
564
while (sch->q.qlen > sch->limit) {
net/sched/sch_hhf.c
583
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
587
q->quantum = psched_mtu(qdisc_dev(sch));
net/sched/sch_hhf.c
588
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
net/sched/sch_hhf.c
589
INIT_LIST_HEAD(&q->new_buckets);
net/sched/sch_hhf.c
590
INIT_LIST_HEAD(&q->old_buckets);
net/sched/sch_hhf.c
593
q->hhf_reset_timeout = HZ / 25; /* 40 ms */
net/sched/sch_hhf.c
594
q->hhf_admit_bytes = 131072; /* 128 KB */
net/sched/sch_hhf.c
595
q->hhf_evict_timeout = HZ; /* 1 sec */
net/sched/sch_hhf.c
596
q->hhf_non_hh_weight = 2;
net/sched/sch_hhf.c
605
if (!q->hh_flows) {
net/sched/sch_hhf.c
607
q->hh_flows = kvzalloc_objs(struct list_head, HH_FLOWS_CNT);
net/sched/sch_hhf.c
608
if (!q->hh_flows)
net/sched/sch_hhf.c
611
INIT_LIST_HEAD(&q->hh_flows[i]);
net/sched/sch_hhf.c
614
q->hh_flows_limit = 2 * HH_FLOWS_CNT;
net/sched/sch_hhf.c
615
q->hh_flows_overlimit = 0;
net/sched/sch_hhf.c
616
q->hh_flows_total_cnt = 0;
net/sched/sch_hhf.c
617
q->hh_flows_current_cnt = 0;
net/sched/sch_hhf.c
621
q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN,
net/sched/sch_hhf.c
624
if (!q->hhf_arrays[i]) {
net/sched/sch_hhf.c
631
q->hhf_arrays_reset_timestamp = hhf_time_stamp();
net/sched/sch_hhf.c
635
q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
net/sched/sch_hhf.c
637
if (!q->hhf_valid_bits[i]) {
net/sched/sch_hhf.c
647
struct wdrr_bucket *bucket = q->buckets + i;
net/sched/sch_hhf.c
658
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
666
nla_put_u32(skb, TCA_HHF_QUANTUM, READ_ONCE(q->quantum)) ||
net/sched/sch_hhf.c
668
READ_ONCE(q->hh_flows_limit)) ||
net/sched/sch_hhf.c
670
jiffies_to_usecs(READ_ONCE(q->hhf_reset_timeout))) ||
net/sched/sch_hhf.c
672
READ_ONCE(q->hhf_admit_bytes)) ||
net/sched/sch_hhf.c
674
jiffies_to_usecs(READ_ONCE(q->hhf_evict_timeout))) ||
net/sched/sch_hhf.c
676
READ_ONCE(q->hhf_non_hh_weight)))
net/sched/sch_hhf.c
687
struct hhf_sched_data *q = qdisc_priv(sch);
net/sched/sch_hhf.c
689
.drop_overlimit = q->drop_overlimit,
net/sched/sch_hhf.c
690
.hh_overlimit = q->hh_flows_overlimit,
net/sched/sch_hhf.c
691
.hh_tot_count = q->hh_flows_total_cnt,
net/sched/sch_hhf.c
692
.hh_cur_count = q->hh_flows_current_cnt,
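
The sch_hhf.c hits above show the weighted DRR step: a bucket sends while its deficit covers the packet, and when it stays backlogged it is topped up by weight * q->quantum and rotated to the old-buckets list. A rough standalone sketch (replenishment ordering simplified; heavy-hitter bucket weight 1, non-HH weight 2, matching the defaults quoted above):

#include <stdio.h>

#define QUANTUM 1500	/* q->quantum defaults to the device MTU */

struct bucket { int deficit, weight, pkts, pkt_len; };

static void serve(struct bucket *b)
{
	while (b->pkts && b->deficit >= b->pkt_len) {	/* send while deficit allows */
		b->deficit -= b->pkt_len;
		b->pkts--;
	}
	if (b->pkts)
		b->deficit += b->weight * QUANTUM;	/* mirrors deficit += weight * q->quantum */
}

int main(void)
{
	struct bucket hh     = { .deficit = QUANTUM,     .weight = 1, .pkts = 6, .pkt_len = 1500 };
	struct bucket non_hh = { .deficit = 2 * QUANTUM, .weight = 2, .pkts = 6, .pkt_len = 1500 };

	for (int round = 0; round < 3; round++) {
		serve(&hh);
		serve(&non_hh);
		printf("round %d: hh left=%d non-hh left=%d\n", round, hh.pkts, non_hh.pkts);
	}
	return 0;	/* non-HH drains twice as fast: the 1:2 weighting */
}
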
net/sched/sch_htb.c
1002
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1006
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_htb.c
1007
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
net/sched/sch_htb.c
1011
if (cl->leaf.q && !q->offload)
net/sched/sch_htb.c
1012
qdisc_reset(cl->leaf.q);
net/sched/sch_htb.c
1018
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_htb.c
1019
__qdisc_reset_queue(&q->direct_queue);
net/sched/sch_htb.c
1020
memset(q->hlevel, 0, sizeof(q->hlevel));
net/sched/sch_htb.c
1021
memset(q->row_mask, 0, sizeof(q->row_mask));
net/sched/sch_htb.c
1037
struct htb_sched *q = container_of(work, struct htb_sched, work);
net/sched/sch_htb.c
1038
struct Qdisc *sch = q->watchdog.qdisc;
net/sched/sch_htb.c
1055
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1062
qdisc_watchdog_init(&q->watchdog, sch);
net/sched/sch_htb.c
1063
INIT_WORK(&q->work, htb_work_func);
net/sched/sch_htb.c
1068
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_htb.c
1097
q->num_direct_qdiscs = dev->real_num_tx_queues;
net/sched/sch_htb.c
1098
q->direct_qdiscs = kzalloc_objs(*q->direct_qdiscs,
net/sched/sch_htb.c
1099
q->num_direct_qdiscs);
net/sched/sch_htb.c
1100
if (!q->direct_qdiscs)
net/sched/sch_htb.c
1104
err = qdisc_class_hash_init(&q->clhash);
net/sched/sch_htb.c
1109
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
net/sched/sch_htb.c
1111
q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
net/sched/sch_htb.c
1113
if ((q->rate2quantum = gopt->rate2quantum) < 1)
net/sched/sch_htb.c
1114
q->rate2quantum = 1;
net/sched/sch_htb.c
1115
q->defcls = gopt->defcls;
net/sched/sch_htb.c
1120
for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
net/sched/sch_htb.c
1130
q->direct_qdiscs[ntx] = qdisc;
net/sched/sch_htb.c
1139
.classid = TC_H_MIN(q->defcls),
net/sched/sch_htb.c
1149
q->offload = true;
net/sched/sch_htb.c
1157
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1160
for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
net/sched/sch_htb.c
1161
struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
net/sched/sch_htb.c
1167
for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
net/sched/sch_htb.c
1174
kfree(q->direct_qdiscs);
net/sched/sch_htb.c
1175
q->direct_qdiscs = NULL;
net/sched/sch_htb.c
1196
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1198
if (q->offload)
net/sched/sch_htb.c
1206
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1210
if (q->offload)
net/sched/sch_htb.c
1215
sch->qstats.overlimits = q->overlimits;
net/sched/sch_htb.c
1220
gopt.direct_pkts = q->direct_pkts;
net/sched/sch_htb.c
1222
gopt.rate2quantum = q->rate2quantum;
net/sched/sch_htb.c
1223
gopt.defcls = q->defcls;
net/sched/sch_htb.c
1230
nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
net/sched/sch_htb.c
1232
if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
net/sched/sch_htb.c
1246
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1255
if (!cl->level && cl->leaf.q)
net/sched/sch_htb.c
1256
tcm->tcm_info = cl->leaf.q->handle;
net/sched/sch_htb.c
126
struct Qdisc *q;
net/sched/sch_htb.c
1273
if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
net/sched/sch_htb.c
1291
static void htb_offload_aggregate_stats(struct htb_sched *q,
net/sched/sch_htb.c
1300
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_htb.c
1301
hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
net/sched/sch_htb.c
1313
bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
net/sched/sch_htb.c
1314
packets += u64_stats_read(&c->leaf.q->bstats.packets);
net/sched/sch_htb.c
1325
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1332
if (!cl->level && cl->leaf.q)
net/sched/sch_htb.c
1333
qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
net/sched/sch_htb.c
1340
if (q->offload) {
net/sched/sch_htb.c
1342
if (cl->leaf.q)
net/sched/sch_htb.c
1343
cl->bstats = cl->leaf.q->bstats;
net/sched/sch_htb.c
1350
htb_offload_aggregate_stats(q, cl);
net/sched/sch_htb.c
1367
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1370
if (!q->offload)
net/sched/sch_htb.c
1405
if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
net/sched/sch_htb.c
1406
WARN_ON(cl->leaf.q->dev_queue != queue);
net/sched/sch_htb.c
1426
WARN_ON(qdisc != cl_old->leaf.q);
net/sched/sch_htb.c
1429
if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
net/sched/sch_htb.c
1430
cl_old->leaf.q->dev_queue = queue_new;
net/sched/sch_htb.c
1436
qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
net/sched/sch_htb.c
1448
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1454
if (q->offload)
net/sched/sch_htb.c
1464
if (q->offload) {
net/sched/sch_htb.c
1470
*old = qdisc_replace(sch, new, &cl->leaf.q);
net/sched/sch_htb.c
1472
if (q->offload) {
net/sched/sch_htb.c
1483
return !cl->level ? cl->leaf.q : NULL;
net/sched/sch_htb.c
1507
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1510
WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
net/sched/sch_htb.c
1514
&q->hlevel[parent->level].wait_pq);
net/sched/sch_htb.c
1518
parent->leaf.q = new_q ? new_q : &noop_qdisc;
net/sched/sch_htb.c
1523
if (q->offload)
net/sched/sch_htb.c
1546
struct Qdisc *q = cl->leaf.q;
net/sched/sch_htb.c
1553
WARN_ON(!q);
net/sched/sch_htb.c
1564
WARN_ON(old != q);
net/sched/sch_htb.c
1569
u64_stats_read(&q->bstats.bytes),
net/sched/sch_htb.c
1570
u64_stats_read(&q->bstats.packets));
net/sched/sch_htb.c
1606
WARN_ON(!cl->leaf.q);
net/sched/sch_htb.c
1607
qdisc_put(cl->leaf.q);
net/sched/sch_htb.c
1618
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1624
cancel_work_sync(&q->work);
net/sched/sch_htb.c
1625
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_htb.c
1631
tcf_block_put(q->block);
net/sched/sch_htb.c
1633
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_htb.c
1634
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
net/sched/sch_htb.c
1643
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_htb.c
1644
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
net/sched/sch_htb.c
1648
if (!q->offload) {
net/sched/sch_htb.c
1663
qdisc_class_hash_remove(&q->clhash,
net/sched/sch_htb.c
1675
qdisc_class_hash_destroy(&q->clhash);
net/sched/sch_htb.c
1676
__qdisc_reset_queue(&q->direct_queue);
net/sched/sch_htb.c
1678
if (q->offload) {
net/sched/sch_htb.c
1685
if (!q->direct_qdiscs)
net/sched/sch_htb.c
1687
for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
net/sched/sch_htb.c
1688
qdisc_put(q->direct_qdiscs[i]);
net/sched/sch_htb.c
1689
kfree(q->direct_qdiscs);
net/sched/sch_htb.c
1695
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1713
if (q->offload) {
net/sched/sch_htb.c
1723
if (q->offload)
net/sched/sch_htb.c
1729
if (q->offload)
net/sched/sch_htb.c
1736
qdisc_purge_queue(cl->leaf.q);
net/sched/sch_htb.c
1739
qdisc_class_hash_remove(&q->clhash, &cl->common);
net/sched/sch_htb.c
1743
htb_deactivate(q, cl);
net/sched/sch_htb.c
1747
&q->hlevel[cl->level].wait_pq);
net/sched/sch_htb.c
1763
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1793
if (q->offload) {
net/sched/sch_htb.c
188
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
1886
if (!q->offload) {
net/sched/sch_htb.c
191
clc = qdisc_class_find(&q->clhash, handle);
net/sched/sch_htb.c
1912
WARN_ON(old_q != parent->leaf.q);
net/sched/sch_htb.c
1938
if (q->offload) {
net/sched/sch_htb.c
1949
qdisc_purge_queue(parent->leaf.q);
net/sched/sch_htb.c
1950
parent_qdisc = parent->leaf.q;
net/sched/sch_htb.c
1951
htb_deactivate(q, parent);
net/sched/sch_htb.c
1955
htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
net/sched/sch_htb.c
1964
cl->leaf.q = new_q ? new_q : &noop_qdisc;
net/sched/sch_htb.c
1965
if (q->offload)
net/sched/sch_htb.c
1978
qdisc_class_hash_insert(&q->clhash, &cl->common);
net/sched/sch_htb.c
1981
if (cl->leaf.q != &noop_qdisc)
net/sched/sch_htb.c
1982
qdisc_hash_add(cl->leaf.q, true);
net/sched/sch_htb.c
1994
if (q->offload) {
net/sched/sch_htb.c
2029
do_div(quantum, q->rate2quantum);
net/sched/sch_htb.c
2057
qdisc_class_hash_grow(sch, &q->clhash);
net/sched/sch_htb.c
2074
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
2077
return cl ? cl->block : q->block;
net/sched/sch_htb.c
2108
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
2115
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_htb.c
2116
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
net/sched/sch_htb.c
222
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
241
tcf = rcu_dereference_bh(q->filter_list);
net/sched/sch_htb.c
272
cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
net/sched/sch_htb.c
316
static void htb_add_to_wait_tree(struct htb_sched *q,
net/sched/sch_htb.c
319
struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
net/sched/sch_htb.c
321
cl->pq_key = q->now + delay;
net/sched/sch_htb.c
322
if (cl->pq_key == q->now)
net/sched/sch_htb.c
326
if (q->near_ev_cache[cl->level] > cl->pq_key)
net/sched/sch_htb.c
327
q->near_ev_cache[cl->level] = cl->pq_key;
net/sched/sch_htb.c
339
rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
net/sched/sch_htb.c
364
static inline void htb_add_class_to_row(struct htb_sched *q,
net/sched/sch_htb.c
367
q->row_mask[cl->level] |= mask;
net/sched/sch_htb.c
371
htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
net/sched/sch_htb.c
396
static inline void htb_remove_class_from_row(struct htb_sched *q,
net/sched/sch_htb.c
400
struct htb_level *hlevel = &q->hlevel[cl->level];
net/sched/sch_htb.c
414
q->row_mask[cl->level] &= ~m;
net/sched/sch_htb.c
426
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
net/sched/sch_htb.c
454
htb_add_class_to_row(q, cl, mask);
net/sched/sch_htb.c
466
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
net/sched/sch_htb.c
500
htb_remove_class_from_row(q, cl, mask);
net/sched/sch_htb.c
562
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
net/sched/sch_htb.c
571
q->overlimits++;
net/sched/sch_htb.c
576
htb_deactivate_prios(q, cl);
net/sched/sch_htb.c
579
htb_activate_prios(q, cl);
net/sched/sch_htb.c
593
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
net/sched/sch_htb.c
595
WARN_ON(cl->level || !cl->leaf.q);
net/sched/sch_htb.c
599
htb_activate_prios(q, cl);
net/sched/sch_htb.c
611
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
net/sched/sch_htb.c
615
htb_deactivate_prios(q, cl);
net/sched/sch_htb.c
624
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
629
if (q->direct_queue.qlen < q->direct_qlen) {
net/sched/sch_htb.c
630
__qdisc_enqueue_tail(skb, &q->direct_queue);
net/sched/sch_htb.c
631
q->direct_pkts++;
net/sched/sch_htb.c
642
} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
net/sched/sch_htb.c
650
htb_activate(q, cl);
net/sched/sch_htb.c
654
sch->q.qlen++;
net/sched/sch_htb.c
699
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
net/sched/sch_htb.c
707
diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
net/sched/sch_htb.c
717
cl->t_c = q->now;
net/sched/sch_htb.c
721
htb_change_class_mode(q, cl, &diff);
net/sched/sch_htb.c
724
htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
net/sched/sch_htb.c
726
htb_add_to_wait_tree(q, cl, diff);
net/sched/sch_htb.c
747
static s64 htb_do_events(struct htb_sched *q, const int level,
net/sched/sch_htb.c
755
struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
net/sched/sch_htb.c
766
if (cl->pq_key > q->now)
net/sched/sch_htb.c
770
diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
net/sched/sch_htb.c
771
htb_change_class_mode(q, cl, &diff);
net/sched/sch_htb.c
773
htb_add_to_wait_tree(q, cl, diff);
net/sched/sch_htb.c
777
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
net/sched/sch_htb.c
779
q->warned |= HTB_WARN_TOOMANYEVENTS;
net/sched/sch_htb.c
782
return q->now;
net/sched/sch_htb.c
874
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
net/sched/sch_htb.c
879
struct htb_level *hlevel = &q->hlevel[level];
net/sched/sch_htb.c
895
if (unlikely(cl->leaf.q->q.qlen == 0)) {
net/sched/sch_htb.c
897
htb_deactivate(q, cl);
net/sched/sch_htb.c
900
if ((q->row_mask[level] & (1 << prio)) == 0)
net/sched/sch_htb.c
911
skb = cl->leaf.q->dequeue(cl->leaf.q);
net/sched/sch_htb.c
915
qdisc_warn_nonwc("htb", cl->leaf.q);
net/sched/sch_htb.c
917
&q->hlevel[0].hprio[prio].ptr);
net/sched/sch_htb.c
928
&q->hlevel[0].hprio[prio].ptr);
net/sched/sch_htb.c
933
if (!cl->leaf.q->q.qlen)
net/sched/sch_htb.c
934
htb_deactivate(q, cl);
net/sched/sch_htb.c
935
htb_charge_class(q, cl, level, skb);
net/sched/sch_htb.c
943
struct htb_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
949
skb = __qdisc_dequeue_head(&q->direct_queue);
net/sched/sch_htb.c
954
sch->q.qlen--;
net/sched/sch_htb.c
958
if (!sch->q.qlen)
net/sched/sch_htb.c
960
q->now = ktime_get_ns();
net/sched/sch_htb.c
963
next_event = q->now + 5LLU * NSEC_PER_SEC;
net/sched/sch_htb.c
968
s64 event = q->near_ev_cache[level];
net/sched/sch_htb.c
970
if (q->now >= event) {
net/sched/sch_htb.c
971
event = htb_do_events(q, level, start_at);
net/sched/sch_htb.c
973
event = q->now + NSEC_PER_SEC;
net/sched/sch_htb.c
974
q->near_ev_cache[level] = event;
net/sched/sch_htb.c
980
m = ~q->row_mask[level];
net/sched/sch_htb.c
985
skb = htb_dequeue_tree(q, prio, level);
net/sched/sch_htb.c
990
if (likely(next_event > q->now))
net/sched/sch_htb.c
991
qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
net/sched/sch_htb.c
993
schedule_work(&q->work);
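
Many sch_htb.c hits above maintain q->row_mask[level], a bitmask of priorities that have active classes at each level; dequeue scans its set bits, and htb_add_class_to_row()/htb_remove_class_from_row() set and clear them. A minimal sketch of that bookkeeping, assuming POSIX ffs() in place of the kernel's bit helpers:

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned row_mask = 0;

	row_mask |= 1u << 3;	/* activate prio 3, as in htb_add_class_to_row() */
	row_mask |= 1u << 1;	/* activate prio 1 */

	for (unsigned m = row_mask; m; m &= m - 1)	/* visit each set bit */
		printf("serve prio %d\n", ffs(m) - 1);

	row_mask &= ~(1u << 1);	/* deactivate, as in htb_remove_class_from_row() */
	printf("mask now %#x\n", row_mask);
	return 0;
}
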
net/sched/sch_ingress.c
100
q->block_info.chain_head_change = clsact_chain_head_change;
net/sched/sch_ingress.c
101
q->block_info.chain_head_change_priv = &q->miniqp;
net/sched/sch_ingress.c
103
err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
net/sched/sch_ingress.c
107
mini_qdisc_pair_block_init(&q->miniqp, q->block);
net/sched/sch_ingress.c
114
struct ingress_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
121
tcf_block_put_ext(q->block, sch, &q->block_info);
net/sched/sch_ingress.c
123
if (mini_qdisc_pair_inited(&q->miniqp)) {
net/sched/sch_ingress.c
203
struct clsact_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
207
return q->ingress_block;
net/sched/sch_ingress.c
209
return q->egress_block;
net/sched/sch_ingress.c
217
struct clsact_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
219
q->ingress_block_info.block_index = block_index;
net/sched/sch_ingress.c
224
struct clsact_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
226
q->egress_block_info.block_index = block_index;
net/sched/sch_ingress.c
231
struct clsact_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
233
return q->ingress_block_info.block_index;
net/sched/sch_ingress.c
238
struct clsact_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
240
return q->egress_block_info.block_index;
net/sched/sch_ingress.c
246
struct clsact_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
262
mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
net/sched/sch_ingress.c
266
q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
net/sched/sch_ingress.c
267
q->ingress_block_info.chain_head_change = clsact_chain_head_change;
net/sched/sch_ingress.c
268
q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
net/sched/sch_ingress.c
270
err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
net/sched/sch_ingress.c
275
mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block);
net/sched/sch_ingress.c
281
mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
net/sched/sch_ingress.c
285
q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
net/sched/sch_ingress.c
286
q->egress_block_info.chain_head_change = clsact_chain_head_change;
net/sched/sch_ingress.c
287
q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
net/sched/sch_ingress.c
289
return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
net/sched/sch_ingress.c
295
struct clsact_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
301
tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
net/sched/sch_ingress.c
302
tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
net/sched/sch_ingress.c
304
if (mini_qdisc_pair_inited(&q->miniqp_ingress)) {
net/sched/sch_ingress.c
313
if (mini_qdisc_pair_inited(&q->miniqp_egress)) {
net/sched/sch_ingress.c
51
struct ingress_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
53
return q->block;
net/sched/sch_ingress.c
65
struct ingress_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
67
q->block_info.block_index = block_index;
net/sched/sch_ingress.c
72
struct ingress_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
74
return q->block_info.block_index;
net/sched/sch_ingress.c
80
struct ingress_sched_data *q = qdisc_priv(sch);
net/sched/sch_ingress.c
95
mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
net/sched/sch_ingress.c
99
q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
net/sched/sch_mq.c
149
sch->q.qlen = 0;
net/sched/sch_mq.c
166
sch->q.qlen += qdisc_qlen(qdisc);
net/sched/sch_mqprio.c
561
sch->q.qlen = 0;
net/sched/sch_mqprio.c
578
sch->q.qlen += qdisc_qlen(qdisc);
net/sched/sch_mqprio.c
683
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
net/sched/sch_mqprio.c
684
struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
net/sched/sch_mqprio.c
692
sch->q.qlen += qdisc_qlen(qdisc);
net/sched/sch_multiq.c
104
netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
net/sched/sch_multiq.c
105
qdisc = q->queues[q->curband];
net/sched/sch_multiq.c
109
sch->q.qlen--;
net/sched/sch_multiq.c
120
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
121
unsigned int curband = q->curband;
net/sched/sch_multiq.c
126
for (band = 0; band < q->bands; band++) {
net/sched/sch_multiq.c
129
if (curband >= q->bands)
net/sched/sch_multiq.c
137
qdisc = q->queues[curband];
net/sched/sch_multiq.c
151
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
153
for (band = 0; band < q->bands; band++)
net/sched/sch_multiq.c
154
qdisc_reset(q->queues[band]);
net/sched/sch_multiq.c
155
q->curband = 0;
net/sched/sch_multiq.c
162
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
164
tcf_block_put(q->block);
net/sched/sch_multiq.c
165
for (band = 0; band < q->bands; band++)
net/sched/sch_multiq.c
166
qdisc_put(q->queues[band]);
net/sched/sch_multiq.c
168
kfree(q->queues);
net/sched/sch_multiq.c
174
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
188
removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
net/sched/sch_multiq.c
194
q->bands = qopt->bands;
net/sched/sch_multiq.c
195
for (i = q->bands; i < q->max_bands; i++) {
net/sched/sch_multiq.c
196
if (q->queues[i] != &noop_qdisc) {
net/sched/sch_multiq.c
197
struct Qdisc *child = q->queues[i];
net/sched/sch_multiq.c
199
q->queues[i] = &noop_qdisc;
net/sched/sch_multiq.c
211
for (i = 0; i < q->bands; i++) {
net/sched/sch_multiq.c
212
if (q->queues[i] == &noop_qdisc) {
net/sched/sch_multiq.c
220
old = q->queues[i];
net/sched/sch_multiq.c
221
q->queues[i] = child;
net/sched/sch_multiq.c
238
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
241
q->queues = NULL;
net/sched/sch_multiq.c
246
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_multiq.c
250
q->max_bands = qdisc_dev(sch)->num_tx_queues;
net/sched/sch_multiq.c
252
q->queues = kzalloc_objs(struct Qdisc *, q->max_bands);
net/sched/sch_multiq.c
253
if (!q->queues)
net/sched/sch_multiq.c
255
for (i = 0; i < q->max_bands; i++)
net/sched/sch_multiq.c
256
q->queues[i] = &noop_qdisc;
net/sched/sch_multiq.c
263
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
267
opt.bands = q->bands;
net/sched/sch_multiq.c
268
opt.max_bands = q->max_bands;
net/sched/sch_multiq.c
283
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
289
*old = qdisc_replace(sch, new, &q->queues[band]);
net/sched/sch_multiq.c
296
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
299
return q->queues[band];
net/sched/sch_multiq.c
304
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
307
if (band - 1 >= q->bands)
net/sched/sch_multiq.c
319
static void multiq_unbind(struct Qdisc *q, unsigned long cl)
net/sched/sch_multiq.c
32
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
326
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
329
tcm->tcm_info = q->queues[cl - 1]->handle;
net/sched/sch_multiq.c
336
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
339
cl_q = q->queues[cl - 1];
net/sched/sch_multiq.c
349
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
35
struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
net/sched/sch_multiq.c
355
for (band = 0; band < q->bands; band++) {
net/sched/sch_multiq.c
364
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
368
return q->block;
net/sched/sch_multiq.c
53
if (band >= q->bands)
net/sched/sch_multiq.c
54
return q->queues[0];
net/sched/sch_multiq.c
56
return q->queues[band];
net/sched/sch_multiq.c
79
sch->q.qlen++;
net/sched/sch_multiq.c
89
struct multiq_sched_data *q = qdisc_priv(sch);
net/sched/sch_multiq.c
94
for (band = 0; band < q->bands; band++) {
net/sched/sch_multiq.c
96
q->curband++;
net/sched/sch_multiq.c
97
if (q->curband >= q->bands)
net/sched/sch_multiq.c
98
q->curband = 0;
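
The sch_multiq.c hits above implement plain round-robin: dequeue advances q->curband, wraps at q->bands, and skips empty bands. A standalone sketch with invented stand-ins for the per-band queues:

#include <stdio.h>

#define BANDS 3

int main(void)
{
	int qlen[BANDS] = { 0, 2, 1 };	/* packets waiting per band */
	unsigned curband = 0;		/* q->curband */
	int remaining = 3;

	while (remaining) {
		int tries;

		for (tries = 0; tries < BANDS; tries++) {
			curband = (curband + 1) % BANDS;	/* advance and wrap, as quoted */
			if (qlen[curband]) {
				qlen[curband]--;
				remaining--;
				printf("dequeued from band %u\n", curband);
				break;
			}
		}
		if (tries == BANDS)	/* all bands empty */
			break;
	}
	return 0;
}
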
net/sched/sch_netem.c
1000
((struct netem_sched_data *)qdisc_priv(q))->duplicate)
net/sched/sch_netem.c
1017
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
1045
old_clg = q->clg;
net/sched/sch_netem.c
1046
old_loss_model = q->loss_model;
net/sched/sch_netem.c
1049
ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
net/sched/sch_netem.c
1051
q->loss_model = old_loss_model;
net/sched/sch_netem.c
1052
q->clg = old_clg;
net/sched/sch_netem.c
1056
q->loss_model = CLG_RANDOM;
net/sched/sch_netem.c
1060
swap(q->delay_dist, delay_dist);
net/sched/sch_netem.c
1062
swap(q->slot_dist, slot_dist);
net/sched/sch_netem.c
1065
q->latency = PSCHED_TICKS2NS(qopt->latency);
net/sched/sch_netem.c
1066
q->jitter = PSCHED_TICKS2NS(qopt->jitter);
net/sched/sch_netem.c
1067
q->limit = qopt->limit;
net/sched/sch_netem.c
1068
q->gap = qopt->gap;
net/sched/sch_netem.c
1069
q->counter = 0;
net/sched/sch_netem.c
1070
q->loss = qopt->loss;
net/sched/sch_netem.c
1076
q->duplicate = qopt->duplicate;
net/sched/sch_netem.c
1081
if (q->gap)
net/sched/sch_netem.c
1082
q->reorder = ~0;
net/sched/sch_netem.c
1085
get_correlation(q, tb[TCA_NETEM_CORR]);
net/sched/sch_netem.c
1088
get_reorder(q, tb[TCA_NETEM_REORDER]);
net/sched/sch_netem.c
1091
get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
net/sched/sch_netem.c
1094
get_rate(q, tb[TCA_NETEM_RATE]);
net/sched/sch_netem.c
1097
q->rate = max_t(u64, q->rate,
net/sched/sch_netem.c
1101
q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
net/sched/sch_netem.c
1104
q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
net/sched/sch_netem.c
1107
q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
net/sched/sch_netem.c
1110
get_slot(q, tb[TCA_NETEM_SLOT]);
net/sched/sch_netem.c
1113
q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
net/sched/sch_netem.c
1116
q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]);
net/sched/sch_netem.c
1118
q->prng.seed = get_random_u64();
net/sched/sch_netem.c
1119
prandom_seed_state(&q->prng.prng_state, q->prng.seed);
net/sched/sch_netem.c
1133
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
1136
qdisc_watchdog_init(&q->watchdog, sch);
net/sched/sch_netem.c
1141
q->loss_model = CLG_RANDOM;
net/sched/sch_netem.c
1150
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
1152
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_netem.c
1153
if (q->qdisc)
net/sched/sch_netem.c
1154
qdisc_put(q->qdisc);
net/sched/sch_netem.c
1155
dist_free(q->delay_dist);
net/sched/sch_netem.c
1156
dist_free(q->slot_dist);
net/sched/sch_netem.c
1159
static int dump_loss_model(const struct netem_sched_data *q,
net/sched/sch_netem.c
1168
switch (q->loss_model) {
net/sched/sch_netem.c
1176
.p13 = q->clg.a1,
net/sched/sch_netem.c
1177
.p31 = q->clg.a2,
net/sched/sch_netem.c
1178
.p32 = q->clg.a3,
net/sched/sch_netem.c
1179
.p14 = q->clg.a4,
net/sched/sch_netem.c
1180
.p23 = q->clg.a5,
net/sched/sch_netem.c
1189
.p = q->clg.a1,
net/sched/sch_netem.c
1190
.r = q->clg.a2,
net/sched/sch_netem.c
1191
.h = q->clg.a3,
net/sched/sch_netem.c
1192
.k1 = q->clg.a4,
net/sched/sch_netem.c
1211
const struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
1220
qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
net/sched/sch_netem.c
1222
qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
net/sched/sch_netem.c
1224
qopt.limit = q->limit;
net/sched/sch_netem.c
1225
qopt.loss = q->loss;
net/sched/sch_netem.c
1226
qopt.gap = q->gap;
net/sched/sch_netem.c
1227
qopt.duplicate = q->duplicate;
net/sched/sch_netem.c
1231
if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
net/sched/sch_netem.c
1234
if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
net/sched/sch_netem.c
1237
cor.delay_corr = q->delay_cor.rho;
net/sched/sch_netem.c
1238
cor.loss_corr = q->loss_cor.rho;
net/sched/sch_netem.c
1239
cor.dup_corr = q->dup_cor.rho;
net/sched/sch_netem.c
1243
reorder.probability = q->reorder;
net/sched/sch_netem.c
1244
reorder.correlation = q->reorder_cor.rho;
net/sched/sch_netem.c
1248
corrupt.probability = q->corrupt;
net/sched/sch_netem.c
1249
corrupt.correlation = q->corrupt_cor.rho;
net/sched/sch_netem.c
1253
if (q->rate >= (1ULL << 32)) {
net/sched/sch_netem.c
1254
if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
net/sched/sch_netem.c
1259
rate.rate = q->rate;
net/sched/sch_netem.c
1261
rate.packet_overhead = q->packet_overhead;
net/sched/sch_netem.c
1262
rate.cell_size = q->cell_size;
net/sched/sch_netem.c
1263
rate.cell_overhead = q->cell_overhead;
net/sched/sch_netem.c
1267
if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
net/sched/sch_netem.c
1270
if (dump_loss_model(q, skb) != 0)
net/sched/sch_netem.c
1273
if (q->slot_config.min_delay | q->slot_config.max_delay |
net/sched/sch_netem.c
1274
q->slot_config.dist_jitter) {
net/sched/sch_netem.c
1275
slot = q->slot_config;
net/sched/sch_netem.c
1284
if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed,
net/sched/sch_netem.c
1298
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
1300
if (cl != 1 || !q->qdisc) /* only one class */
net/sched/sch_netem.c
1304
tcm->tcm_info = q->qdisc->handle;
net/sched/sch_netem.c
1312
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
1314
*old = qdisc_replace(sch, new, &q->qdisc);
net/sched/sch_netem.c
1320
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
1321
return q->qdisc;
net/sched/sch_netem.c
210
static bool loss_4state(struct netem_sched_data *q)
net/sched/sch_netem.c
212
struct clgstate *clg = &q->clg;
net/sched/sch_netem.c
213
u32 rnd = prandom_u32_state(&q->prng.prng_state);
net/sched/sch_netem.c
275
static bool loss_gilb_ell(struct netem_sched_data *q)
net/sched/sch_netem.c
277
struct clgstate *clg = &q->clg;
net/sched/sch_netem.c
278
struct rnd_state *s = &q->prng.prng_state;
net/sched/sch_netem.c
297
static bool loss_event(struct netem_sched_data *q)
net/sched/sch_netem.c
299
switch (q->loss_model) {
net/sched/sch_netem.c
302
return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
net/sched/sch_netem.c
310
return loss_4state(q);
net/sched/sch_netem.c
318
return loss_gilb_ell(q);
net/sched/sch_netem.c
357
static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
net/sched/sch_netem.c
359
len += q->packet_overhead;
net/sched/sch_netem.c
361
if (q->cell_size) {
net/sched/sch_netem.c
362
u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
net/sched/sch_netem.c
364
if (len > cells * q->cell_size) /* extra cell needed for remainder */
net/sched/sch_netem.c
366
len = cells * (q->cell_size + q->cell_overhead);
net/sched/sch_netem.c
369
return div64_u64(len * NSEC_PER_SEC, q->rate);
net/sched/sch_netem.c
374
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
375
struct rb_node *p = rb_first(&q->t_root);
net/sched/sch_netem.c
381
rb_erase(&skb->rbnode, &q->t_root);
net/sched/sch_netem.c
385
rtnl_kfree_skbs(q->t_head, q->t_tail);
net/sched/sch_netem.c
386
q->t_head = NULL;
net/sched/sch_netem.c
387
q->t_tail = NULL;
net/sched/sch_netem.c
388
q->t_len = 0;
net/sched/sch_netem.c
393
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
396
if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
net/sched/sch_netem.c
397
if (q->t_tail)
net/sched/sch_netem.c
398
q->t_tail->next = nskb;
net/sched/sch_netem.c
400
q->t_head = nskb;
net/sched/sch_netem.c
401
q->t_tail = nskb;
net/sched/sch_netem.c
403
struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
net/sched/sch_netem.c
416
rb_insert_color(&nskb->rbnode, &q->t_root);
net/sched/sch_netem.c
418
q->t_len++;
net/sched/sch_netem.c
419
sch->q.qlen++;
net/sched/sch_netem.c
452
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
464
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
net/sched/sch_netem.c
468
if (loss_event(q)) {
net/sched/sch_netem.c
469
if (q->ecn && INET_ECN_set_ce(skb))
net/sched/sch_netem.c
483
if (q->latency || q->jitter || q->rate)
net/sched/sch_netem.c
499
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
net/sched/sch_netem.c
527
if (unlikely(q->t_len >= sch->limit)) {
net/sched/sch_netem.c
543
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
net/sched/sch_netem.c
545
q->duplicate = 0;
net/sched/sch_netem.c
547
q->duplicate = dupsave;
net/sched/sch_netem.c
554
if (q->gap == 0 || /* not doing reordering */
net/sched/sch_netem.c
555
q->counter < q->gap - 1 || /* inside last reordering gap */
net/sched/sch_netem.c
556
q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
net/sched/sch_netem.c
560
delay = tabledist(q->latency, q->jitter,
net/sched/sch_netem.c
561
&q->delay_cor, &q->prng, q->delay_dist);
net/sched/sch_netem.c
565
if (q->rate) {
net/sched/sch_netem.c
568
if (sch->q.tail)
net/sched/sch_netem.c
569
last = netem_skb_cb(sch->q.tail);
net/sched/sch_netem.c
570
if (q->t_root.rb_node) {
net/sched/sch_netem.c
574
t_skb = skb_rb_last(&q->t_root);
net/sched/sch_netem.c
580
if (q->t_tail) {
net/sched/sch_netem.c
582
netem_skb_cb(q->t_tail);
net/sched/sch_netem.c
600
delay += packet_time_ns(qdisc_pkt_len(skb), q);
net/sched/sch_netem.c
604
++q->counter;
net/sched/sch_netem.c
612
q->counter = 0;
net/sched/sch_netem.c
614
__qdisc_enqueue_head(skb, &sch->q);
net/sched/sch_netem.c
656
static void get_slot_next(struct netem_sched_data *q, u64 now)
net/sched/sch_netem.c
660
if (!q->slot_dist)
net/sched/sch_netem.c
661
next_delay = q->slot_config.min_delay +
net/sched/sch_netem.c
663
(q->slot_config.max_delay -
net/sched/sch_netem.c
664
q->slot_config.min_delay) >> 32);
net/sched/sch_netem.c
666
next_delay = tabledist(q->slot_config.dist_delay,
net/sched/sch_netem.c
667
(s32)(q->slot_config.dist_jitter),
net/sched/sch_netem.c
668
NULL, &q->prng, q->slot_dist);
net/sched/sch_netem.c
670
q->slot.slot_next = now + next_delay;
net/sched/sch_netem.c
671
q->slot.packets_left = q->slot_config.max_packets;
net/sched/sch_netem.c
672
q->slot.bytes_left = q->slot_config.max_bytes;
net/sched/sch_netem.c
675
static struct sk_buff *netem_peek(struct netem_sched_data *q)
net/sched/sch_netem.c
677
struct sk_buff *skb = skb_rb_first(&q->t_root);
net/sched/sch_netem.c
681
return q->t_head;
net/sched/sch_netem.c
682
if (!q->t_head)
net/sched/sch_netem.c
686
t2 = netem_skb_cb(q->t_head)->time_to_send;
net/sched/sch_netem.c
689
return q->t_head;
net/sched/sch_netem.c
692
static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
net/sched/sch_netem.c
694
if (skb == q->t_head) {
net/sched/sch_netem.c
695
q->t_head = skb->next;
net/sched/sch_netem.c
696
if (!q->t_head)
net/sched/sch_netem.c
697
q->t_tail = NULL;
net/sched/sch_netem.c
699
rb_erase(&skb->rbnode, &q->t_root);
net/sched/sch_netem.c
705
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
709
skb = __qdisc_dequeue_head(&sch->q);
net/sched/sch_netem.c
716
skb = netem_peek(q);
net/sched/sch_netem.c
723
if (q->slot.slot_next && q->slot.slot_next < time_to_send)
net/sched/sch_netem.c
724
get_slot_next(q, now);
net/sched/sch_netem.c
726
if (time_to_send <= now && q->slot.slot_next <= now) {
net/sched/sch_netem.c
727
netem_erase_head(q, skb);
net/sched/sch_netem.c
728
q->t_len--;
net/sched/sch_netem.c
736
if (q->slot.slot_next) {
net/sched/sch_netem.c
737
q->slot.packets_left--;
net/sched/sch_netem.c
738
q->slot.bytes_left -= qdisc_pkt_len(skb);
net/sched/sch_netem.c
739
if (q->slot.packets_left <= 0 ||
net/sched/sch_netem.c
740
q->slot.bytes_left <= 0)
net/sched/sch_netem.c
741
get_slot_next(q, now);
net/sched/sch_netem.c
744
if (q->qdisc) {
net/sched/sch_netem.c
749
err = qdisc_enqueue(skb, q->qdisc, &to_free);
net/sched/sch_netem.c
755
sch->q.qlen--;
net/sched/sch_netem.c
760
sch->q.qlen--;
net/sched/sch_netem.c
764
if (q->qdisc) {
net/sched/sch_netem.c
765
skb = q->qdisc->ops->dequeue(q->qdisc);
net/sched/sch_netem.c
767
sch->q.qlen--;
net/sched/sch_netem.c
772
qdisc_watchdog_schedule_ns(&q->watchdog,
net/sched/sch_netem.c
774
q->slot.slot_next));
net/sched/sch_netem.c
777
if (q->qdisc) {
net/sched/sch_netem.c
778
skb = q->qdisc->ops->dequeue(q->qdisc);
net/sched/sch_netem.c
780
sch->q.qlen--;
net/sched/sch_netem.c
789
struct netem_sched_data *q = qdisc_priv(sch);
net/sched/sch_netem.c
793
if (q->qdisc)
net/sched/sch_netem.c
794
qdisc_reset(q->qdisc);
net/sched/sch_netem.c
795
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_netem.c
830
static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
net/sched/sch_netem.c
834
q->slot_config = *c;
net/sched/sch_netem.c
835
if (q->slot_config.max_packets == 0)
net/sched/sch_netem.c
836
q->slot_config.max_packets = INT_MAX;
net/sched/sch_netem.c
837
if (q->slot_config.max_bytes == 0)
net/sched/sch_netem.c
838
q->slot_config.max_bytes = INT_MAX;
net/sched/sch_netem.c
841
q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
net/sched/sch_netem.c
843
q->slot.packets_left = q->slot_config.max_packets;
net/sched/sch_netem.c
844
q->slot.bytes_left = q->slot_config.max_bytes;
net/sched/sch_netem.c
845
if (q->slot_config.min_delay | q->slot_config.max_delay |
net/sched/sch_netem.c
846
q->slot_config.dist_jitter)
net/sched/sch_netem.c
847
q->slot.slot_next = ktime_get_ns();
net/sched/sch_netem.c
849
q->slot.slot_next = 0;
net/sched/sch_netem.c
852
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
net/sched/sch_netem.c
856
init_crandom(&q->delay_cor, c->delay_corr);
net/sched/sch_netem.c
857
init_crandom(&q->loss_cor, c->loss_corr);
net/sched/sch_netem.c
858
init_crandom(&q->dup_cor, c->dup_corr);
net/sched/sch_netem.c
861
static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
net/sched/sch_netem.c
865
q->reorder = r->probability;
net/sched/sch_netem.c
866
init_crandom(&q->reorder_cor, r->correlation);
net/sched/sch_netem.c
869
static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
net/sched/sch_netem.c
873
q->corrupt = r->probability;
net/sched/sch_netem.c
874
init_crandom(&q->corrupt_cor, r->correlation);
net/sched/sch_netem.c
877
static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
net/sched/sch_netem.c
881
q->rate = r->rate;
net/sched/sch_netem.c
882
q->packet_overhead = r->packet_overhead;
net/sched/sch_netem.c
883
q->cell_size = r->cell_size;
net/sched/sch_netem.c
884
q->cell_overhead = r->cell_overhead;
net/sched/sch_netem.c
885
if (q->cell_size)
net/sched/sch_netem.c
886
q->cell_size_reciprocal = reciprocal_value(q->cell_size);
net/sched/sch_netem.c
888
q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
net/sched/sch_netem.c
891
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
net/sched/sch_netem.c
908
q->loss_model = CLG_4_STATES;
net/sched/sch_netem.c
910
q->clg.state = TX_IN_GAP_PERIOD;
net/sched/sch_netem.c
911
q->clg.a1 = gi->p13;
net/sched/sch_netem.c
912
q->clg.a2 = gi->p31;
net/sched/sch_netem.c
913
q->clg.a3 = gi->p32;
net/sched/sch_netem.c
914
q->clg.a4 = gi->p14;
net/sched/sch_netem.c
915
q->clg.a5 = gi->p23;
net/sched/sch_netem.c
927
q->loss_model = CLG_GILB_ELL;
net/sched/sch_netem.c
928
q->clg.state = GOOD_STATE;
net/sched/sch_netem.c
929
q->clg.a1 = ge->p;
net/sched/sch_netem.c
930
q->clg.a2 = ge->r;
net/sched/sch_netem.c
931
q->clg.a3 = ge->h;
net/sched/sch_netem.c
932
q->clg.a4 = ge->k1;
net/sched/sch_netem.c
983
struct Qdisc *root, *q;
net/sched/sch_netem.c
997
hash_for_each(qdisc_dev(root)->qdisc_hash, i, q, hash) {
net/sched/sch_netem.c
998
if (sch != q && q->ops->cl_ops == &netem_class_ops) {
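
The sch_netem.c fragments above maintain two time-ordered holding structures for delayed packets: an rbtree (t_root) for packets whose send times arrive out of order, and a cheap linked list (t_head/t_tail) for the common in-order case. A minimal user-space sketch of that "append if not older than the tail, otherwise rbtree" decision; the names and the simplified list are illustrative, not the kernel code, and the rbtree path is elided:

/* Sketch of netem's dual tfifo idea (hypothetical user-space model):
 * packets whose time_to_send is >= the current tail go on a cheap
 * linked list; reordered ones would go to an rbtree (elided). */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct pkt { uint64_t time_to_send; struct pkt *next; };

static struct pkt *t_head, *t_tail;

static int tfifo_enqueue(struct pkt *p)
{
	if (!t_tail || p->time_to_send >= t_tail->time_to_send) {
		p->next = NULL;
		if (t_tail)
			t_tail->next = p;
		else
			t_head = p;
		t_tail = p;
		return 1;	/* fast path taken */
	}
	return 0;		/* would be inserted into the rbtree */
}

int main(void)
{
	uint64_t ts[] = { 10, 20, 15, 30 };
	for (int i = 0; i < 4; i++) {
		struct pkt *p = malloc(sizeof(*p));
		p->time_to_send = ts[i];
		printf("%llu -> %s\n", (unsigned long long)ts[i],
		       tfifo_enqueue(p) ? "list" : "rbtree");
	}
	return 0;
}
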
net/sched/sch_pie.c
102
} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
net/sched/sch_pie.c
107
q->stats.ecn_mark++;
net/sched/sch_pie.c
114
if (!q->params.dq_rate_estimator)
net/sched/sch_pie.c
117
q->stats.packets_in++;
net/sched/sch_pie.c
118
if (qdisc_qlen(sch) > q->stats.maxq)
net/sched/sch_pie.c
119
q->stats.maxq = qdisc_qlen(sch);
net/sched/sch_pie.c
125
q->stats.dropped++;
net/sched/sch_pie.c
126
q->vars.accu_prob = 0;
net/sched/sch_pie.c
145
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
162
WRITE_ONCE(q->params.target,
net/sched/sch_pie.c
168
WRITE_ONCE(q->params.tupdate,
net/sched/sch_pie.c
174
WRITE_ONCE(q->params.limit, limit);
net/sched/sch_pie.c
179
WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA]));
net/sched/sch_pie.c
182
WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA]));
net/sched/sch_pie.c
185
WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN]));
net/sched/sch_pie.c
188
WRITE_ONCE(q->params.bytemode,
net/sched/sch_pie.c
192
WRITE_ONCE(q->params.dq_rate_estimator,
net/sched/sch_pie.c
196
while (sch->q.qlen > sch->limit) {
net/sched/sch_pie.c
429
struct pie_sched_data *q = timer_container_of(q, t, adapt_timer);
net/sched/sch_pie.c
430
struct Qdisc *sch = q->sch;
net/sched/sch_pie.c
436
pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
net/sched/sch_pie.c
439
if (q->params.tupdate)
net/sched/sch_pie.c
440
mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
net/sched/sch_pie.c
448
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
450
pie_params_init(&q->params);
net/sched/sch_pie.c
451
pie_vars_init(&q->vars);
net/sched/sch_pie.c
452
sch->limit = q->params.limit;
net/sched/sch_pie.c
454
q->sch = sch;
net/sched/sch_pie.c
455
timer_setup(&q->adapt_timer, pie_timer, 0);
net/sched/sch_pie.c
464
mod_timer(&q->adapt_timer, jiffies + HZ / 2);
net/sched/sch_pie.c
470
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
479
((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) /
net/sched/sch_pie.c
483
jiffies_to_usecs(READ_ONCE(q->params.tupdate))) ||
net/sched/sch_pie.c
484
nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) ||
net/sched/sch_pie.c
485
nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) ||
net/sched/sch_pie.c
486
nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
net/sched/sch_pie.c
488
READ_ONCE(q->params.bytemode)) ||
net/sched/sch_pie.c
490
READ_ONCE(q->params.dq_rate_estimator)))
net/sched/sch_pie.c
502
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
504
.prob = q->vars.prob << BITS_PER_BYTE,
net/sched/sch_pie.c
505
.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
net/sched/sch_pie.c
507
.packets_in = q->stats.packets_in,
net/sched/sch_pie.c
508
.overlimit = q->stats.overlimit,
net/sched/sch_pie.c
509
.maxq = q->stats.maxq,
net/sched/sch_pie.c
510
.dropped = q->stats.dropped,
net/sched/sch_pie.c
511
.ecn_mark = q->stats.ecn_mark,
net/sched/sch_pie.c
515
st.dq_rate_estimating = q->params.dq_rate_estimator;
net/sched/sch_pie.c
518
if (q->params.dq_rate_estimator)
net/sched/sch_pie.c
519
st.avg_dq_rate = q->vars.avg_dq_rate *
net/sched/sch_pie.c
527
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
533
pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
net/sched/sch_pie.c
539
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
542
pie_vars_init(&q->vars);
net/sched/sch_pie.c
547
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
549
q->params.tupdate = 0;
net/sched/sch_pie.c
550
timer_delete_sync(&q->adapt_timer);
net/sched/sch_pie.c
89
struct pie_sched_data *q = qdisc_priv(sch);
net/sched/sch_pie.c
93
q->stats.overlimit++;
net/sched/sch_pie.c
99
if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
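
The sch_pie.c fragments feed the measured queueing delay into pie_calculate_probability on the tupdate timer. A hedged sketch of the underlying PIE control law from RFC 8033, in floating point for clarity (the kernel uses fixed-point arithmetic; field names here are illustrative):

/* Illustrative PIE drop-probability update. Per RFC 8033:
 *   p += alpha * (qdelay - target) + beta * (qdelay - qdelay_old)
 */
#include <stdio.h>

struct pie { double prob, qdelay_old, target, alpha, beta; };

static void pie_update(struct pie *v, double qdelay)
{
	v->prob += v->alpha * (qdelay - v->target) +
		   v->beta  * (qdelay - v->qdelay_old);
	if (v->prob < 0.0) v->prob = 0.0;
	if (v->prob > 1.0) v->prob = 1.0;
	v->qdelay_old = qdelay;
}

int main(void)
{
	struct pie v = { 0.0, 0.0, 0.015, 0.125, 1.25 }; /* 15 ms target */
	pie_update(&v, 0.030);	/* 30 ms measured delay */
	printf("drop prob now %.4f\n", v.prob);
	return 0;
}
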
net/sched/sch_plug.c
103
struct plug_sched_data *q = qdisc_priv(sch);
net/sched/sch_plug.c
105
if (q->throttled)
net/sched/sch_plug.c
108
if (!q->unplug_indefinite) {
net/sched/sch_plug.c
109
if (!q->pkts_to_release) {
net/sched/sch_plug.c
113
q->throttled = true;
net/sched/sch_plug.c
116
q->pkts_to_release--;
net/sched/sch_plug.c
125
struct plug_sched_data *q = qdisc_priv(sch);
net/sched/sch_plug.c
127
q->pkts_current_epoch = 0;
net/sched/sch_plug.c
128
q->pkts_last_epoch = 0;
net/sched/sch_plug.c
129
q->pkts_to_release = 0;
net/sched/sch_plug.c
130
q->unplug_indefinite = false;
net/sched/sch_plug.c
133
q->limit = qdisc_dev(sch)->tx_queue_len
net/sched/sch_plug.c
141
q->limit = ctl->limit;
net/sched/sch_plug.c
144
q->throttled = true;
net/sched/sch_plug.c
161
struct plug_sched_data *q = qdisc_priv(sch);
net/sched/sch_plug.c
171
q->pkts_last_epoch = q->pkts_current_epoch;
net/sched/sch_plug.c
172
q->pkts_current_epoch = 0;
net/sched/sch_plug.c
173
if (q->unplug_indefinite)
net/sched/sch_plug.c
174
q->throttled = true;
net/sched/sch_plug.c
175
q->unplug_indefinite = false;
net/sched/sch_plug.c
181
q->pkts_to_release += q->pkts_last_epoch;
net/sched/sch_plug.c
182
q->pkts_last_epoch = 0;
net/sched/sch_plug.c
183
q->throttled = false;
net/sched/sch_plug.c
187
q->unplug_indefinite = true;
net/sched/sch_plug.c
188
q->pkts_to_release = 0;
net/sched/sch_plug.c
189
q->pkts_last_epoch = 0;
net/sched/sch_plug.c
190
q->pkts_current_epoch = 0;
net/sched/sch_plug.c
191
q->throttled = false;
net/sched/sch_plug.c
196
q->limit = msg->limit;
net/sched/sch_plug.c
90
struct plug_sched_data *q = qdisc_priv(sch);
net/sched/sch_plug.c
92
if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
net/sched/sch_plug.c
93
if (!q->unplug_indefinite)
net/sched/sch_plug.c
94
q->pkts_current_epoch++;
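
The sch_plug.c fragments alternate plug/unplug epochs: pkts_current_epoch counts arrivals, an unplug moves the completed epoch's count into pkts_to_release, and the dequeue path decrements that budget until it throttles again. A tiny model of the accounting, with hypothetical names and the skb queue itself elided:

/* Minimal model of plug's two-epoch release accounting. */
#include <stdio.h>

struct plug { unsigned cur, last, to_release; int throttled; };

static void plug_buffer(struct plug *q)	/* start a new epoch */
{
	q->last = q->cur;
	q->cur = 0;
}

static void plug_release_one(struct plug *q) /* release completed epoch */
{
	q->to_release += q->last;
	q->last = 0;
	q->throttled = 0;
}

static int plug_may_dequeue(struct plug *q)
{
	if (q->throttled)
		return 0;
	if (!q->to_release) {
		q->throttled = 1;	/* budget spent: plug again */
		return 0;
	}
	q->to_release--;
	return 1;
}

int main(void)
{
	struct plug q = { .throttled = 1 };
	q.cur = 3;		/* three packets arrive while plugged */
	plug_buffer(&q);
	plug_release_one(&q);
	while (plug_may_dequeue(&q))
		puts("dequeue");	/* printed three times */
	return 0;
}
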
net/sched/sch_prio.c
102
for (prio = 0; prio < q->bands; prio++) {
net/sched/sch_prio.c
103
struct Qdisc *qdisc = q->queues[prio];
net/sched/sch_prio.c
113
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
116
for (prio = 0; prio < q->bands; prio++) {
net/sched/sch_prio.c
117
struct Qdisc *qdisc = q->queues[prio];
net/sched/sch_prio.c
122
sch->q.qlen--;
net/sched/sch_prio.c
134
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
136
for (prio = 0; prio < q->bands; prio++)
net/sched/sch_prio.c
137
qdisc_reset(q->queues[prio]);
net/sched/sch_prio.c
168
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
170
tcf_block_put(q->block);
net/sched/sch_prio.c
172
for (prio = 0; prio < q->bands; prio++)
net/sched/sch_prio.c
173
qdisc_put(q->queues[prio]);
net/sched/sch_prio.c
179
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
181
int oldbands = q->bands, i;
net/sched/sch_prio.c
210
q->bands = qopt->bands;
net/sched/sch_prio.c
211
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
net/sched/sch_prio.c
213
for (i = q->bands; i < oldbands; i++)
net/sched/sch_prio.c
214
qdisc_purge_queue(q->queues[i]);
net/sched/sch_prio.c
216
for (i = oldbands; i < q->bands; i++) {
net/sched/sch_prio.c
217
q->queues[i] = queues[i];
net/sched/sch_prio.c
218
if (q->queues[i] != &noop_qdisc)
net/sched/sch_prio.c
219
qdisc_hash_add(q->queues[i], true);
net/sched/sch_prio.c
224
for (i = q->bands; i < oldbands; i++)
net/sched/sch_prio.c
225
qdisc_put(q->queues[i]);
net/sched/sch_prio.c
232
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
238
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_prio.c
264
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
269
opt.bands = q->bands;
net/sched/sch_prio.c
270
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
net/sched/sch_prio.c
289
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
302
*old = qdisc_replace(sch, new, &q->queues[band]);
net/sched/sch_prio.c
319
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
322
return q->queues[band];
net/sched/sch_prio.c
327
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
33
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
330
if (band - 1 >= q->bands)
net/sched/sch_prio.c
341
static void prio_unbind(struct Qdisc *q, unsigned long cl)
net/sched/sch_prio.c
348
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
351
tcm->tcm_info = q->queues[cl-1]->handle;
net/sched/sch_prio.c
358
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
361
cl_q = q->queues[cl - 1];
net/sched/sch_prio.c
372
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
378
for (prio = 0; prio < q->bands; prio++) {
net/sched/sch_prio.c
387
struct prio_sched_data *q = qdisc_priv(sch);
net/sched/sch_prio.c
391
return q->block;
net/sched/sch_prio.c
41
fl = rcu_dereference_bh(q->filter_list);
net/sched/sch_prio.c
57
return q->queues[q->prio2band[band & TC_PRIO_MAX]];
net/sched/sch_prio.c
62
if (band >= q->bands)
net/sched/sch_prio.c
63
return q->queues[q->prio2band[0]];
net/sched/sch_prio.c
65
return q->queues[band];
net/sched/sch_prio.c
89
sch->q.qlen++;
net/sched/sch_prio.c
99
struct prio_sched_data *q = qdisc_priv(sch);
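
The sch_prio.c fragments at source lines 41-65 select a band either from a filter result or by masking skb->priority through the 16-entry prio2band map, with an out-of-range filter result falling back to prio2band[0]. A simplified sketch of that selection (the default priomap shown is the familiar pfifo_fast-style map, used here purely as example data):

/* Sketch of prio's band selection. Simplified model, not the
 * kernel's prio_classify(): the filter machinery is reduced to an
 * integer result, with <0 meaning "no filter match". */
#include <stdio.h>

#define TC_PRIO_MAX 15

struct prio { int bands; unsigned char prio2band[TC_PRIO_MAX + 1]; };

static int prio_band(const struct prio *q, unsigned int skb_priority,
		     int filter_band)
{
	if (filter_band < 0)
		return q->prio2band[skb_priority & TC_PRIO_MAX];
	if (filter_band >= q->bands)
		return q->prio2band[0];	/* out of range: fall back */
	return filter_band;
}

int main(void)
{
	struct prio q = { .bands = 3,
		.prio2band = {1,2,2,2,1,2,0,0,1,1,1,1,1,1,1,1} };
	printf("prio 6 -> band %d\n", prio_band(&q, 6, -1));	/* 0 */
	printf("filter 5 -> band %d\n", prio_band(&q, 0, 5));	/* 1 */
	return 0;
}
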
net/sched/sch_qfq.c
1002
if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
net/sched/sch_qfq.c
1054
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
net/sched/sch_qfq.c
1061
limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
net/sched/sch_qfq.c
1063
if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
net/sched/sch_qfq.c
1065
mask = mask_from(q->bitmaps[ER], agg->grp->index);
net/sched/sch_qfq.c
1067
struct qfq_group *next = qfq_ffs(q, mask);
net/sched/sch_qfq.c
1076
agg->S = q->V;
net/sched/sch_qfq.c
1087
qfq_update_agg_ts(struct qfq_sched *q,
net/sched/sch_qfq.c
1091
qfq_update_start(q, agg);
net/sched/sch_qfq.c
1098
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
net/sched/sch_qfq.c
1102
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
1103
struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
net/sched/sch_qfq.c
1138
qfq_update_agg_ts(q, in_serv_agg, requeue);
net/sched/sch_qfq.c
1139
qfq_schedule_agg(q, in_serv_agg);
net/sched/sch_qfq.c
1140
} else if (sch->q.qlen == 0) { /* no aggregate to serve */
net/sched/sch_qfq.c
1141
q->in_serv_agg = NULL;
net/sched/sch_qfq.c
1149
in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
net/sched/sch_qfq.c
1155
sch->q.qlen--;
net/sched/sch_qfq.c
1160
sch->q.qlen++;
net/sched/sch_qfq.c
1176
q->V += (u64)len * q->iwsum;
net/sched/sch_qfq.c
1179
(unsigned long long) q->V);
net/sched/sch_qfq.c
1184
static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
net/sched/sch_qfq.c
1190
qfq_update_eligible(q);
net/sched/sch_qfq.c
1191
q->oldV = q->V;
net/sched/sch_qfq.c
1193
if (!q->bitmaps[ER])
net/sched/sch_qfq.c
1196
grp = qfq_ffs(q, q->bitmaps[ER]);
net/sched/sch_qfq.c
1207
__clear_bit(grp->index, &q->bitmaps[ER]);
net/sched/sch_qfq.c
1217
__clear_bit(grp->index, &q->bitmaps[ER]);
net/sched/sch_qfq.c
1218
s = qfq_calc_state(q, grp);
net/sched/sch_qfq.c
1219
__set_bit(grp->index, &q->bitmaps[s]);
net/sched/sch_qfq.c
1222
qfq_unblock_groups(q, grp->index, old_F);
net/sched/sch_qfq.c
1231
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
1268
++sch->q.qlen;
net/sched/sch_qfq.c
1286
q->in_serv_agg == agg)
net/sched/sch_qfq.c
1289
qfq_activate_agg(q, agg, enqueue);
net/sched/sch_qfq.c
1297
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
net/sched/sch_qfq.c
1321
__clear_bit(grp->index, &q->bitmaps[IR]);
net/sched/sch_qfq.c
1322
__clear_bit(grp->index, &q->bitmaps[IB]);
net/sched/sch_qfq.c
1323
} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
net/sched/sch_qfq.c
1324
q->in_serv_agg == NULL)
net/sched/sch_qfq.c
1325
q->V = roundedS;
net/sched/sch_qfq.c
1329
s = qfq_calc_state(q, grp);
net/sched/sch_qfq.c
1330
__set_bit(grp->index, &q->bitmaps[s]);
net/sched/sch_qfq.c
1333
s, q->bitmaps[s],
net/sched/sch_qfq.c
1336
(unsigned long long) q->V);
net/sched/sch_qfq.c
1344
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
net/sched/sch_qfq.c
1349
qfq_update_agg_ts(q, agg, reason);
net/sched/sch_qfq.c
1350
if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
net/sched/sch_qfq.c
1351
q->in_serv_agg = agg; /* start serving this aggregate */
net/sched/sch_qfq.c
1353
q->oldV = q->V = agg->S;
net/sched/sch_qfq.c
1354
} else if (agg != q->in_serv_agg)
net/sched/sch_qfq.c
1355
qfq_schedule_agg(q, agg);
net/sched/sch_qfq.c
1358
static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
net/sched/sch_qfq.c
1381
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
net/sched/sch_qfq.c
1388
if (agg == q->in_serv_agg) {
net/sched/sch_qfq.c
1390
q->in_serv_agg = qfq_choose_next_agg(q);
net/sched/sch_qfq.c
1395
qfq_slot_remove(q, grp, agg);
net/sched/sch_qfq.c
1398
__clear_bit(grp->index, &q->bitmaps[IR]);
net/sched/sch_qfq.c
1399
__clear_bit(grp->index, &q->bitmaps[EB]);
net/sched/sch_qfq.c
1400
__clear_bit(grp->index, &q->bitmaps[IB]);
net/sched/sch_qfq.c
1402
if (test_bit(grp->index, &q->bitmaps[ER]) &&
net/sched/sch_qfq.c
1403
!(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
net/sched/sch_qfq.c
1404
mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
net/sched/sch_qfq.c
1409
qfq_move_groups(q, mask, EB, ER);
net/sched/sch_qfq.c
1410
qfq_move_groups(q, mask, IB, IR);
net/sched/sch_qfq.c
1412
__clear_bit(grp->index, &q->bitmaps[ER]);
net/sched/sch_qfq.c
1417
__clear_bit(grp->index, &q->bitmaps[ER]);
net/sched/sch_qfq.c
1418
__clear_bit(grp->index, &q->bitmaps[IR]);
net/sched/sch_qfq.c
1419
__clear_bit(grp->index, &q->bitmaps[EB]);
net/sched/sch_qfq.c
1420
__clear_bit(grp->index, &q->bitmaps[IB]);
net/sched/sch_qfq.c
1423
s = qfq_calc_state(q, grp);
net/sched/sch_qfq.c
1424
__set_bit(grp->index, &q->bitmaps[s]);
net/sched/sch_qfq.c
1431
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
1436
qfq_deactivate_class(q, cl);
net/sched/sch_qfq.c
1442
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
1447
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_qfq.c
1451
err = qdisc_class_hash_init(&q->clhash);
net/sched/sch_qfq.c
1459
q->max_agg_classes = 1<<max_cl_shift;
net/sched/sch_qfq.c
1463
q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
net/sched/sch_qfq.c
1466
grp = &q->groups[i];
net/sched/sch_qfq.c
1468
grp->slot_shift = q->min_slot_shift + i;
net/sched/sch_qfq.c
1473
INIT_HLIST_HEAD(&q->nonfull_aggs);
net/sched/sch_qfq.c
1480
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
1484
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_qfq.c
1485
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
net/sched/sch_qfq.c
1487
qfq_deactivate_class(q, cl);
net/sched/sch_qfq.c
1496
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
1501
tcf_block_put(q->block);
net/sched/sch_qfq.c
1503
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_qfq.c
1504
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
net/sched/sch_qfq.c
1506
qfq_rm_from_agg(q, cl);
net/sched/sch_qfq.c
1510
qdisc_class_hash_destroy(&q->clhash);
net/sched/sch_qfq.c
212
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
215
clc = qdisc_class_find(&q->clhash, classid);
net/sched/sch_qfq.c
262
static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
net/sched/sch_qfq.c
266
hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
net/sched/sch_qfq.c
272
static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
net/sched/sch_qfq.c
277
hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
net/sched/sch_qfq.c
286
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
net/sched/sch_qfq.c
291
if (new_num_classes == q->max_agg_classes)
net/sched/sch_qfq.c
295
new_num_classes == q->max_agg_classes - 1) /* agg no more full */
net/sched/sch_qfq.c
296
hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);
net/sched/sch_qfq.c
308
q->min_slot_shift);
net/sched/sch_qfq.c
309
agg->grp = &q->groups[i];
net/sched/sch_qfq.c
312
q->wsum +=
net/sched/sch_qfq.c
314
q->iwsum = ONE_FP / q->wsum;
net/sched/sch_qfq.c
320
static void qfq_add_to_agg(struct qfq_sched *q,
net/sched/sch_qfq.c
326
qfq_update_agg(q, agg, agg->num_classes+1);
net/sched/sch_qfq.c
327
if (cl->qdisc->q.qlen > 0) { /* adding an active class */
net/sched/sch_qfq.c
330
cl && q->in_serv_agg != agg) /* agg was inactive */
net/sched/sch_qfq.c
331
qfq_activate_agg(q, agg, enqueue); /* schedule agg */
net/sched/sch_qfq.c
337
static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
net/sched/sch_qfq.c
340
q->wsum -= agg->class_weight;
net/sched/sch_qfq.c
341
if (q->wsum != 0)
net/sched/sch_qfq.c
342
q->iwsum = ONE_FP / q->wsum;
net/sched/sch_qfq.c
344
if (q->in_serv_agg == agg)
net/sched/sch_qfq.c
345
q->in_serv_agg = qfq_choose_next_agg(q);
net/sched/sch_qfq.c
350
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
net/sched/sch_qfq.c
357
qfq_deactivate_agg(q, agg);
net/sched/sch_qfq.c
361
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
net/sched/sch_qfq.c
367
qfq_destroy_agg(q, agg);
net/sched/sch_qfq.c
370
qfq_update_agg(q, agg, agg->num_classes-1);
net/sched/sch_qfq.c
374
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
net/sched/sch_qfq.c
377
qfq_deactivate_class(q, cl);
net/sched/sch_qfq.c
379
qfq_rm_from_agg(q, cl);
net/sched/sch_qfq.c
386
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
393
new_agg = qfq_find_agg(q, lmax, weight);
net/sched/sch_qfq.c
398
qfq_init_agg(q, new_agg, lmax, weight);
net/sched/sch_qfq.c
400
qfq_deact_rm_from_agg(q, cl);
net/sched/sch_qfq.c
401
qfq_add_to_agg(q, new_agg, cl);
net/sched/sch_qfq.c
410
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
457
if (q->wsum + delta_w > QFQ_MAX_WSUM) {
net/sched/sch_qfq.c
460
delta_w, q->wsum);
net/sched/sch_qfq.c
508
new_agg = qfq_find_agg(q, lmax, weight);
net/sched/sch_qfq.c
518
qfq_init_agg(q, new_agg, lmax, weight);
net/sched/sch_qfq.c
521
qfq_deact_rm_from_agg(q, cl);
net/sched/sch_qfq.c
523
qdisc_class_hash_insert(&q->clhash, &cl->common);
net/sched/sch_qfq.c
524
qfq_add_to_agg(q, new_agg, cl);
net/sched/sch_qfq.c
526
qdisc_class_hash_grow(sch, &q->clhash);
net/sched/sch_qfq.c
549
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
560
qdisc_class_hash_remove(&q->clhash, &cl->common);
net/sched/sch_qfq.c
561
qfq_rm_from_agg(q, cl);
net/sched/sch_qfq.c
577
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
582
return q->block;
net/sched/sch_qfq.c
679
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
686
for (i = 0; i < q->clhash.hashsize; i++) {
net/sched/sch_qfq.c
687
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
net/sched/sch_qfq.c
697
struct qfq_sched *q = qdisc_priv(sch);
net/sched/sch_qfq.c
711
fl = rcu_dereference_bh(q->filter_list);
net/sched/sch_qfq.c
747
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
net/sched/sch_qfq.c
751
return &q->groups[index];
net/sched/sch_qfq.c
764
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
net/sched/sch_qfq.c
767
unsigned int state = qfq_gt(grp->S, q->V);
net/sched/sch_qfq.c
768
unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
net/sched/sch_qfq.c
772
next = qfq_ffs(q, mask);
net/sched/sch_qfq.c
787
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
net/sched/sch_qfq.c
790
q->bitmaps[dst] |= q->bitmaps[src] & mask;
net/sched/sch_qfq.c
791
q->bitmaps[src] &= ~mask;
net/sched/sch_qfq.c
794
static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
net/sched/sch_qfq.c
796
unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
net/sched/sch_qfq.c
800
next = qfq_ffs(q, mask);
net/sched/sch_qfq.c
806
qfq_move_groups(q, mask, EB, ER);
net/sched/sch_qfq.c
807
qfq_move_groups(q, mask, IB, IR);
net/sched/sch_qfq.c
820
static void qfq_make_eligible(struct qfq_sched *q)
net/sched/sch_qfq.c
822
unsigned long vslot = q->V >> q->min_slot_shift;
net/sched/sch_qfq.c
823
unsigned long old_vslot = q->oldV >> q->min_slot_shift;
net/sched/sch_qfq.c
834
qfq_move_groups(q, mask, IR, ER);
net/sched/sch_qfq.c
835
qfq_move_groups(q, mask, IB, EB);
net/sched/sch_qfq.c
975
static void qfq_update_eligible(struct qfq_sched *q)
net/sched/sch_qfq.c
980
ineligible = q->bitmaps[IR] | q->bitmaps[IB];
net/sched/sch_qfq.c
982
if (!q->bitmaps[ER]) {
net/sched/sch_qfq.c
983
grp = qfq_ffs(q, ineligible);
net/sched/sch_qfq.c
984
if (qfq_gt(grp->S, q->V))
net/sched/sch_qfq.c
985
q->V = grp->S;
net/sched/sch_qfq.c
987
qfq_make_eligible(q);
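
The sch_qfq.c fragments revolve around a wrapping 64-bit virtual time q->V: aggregates are eligible when their start time S does not exceed V, and group slot boundaries are found by rounding timestamps down to a power-of-two slot size. A sketch of the two helpers in their common formulation (this matches how QFQ-style schedulers are usually written; treat the exact kernel definitions as elided):

/* Wrapping-safe comparison and slot rounding for a QFQ-style
 * virtual-time scheduler (illustrative sketch). */
#include <stdio.h>
#include <stdint.h>

/* a > b, robust to 64-bit wraparound of virtual time */
static inline int qfq_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

/* round a timestamp down to a group's slot boundary */
static inline uint64_t qfq_round_down(uint64_t ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

int main(void)
{
	uint64_t V = 1000;
	printf("S=900 eligible: %d\n", !qfq_gt(900, V));	/* 1 */
	printf("slot start: %llu\n",
	       (unsigned long long)qfq_round_down(V, 6));	/* 960 */
	return 0;
}
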
net/sched/sch_red.c
102
} else if (!red_use_nodrop(q)) {
net/sched/sch_red.c
103
q->stats.prob_drop++;
net/sched/sch_red.c
113
if (red_use_harddrop(q) || !red_use_ecn(q)) {
net/sched/sch_red.c
114
q->stats.forced_drop++;
net/sched/sch_red.c
119
q->stats.forced_mark++;
net/sched/sch_red.c
120
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
net/sched/sch_red.c
123
} else if (!red_use_nodrop(q)) {
net/sched/sch_red.c
124
q->stats.forced_drop++;
net/sched/sch_red.c
136
sch->q.qlen++;
net/sched/sch_red.c
138
q->stats.pdrop++;
net/sched/sch_red.c
144
skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
net/sched/sch_red.c
155
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
156
struct Qdisc *child = q->qdisc;
net/sched/sch_red.c
162
sch->q.qlen--;
net/sched/sch_red.c
164
if (!red_is_idling(&q->vars))
net/sched/sch_red.c
165
red_start_of_idle_period(&q->vars);
net/sched/sch_red.c
172
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
173
struct Qdisc *child = q->qdisc;
net/sched/sch_red.c
180
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
182
qdisc_reset(q->qdisc);
net/sched/sch_red.c
183
red_restart(&q->vars);
net/sched/sch_red.c
188
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
200
opt.set.min = q->parms.qth_min >> q->parms.Wlog;
net/sched/sch_red.c
201
opt.set.max = q->parms.qth_max >> q->parms.Wlog;
net/sched/sch_red.c
202
opt.set.probability = q->parms.max_P;
net/sched/sch_red.c
203
opt.set.limit = q->limit;
net/sched/sch_red.c
204
opt.set.is_ecn = red_use_ecn(q);
net/sched/sch_red.c
205
opt.set.is_harddrop = red_use_harddrop(q);
net/sched/sch_red.c
206
opt.set.is_nodrop = red_use_nodrop(q);
net/sched/sch_red.c
217
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
219
tcf_qevent_destroy(&q->qe_mark, sch);
net/sched/sch_red.c
220
tcf_qevent_destroy(&q->qe_early_drop, sch);
net/sched/sch_red.c
221
timer_delete_sync(&q->adapt_timer);
net/sched/sch_red.c
223
qdisc_put(q->qdisc);
net/sched/sch_red.c
240
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
279
flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
net/sched/sch_red.c
284
q->flags = flags;
net/sched/sch_red.c
285
q->userbits = userbits;
net/sched/sch_red.c
286
q->limit = ctl->limit;
net/sched/sch_red.c
288
qdisc_purge_queue(q->qdisc);
net/sched/sch_red.c
289
old_child = q->qdisc;
net/sched/sch_red.c
290
q->qdisc = child;
net/sched/sch_red.c
293
red_set_parms(&q->parms,
net/sched/sch_red.c
298
red_set_vars(&q->vars);
net/sched/sch_red.c
300
timer_delete(&q->adapt_timer);
net/sched/sch_red.c
302
mod_timer(&q->adapt_timer, jiffies + HZ/2);
net/sched/sch_red.c
304
if (!q->qdisc->q.qlen)
net/sched/sch_red.c
305
red_start_of_idle_period(&q->vars);
net/sched/sch_red.c
324
struct red_sched_data *q = timer_container_of(q, t, adapt_timer);
net/sched/sch_red.c
325
struct Qdisc *sch = q->sch;
net/sched/sch_red.c
331
red_adaptative_algo(&q->parms, &q->vars);
net/sched/sch_red.c
332
mod_timer(&q->adapt_timer, jiffies + HZ/2);
net/sched/sch_red.c
340
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
344
q->qdisc = &noop_qdisc;
net/sched/sch_red.c
345
q->sch = sch;
net/sched/sch_red.c
346
timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
net/sched/sch_red.c
360
err = tcf_qevent_init(&q->qe_early_drop, sch,
net/sched/sch_red.c
366
return tcf_qevent_init(&q->qe_mark, sch,
net/sched/sch_red.c
374
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
383
err = tcf_qevent_validate_change(&q->qe_early_drop,
net/sched/sch_red.c
388
err = tcf_qevent_validate_change(&q->qe_mark,
net/sched/sch_red.c
413
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
416
.limit = q->limit,
net/sched/sch_red.c
417
.flags = (q->flags & TC_RED_HISTORIC_FLAGS) |
net/sched/sch_red.c
418
q->userbits,
net/sched/sch_red.c
419
.qth_min = q->parms.qth_min >> q->parms.Wlog,
net/sched/sch_red.c
420
.qth_max = q->parms.qth_max >> q->parms.Wlog,
net/sched/sch_red.c
421
.Wlog = q->parms.Wlog,
net/sched/sch_red.c
422
.Plog = q->parms.Plog,
net/sched/sch_red.c
423
.Scell_log = q->parms.Scell_log,
net/sched/sch_red.c
435
nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
net/sched/sch_red.c
437
q->flags, TC_RED_SUPPORTED_FLAGS) ||
net/sched/sch_red.c
438
tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
net/sched/sch_red.c
439
tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
net/sched/sch_red.c
450
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
460
.xstats = &q->stats,
net/sched/sch_red.c
466
st.early = q->stats.prob_drop + q->stats.forced_drop;
net/sched/sch_red.c
467
st.pdrop = q->stats.pdrop;
net/sched/sch_red.c
468
st.marked = q->stats.prob_mark + q->stats.forced_mark;
net/sched/sch_red.c
476
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
479
tcm->tcm_info = q->qdisc->handle;
net/sched/sch_red.c
501
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
506
*old = qdisc_replace(sch, new, &q->qdisc);
net/sched/sch_red.c
514
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
515
return q->qdisc;
net/sched/sch_red.c
55
static inline int red_use_ecn(struct red_sched_data *q)
net/sched/sch_red.c
57
return q->flags & TC_RED_ECN;
net/sched/sch_red.c
60
static inline int red_use_harddrop(struct red_sched_data *q)
net/sched/sch_red.c
62
return q->flags & TC_RED_HARDDROP;
net/sched/sch_red.c
65
static int red_use_nodrop(struct red_sched_data *q)
net/sched/sch_red.c
67
return q->flags & TC_RED_NODROP;
net/sched/sch_red.c
74
struct red_sched_data *q = qdisc_priv(sch);
net/sched/sch_red.c
75
struct Qdisc *child = q->qdisc;
net/sched/sch_red.c
79
q->vars.qavg = red_calc_qavg(&q->parms,
net/sched/sch_red.c
80
&q->vars,
net/sched/sch_red.c
83
if (red_is_idling(&q->vars))
net/sched/sch_red.c
84
red_end_of_idle_period(&q->vars);
net/sched/sch_red.c
86
switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
net/sched/sch_red.c
92
if (!red_use_ecn(q)) {
net/sched/sch_red.c
93
q->stats.prob_drop++;
net/sched/sch_red.c
98
q->stats.prob_mark++;
net/sched/sch_red.c
99
skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
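
The sch_red.c fragments compute q->vars.qavg via red_calc_qavg and then branch on red_action into don't-mark, probabilistic-mark, and hard-mark regions. A miniature of the classic RED scheme, keeping the average scaled by 2^Wlog as the fixed-point code does; a sketch under those assumptions, not the kernel's red.h:

/* Classic RED in miniature: EWMA of queue backlog with weight
 * 2^-Wlog, then three regions against qth_min/qth_max. */
#include <stdio.h>

enum red_verdict { RED_DONT_MARK, RED_PROB_MARK, RED_HARD_MARK };

struct red { unsigned long qavg;	/* scaled by 2^Wlog */
	     unsigned qth_min, qth_max, Wlog; };

static void red_ewma(struct red *p, unsigned backlog)
{
	/* scaled form of avg += (backlog - avg) / 2^Wlog */
	p->qavg += backlog - (p->qavg >> p->Wlog);
}

static enum red_verdict red_action(const struct red *p)
{
	unsigned long avg = p->qavg >> p->Wlog;

	if (avg < p->qth_min)
		return RED_DONT_MARK;
	if (avg >= p->qth_max)
		return RED_HARD_MARK;
	return RED_PROB_MARK;	/* mark with prob ~ (avg-min)/(max-min) */
}

int main(void)
{
	struct red p = { 0, 5, 15, 3 };
	for (int i = 0; i < 20; i++)
		red_ewma(&p, 12);	/* sustained backlog of 12 */
	printf("avg=%lu verdict=%d\n", p.qavg >> p.Wlog, red_action(&p));
	return 0;
}
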
net/sched/sch_sfb.c
123
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
net/sched/sch_sfb.c
126
struct sfb_bucket *b = &q->bins[slot].bins[0][0];
net/sched/sch_sfb.c
138
static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
net/sched/sch_sfb.c
144
increment_one_qlen(sfbhash, 0, q);
net/sched/sch_sfb.c
148
increment_one_qlen(sfbhash, 1, q);
net/sched/sch_sfb.c
152
struct sfb_sched_data *q)
net/sched/sch_sfb.c
155
struct sfb_bucket *b = &q->bins[slot].bins[0][0];
net/sched/sch_sfb.c
167
static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
net/sched/sch_sfb.c
173
decrement_one_qlen(sfbhash, 0, q);
net/sched/sch_sfb.c
177
decrement_one_qlen(sfbhash, 1, q);
net/sched/sch_sfb.c
180
static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
net/sched/sch_sfb.c
182
b->p_mark = prob_minus(b->p_mark, q->decrement);
net/sched/sch_sfb.c
185
static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
net/sched/sch_sfb.c
187
b->p_mark = prob_plus(b->p_mark, q->increment);
net/sched/sch_sfb.c
190
static void sfb_zero_all_buckets(struct sfb_sched_data *q)
net/sched/sch_sfb.c
192
memset(&q->bins, 0, sizeof(q->bins));
net/sched/sch_sfb.c
198
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
net/sched/sch_sfb.c
202
const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];
net/sched/sch_sfb.c
218
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
net/sched/sch_sfb.c
220
get_random_bytes(&q->bins[slot].perturbation,
net/sched/sch_sfb.c
221
sizeof(q->bins[slot].perturbation));
net/sched/sch_sfb.c
224
static void sfb_swap_slot(struct sfb_sched_data *q)
net/sched/sch_sfb.c
226
sfb_init_perturbation(q->slot, q);
net/sched/sch_sfb.c
227
q->slot ^= 1;
net/sched/sch_sfb.c
228
q->double_buffering = false;
net/sched/sch_sfb.c
234
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
net/sched/sch_sfb.c
236
if (q->penalty_rate == 0 || q->penalty_burst == 0)
net/sched/sch_sfb.c
239
if (q->tokens_avail < 1) {
net/sched/sch_sfb.c
240
unsigned long age = min(10UL * HZ, jiffies - q->token_time);
net/sched/sch_sfb.c
242
q->tokens_avail = (age * q->penalty_rate) / HZ;
net/sched/sch_sfb.c
243
if (q->tokens_avail > q->penalty_burst)
net/sched/sch_sfb.c
244
q->tokens_avail = q->penalty_burst;
net/sched/sch_sfb.c
245
q->token_time = jiffies;
net/sched/sch_sfb.c
246
if (q->tokens_avail < 1)
net/sched/sch_sfb.c
250
q->tokens_avail--;
net/sched/sch_sfb.c
284
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
286
struct Qdisc *child = q->qdisc;
net/sched/sch_sfb.c
293
u32 slot = q->slot;
net/sched/sch_sfb.c
296
if (unlikely(sch->q.qlen >= q->limit)) {
net/sched/sch_sfb.c
298
q->stats.queuedrop++;
net/sched/sch_sfb.c
302
if (q->rehash_interval > 0) {
net/sched/sch_sfb.c
303
unsigned long limit = q->rehash_time + q->rehash_interval;
net/sched/sch_sfb.c
306
sfb_swap_slot(q);
net/sched/sch_sfb.c
307
q->rehash_time = jiffies;
net/sched/sch_sfb.c
308
} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
net/sched/sch_sfb.c
309
time_after(jiffies, limit - q->warmup_time))) {
net/sched/sch_sfb.c
310
q->double_buffering = true;
net/sched/sch_sfb.c
314
fl = rcu_dereference_bh(q->filter_list);
net/sched/sch_sfb.c
321
sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
net/sched/sch_sfb.c
323
sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
net/sched/sch_sfb.c
333
struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
net/sched/sch_sfb.c
337
decrement_prob(b, q);
net/sched/sch_sfb.c
338
else if (b->qlen >= q->bin_size)
net/sched/sch_sfb.c
339
increment_prob(b, q);
net/sched/sch_sfb.c
349
if (unlikely(minqlen >= q->max)) {
net/sched/sch_sfb.c
351
q->stats.bucketdrop++;
net/sched/sch_sfb.c
357
if (q->double_buffering) {
net/sched/sch_sfb.c
359
&q->bins[slot].perturbation);
net/sched/sch_sfb.c
366
struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
net/sched/sch_sfb.c
370
decrement_prob(b, q);
net/sched/sch_sfb.c
371
else if (b->qlen >= q->bin_size)
net/sched/sch_sfb.c
372
increment_prob(b, q);
net/sched/sch_sfb.c
375
if (sfb_rate_limit(skb, q)) {
net/sched/sch_sfb.c
377
q->stats.penaltydrop++;
net/sched/sch_sfb.c
393
q->stats.earlydrop++;
net/sched/sch_sfb.c
398
q->stats.marked++;
net/sched/sch_sfb.c
400
q->stats.earlydrop++;
net/sched/sch_sfb.c
410
sch->q.qlen++;
net/sched/sch_sfb.c
411
increment_qlen(&cb, q);
net/sched/sch_sfb.c
413
q->stats.childdrop++;
net/sched/sch_sfb.c
430
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
431
struct Qdisc *child = q->qdisc;
net/sched/sch_sfb.c
434
skb = child->dequeue(q->qdisc);
net/sched/sch_sfb.c
439
sch->q.qlen--;
net/sched/sch_sfb.c
440
decrement_qlen(skb, q);
net/sched/sch_sfb.c
448
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
449
struct Qdisc *child = q->qdisc;
net/sched/sch_sfb.c
458
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
460
if (likely(q->qdisc))
net/sched/sch_sfb.c
461
qdisc_reset(q->qdisc);
net/sched/sch_sfb.c
462
q->slot = 0;
net/sched/sch_sfb.c
463
q->double_buffering = false;
net/sched/sch_sfb.c
464
sfb_zero_all_buckets(q);
net/sched/sch_sfb.c
465
sfb_init_perturbation(0, q);
net/sched/sch_sfb.c
470
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
472
tcf_block_put(q->block);
net/sched/sch_sfb.c
473
qdisc_put(q->qdisc);
net/sched/sch_sfb.c
495
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
526
qdisc_purge_queue(q->qdisc);
net/sched/sch_sfb.c
527
old = q->qdisc;
net/sched/sch_sfb.c
528
q->qdisc = child;
net/sched/sch_sfb.c
530
q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
net/sched/sch_sfb.c
531
q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
net/sched/sch_sfb.c
532
q->rehash_time = jiffies;
net/sched/sch_sfb.c
533
q->limit = limit;
net/sched/sch_sfb.c
534
q->increment = ctl->increment;
net/sched/sch_sfb.c
535
q->decrement = ctl->decrement;
net/sched/sch_sfb.c
536
q->max = ctl->max;
net/sched/sch_sfb.c
537
q->bin_size = ctl->bin_size;
net/sched/sch_sfb.c
538
q->penalty_rate = ctl->penalty_rate;
net/sched/sch_sfb.c
539
q->penalty_burst = ctl->penalty_burst;
net/sched/sch_sfb.c
540
q->tokens_avail = ctl->penalty_burst;
net/sched/sch_sfb.c
541
q->token_time = jiffies;
net/sched/sch_sfb.c
543
q->slot = 0;
net/sched/sch_sfb.c
544
q->double_buffering = false;
net/sched/sch_sfb.c
545
sfb_zero_all_buckets(q);
net/sched/sch_sfb.c
546
sfb_init_perturbation(0, q);
net/sched/sch_sfb.c
547
sfb_init_perturbation(1, q);
net/sched/sch_sfb.c
558
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
561
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_sfb.c
565
q->qdisc = &noop_qdisc;
net/sched/sch_sfb.c
571
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
574
.rehash_interval = jiffies_to_msecs(q->rehash_interval),
net/sched/sch_sfb.c
575
.warmup_time = jiffies_to_msecs(q->warmup_time),
net/sched/sch_sfb.c
576
.limit = q->limit,
net/sched/sch_sfb.c
577
.max = q->max,
net/sched/sch_sfb.c
578
.bin_size = q->bin_size,
net/sched/sch_sfb.c
579
.increment = q->increment,
net/sched/sch_sfb.c
580
.decrement = q->decrement,
net/sched/sch_sfb.c
581
.penalty_rate = q->penalty_rate,
net/sched/sch_sfb.c
582
.penalty_burst = q->penalty_burst,
net/sched/sch_sfb.c
585
sch->qstats.backlog = q->qdisc->qstats.backlog;
net/sched/sch_sfb.c
600
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
602
.earlydrop = q->stats.earlydrop,
net/sched/sch_sfb.c
603
.penaltydrop = q->stats.penaltydrop,
net/sched/sch_sfb.c
604
.bucketdrop = q->stats.bucketdrop,
net/sched/sch_sfb.c
605
.queuedrop = q->stats.queuedrop,
net/sched/sch_sfb.c
606
.childdrop = q->stats.childdrop,
net/sched/sch_sfb.c
607
.marked = q->stats.marked,
net/sched/sch_sfb.c
610
st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);
net/sched/sch_sfb.c
624
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
629
*old = qdisc_replace(sch, new, &q->qdisc);
net/sched/sch_sfb.c
635
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
637
return q->qdisc;
net/sched/sch_sfb.c
672
struct sfb_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfb.c
676
return q->block;
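
The sch_sfb.c fragments hash each flow into one bucket per level of bins[slot], nudging a bucket's p_mark up whenever its qlen reaches bin_size and down when it drains, so that only flows saturating every bucket they map to get penalized. A hedged single-level model of that per-bucket probability bookkeeping; the real qdisc uses multiple levels, two hash slots for rehashing, and different update points, and all names here are illustrative:

/* One-level sketch of SFB's per-bucket marking probability. */
#include <stdio.h>
#include <stdint.h>

#define NBINS 16
#define PROB_MAX 0xFFFF

struct bucket { uint32_t qlen, p_mark; };
static struct bucket bins[NBINS];
static const uint32_t bin_size = 4, increment = 1024, decrement = 512;

static void sfb_account(uint32_t hash, int enqueue)
{
	struct bucket *b = &bins[hash % NBINS];

	if (enqueue) {
		b->qlen++;
		if (b->qlen >= bin_size && b->p_mark < PROB_MAX - increment)
			b->p_mark += increment;	/* bucket congested */
	} else if (b->qlen) {
		b->qlen--;
		if (b->qlen == 0 && b->p_mark >= decrement)
			b->p_mark -= decrement;	/* bucket drained */
	}
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		sfb_account(42, 1);	/* one flow fills its bucket */
	printf("p_mark = %u\n", bins[42 % NBINS].p_mark);	/* 5120 */
	return 0;
}
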
net/sched/sch_sfq.c
143
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
net/sched/sch_sfq.c
146
return &q->slots[val].dep;
net/sched/sch_sfq.c
147
return &q->dep[val - SFQ_MAX_FLOWS];
net/sched/sch_sfq.c
150
static unsigned int sfq_hash(const struct sfq_sched_data *q,
net/sched/sch_sfq.c
153
return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
net/sched/sch_sfq.c
159
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
166
TC_H_MIN(skb->priority) <= q->divisor)
net/sched/sch_sfq.c
169
fl = rcu_dereference_bh(q->filter_list);
net/sched/sch_sfq.c
171
return sfq_hash(q, skb) + 1;
net/sched/sch_sfq.c
187
if (TC_H_MIN(res.classid) <= q->divisor)
net/sched/sch_sfq.c
196
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
net/sched/sch_sfq.c
199
struct sfq_slot *slot = &q->slots[x];
net/sched/sch_sfq.c
203
n = q->dep[qlen].next;
net/sched/sch_sfq.c
208
q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
net/sched/sch_sfq.c
209
sfq_dep_head(q, n)->prev = x;
net/sched/sch_sfq.c
212
#define sfq_unlink(q, x, n, p) \
net/sched/sch_sfq.c
214
n = q->slots[x].dep.next; \
net/sched/sch_sfq.c
215
p = q->slots[x].dep.prev; \
net/sched/sch_sfq.c
216
sfq_dep_head(q, p)->next = n; \
net/sched/sch_sfq.c
217
sfq_dep_head(q, n)->prev = p; \
net/sched/sch_sfq.c
221
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
net/sched/sch_sfq.c
226
sfq_unlink(q, x, n, p);
net/sched/sch_sfq.c
228
d = q->slots[x].qlen--;
net/sched/sch_sfq.c
229
if (n == p && q->cur_depth == d)
net/sched/sch_sfq.c
230
q->cur_depth--;
net/sched/sch_sfq.c
231
sfq_link(q, x);
net/sched/sch_sfq.c
234
static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
net/sched/sch_sfq.c
239
sfq_unlink(q, x, n, p);
net/sched/sch_sfq.c
241
d = ++q->slots[x].qlen;
net/sched/sch_sfq.c
242
if (q->cur_depth < d)
net/sched/sch_sfq.c
243
q->cur_depth = d;
net/sched/sch_sfq.c
244
sfq_link(q, x);
net/sched/sch_sfq.c
288
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
289
sfq_index x, d = q->cur_depth;
net/sched/sch_sfq.c
296
x = q->dep[d].next;
net/sched/sch_sfq.c
297
slot = &q->slots[x];
net/sched/sch_sfq.c
299
skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
net/sched/sch_sfq.c
302
sfq_dec(q, x);
net/sched/sch_sfq.c
303
sch->q.qlen--;
net/sched/sch_sfq.c
311
x = q->tail->next;
net/sched/sch_sfq.c
312
slot = &q->slots[x];
net/sched/sch_sfq.c
314
q->tail = NULL; /* no more active slots */
net/sched/sch_sfq.c
316
q->tail->next = slot->next;
net/sched/sch_sfq.c
317
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
net/sched/sch_sfq.c
325
static int sfq_prob_mark(const struct sfq_sched_data *q)
net/sched/sch_sfq.c
327
return q->flags & TC_RED_ECN;
net/sched/sch_sfq.c
331
static int sfq_hard_mark(const struct sfq_sched_data *q)
net/sched/sch_sfq.c
333
return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
net/sched/sch_sfq.c
336
static int sfq_headdrop(const struct sfq_sched_data *q)
net/sched/sch_sfq.c
338
return q->headdrop;
net/sched/sch_sfq.c
344
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
361
x = q->ht[hash];
net/sched/sch_sfq.c
362
slot = &q->slots[x];
net/sched/sch_sfq.c
364
x = q->dep[0].next; /* get a free slot */
net/sched/sch_sfq.c
367
q->ht[hash] = x;
net/sched/sch_sfq.c
368
slot = &q->slots[x];
net/sched/sch_sfq.c
374
if (q->red_parms) {
net/sched/sch_sfq.c
375
slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
net/sched/sch_sfq.c
378
switch (red_action(q->red_parms,
net/sched/sch_sfq.c
386
if (sfq_prob_mark(q)) {
net/sched/sch_sfq.c
388
if (sfq_headdrop(q) &&
net/sched/sch_sfq.c
390
q->stats.prob_mark_head++;
net/sched/sch_sfq.c
394
q->stats.prob_mark++;
net/sched/sch_sfq.c
398
q->stats.prob_drop++;
net/sched/sch_sfq.c
403
if (sfq_hard_mark(q)) {
net/sched/sch_sfq.c
405
if (sfq_headdrop(q) &&
net/sched/sch_sfq.c
407
q->stats.forced_mark_head++;
net/sched/sch_sfq.c
411
q->stats.forced_mark++;
net/sched/sch_sfq.c
415
q->stats.forced_drop++;
net/sched/sch_sfq.c
420
if (slot->qlen >= q->maxdepth) {
net/sched/sch_sfq.c
422
if (!sfq_headdrop(q))
net/sched/sch_sfq.c
441
sfq_inc(q, x);
net/sched/sch_sfq.c
443
if (q->tail == NULL) { /* It is the first flow */
net/sched/sch_sfq.c
446
slot->next = q->tail->next;
net/sched/sch_sfq.c
447
q->tail->next = x;
net/sched/sch_sfq.c
453
q->tail = slot;
net/sched/sch_sfq.c
455
slot->allot = q->quantum;
net/sched/sch_sfq.c
457
if (++sch->q.qlen <= q->limit)
net/sched/sch_sfq.c
478
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
484
if (q->tail == NULL)
net/sched/sch_sfq.c
488
a = q->tail->next;
net/sched/sch_sfq.c
489
slot = &q->slots[a];
net/sched/sch_sfq.c
491
q->tail = slot;
net/sched/sch_sfq.c
492
slot->allot += q->quantum;
net/sched/sch_sfq.c
496
sfq_dec(q, a);
net/sched/sch_sfq.c
498
sch->q.qlen--;
net/sched/sch_sfq.c
503
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
net/sched/sch_sfq.c
506
q->tail = NULL; /* no more active slots */
net/sched/sch_sfq.c
509
q->tail->next = next_a;
net/sched/sch_sfq.c
533
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
543
for (i = 0; i < q->maxflows; i++) {
net/sched/sch_sfq.c
544
slot = &q->slots[i];
net/sched/sch_sfq.c
549
sfq_dec(q, i);
net/sched/sch_sfq.c
554
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
net/sched/sch_sfq.c
556
q->tail = NULL;
net/sched/sch_sfq.c
559
unsigned int hash = sfq_hash(q, skb);
net/sched/sch_sfq.c
560
sfq_index x = q->ht[hash];
net/sched/sch_sfq.c
562
slot = &q->slots[x];
net/sched/sch_sfq.c
564
x = q->dep[0].next; /* get a free slot */
net/sched/sch_sfq.c
573
q->ht[hash] = x;
net/sched/sch_sfq.c
574
slot = &q->slots[x];
net/sched/sch_sfq.c
577
if (slot->qlen >= q->maxdepth)
net/sched/sch_sfq.c
580
if (q->red_parms)
net/sched/sch_sfq.c
581
slot->vars.qavg = red_calc_qavg(q->red_parms,
net/sched/sch_sfq.c
585
sfq_inc(q, x);
net/sched/sch_sfq.c
587
if (q->tail == NULL) { /* It is the first flow */
net/sched/sch_sfq.c
590
slot->next = q->tail->next;
net/sched/sch_sfq.c
591
q->tail->next = x;
net/sched/sch_sfq.c
593
q->tail = slot;
net/sched/sch_sfq.c
594
slot->allot = q->quantum;
net/sched/sch_sfq.c
597
sch->q.qlen -= dropped;
net/sched/sch_sfq.c
603
struct sfq_sched_data *q = timer_container_of(q, t, perturb_timer);
net/sched/sch_sfq.c
604
struct Qdisc *sch = q->sch;
net/sched/sch_sfq.c
613
q->perturbation = nkey;
net/sched/sch_sfq.c
614
if (!q->filter_list && q->tail)
net/sched/sch_sfq.c
621
period = READ_ONCE(q->perturb_period);
net/sched/sch_sfq.c
623
mod_timer(&q->perturb_timer, jiffies + period);
net/sched/sch_sfq.c
630
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
678
limit = q->limit;
net/sched/sch_sfq.c
679
divisor = q->divisor;
net/sched/sch_sfq.c
680
headdrop = q->headdrop;
net/sched/sch_sfq.c
681
maxdepth = q->maxdepth;
net/sched/sch_sfq.c
682
maxflows = q->maxflows;
net/sched/sch_sfq.c
683
quantum = q->quantum;
net/sched/sch_sfq.c
684
flags = q->flags;
net/sched/sch_sfq.c
721
q->limit = limit;
net/sched/sch_sfq.c
722
q->divisor = divisor;
net/sched/sch_sfq.c
723
q->headdrop = headdrop;
net/sched/sch_sfq.c
724
q->maxdepth = maxdepth;
net/sched/sch_sfq.c
725
q->maxflows = maxflows;
net/sched/sch_sfq.c
726
WRITE_ONCE(q->perturb_period, perturb_period);
net/sched/sch_sfq.c
727
q->quantum = quantum;
net/sched/sch_sfq.c
728
q->flags = flags;
net/sched/sch_sfq.c
730
swap(q->red_parms, p);
net/sched/sch_sfq.c
732
qlen = sch->q.qlen;
net/sched/sch_sfq.c
733
while (sch->q.qlen > q->limit) {
net/sched/sch_sfq.c
740
qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
net/sched/sch_sfq.c
742
timer_delete(&q->perturb_timer);
net/sched/sch_sfq.c
743
if (q->perturb_period) {
net/sched/sch_sfq.c
744
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
net/sched/sch_sfq.c
745
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
net/sched/sch_sfq.c
764
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
766
tcf_block_put(q->block);
net/sched/sch_sfq.c
767
WRITE_ONCE(q->perturb_period, 0);
net/sched/sch_sfq.c
768
timer_delete_sync(&q->perturb_timer);
net/sched/sch_sfq.c
769
sfq_free(q->ht);
net/sched/sch_sfq.c
770
sfq_free(q->slots);
net/sched/sch_sfq.c
771
kfree(q->red_parms);
net/sched/sch_sfq.c
777
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
781
q->sch = sch;
net/sched/sch_sfq.c
782
timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
net/sched/sch_sfq.c
784
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
net/sched/sch_sfq.c
789
q->dep[i].next = i + SFQ_MAX_FLOWS;
net/sched/sch_sfq.c
790
q->dep[i].prev = i + SFQ_MAX_FLOWS;
net/sched/sch_sfq.c
793
q->limit = SFQ_MAX_DEPTH;
net/sched/sch_sfq.c
794
q->maxdepth = SFQ_MAX_DEPTH;
net/sched/sch_sfq.c
795
q->cur_depth = 0;
net/sched/sch_sfq.c
796
q->tail = NULL;
net/sched/sch_sfq.c
797
q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
net/sched/sch_sfq.c
798
q->maxflows = SFQ_DEFAULT_FLOWS;
net/sched/sch_sfq.c
799
q->quantum = psched_mtu(qdisc_dev(sch));
net/sched/sch_sfq.c
800
q->perturb_period = 0;
net/sched/sch_sfq.c
801
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
net/sched/sch_sfq.c
809
q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
net/sched/sch_sfq.c
810
q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
net/sched/sch_sfq.c
811
if (!q->ht || !q->slots) {
net/sched/sch_sfq.c
816
for (i = 0; i < q->divisor; i++)
net/sched/sch_sfq.c
817
q->ht[i] = SFQ_EMPTY_SLOT;
net/sched/sch_sfq.c
819
for (i = 0; i < q->maxflows; i++) {
net/sched/sch_sfq.c
820
slot_queue_init(&q->slots[i]);
net/sched/sch_sfq.c
821
sfq_link(q, i);
net/sched/sch_sfq.c
823
if (q->limit >= 1)
net/sched/sch_sfq.c
832
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
835
struct red_parms *p = q->red_parms;
net/sched/sch_sfq.c
838
opt.v0.quantum = q->quantum;
net/sched/sch_sfq.c
839
opt.v0.perturb_period = q->perturb_period / HZ;
net/sched/sch_sfq.c
840
opt.v0.limit = q->limit;
net/sched/sch_sfq.c
841
opt.v0.divisor = q->divisor;
net/sched/sch_sfq.c
842
opt.v0.flows = q->maxflows;
net/sched/sch_sfq.c
843
opt.depth = q->maxdepth;
net/sched/sch_sfq.c
844
opt.headdrop = q->headdrop;
net/sched/sch_sfq.c
854
memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
net/sched/sch_sfq.c
855
opt.flags = q->flags;
net/sched/sch_sfq.c
883
static void sfq_unbind(struct Qdisc *q, unsigned long cl)
net/sched/sch_sfq.c
890
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
894
return q->block;
net/sched/sch_sfq.c
907
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
908
sfq_index idx = q->ht[cl - 1];
net/sched/sch_sfq.c
913
const struct sfq_slot *slot = &q->slots[idx];
net/sched/sch_sfq.c
926
struct sfq_sched_data *q = qdisc_priv(sch);
net/sched/sch_sfq.c
932
for (i = 0; i < q->divisor; i++) {
net/sched/sch_sfq.c
933
if (q->ht[i] == SFQ_EMPTY_SLOT) {
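
The sch_sfq.c fragments implement SFQ's round robin: dequeue from the slot at q->tail->next, charge the packet length against slot->allot, and when the allot is exhausted refill it with q->quantum and rotate the tail. A simplified deficit-round-robin sketch of that loop (the kernel retries within the same dequeue after rotating; this model spends one call on the rotation instead):

/* Deficit-style round robin as SFQ uses it: a circular list of
 * active slots, each refilled by one quantum per visit. */
#include <stdio.h>

struct slot { int allot; int backlog; struct slot *next; };

static int quantum = 1500;

/* Serve one packet of 'len' bytes from the head slot, rotating
 * whenever the current slot has overrun its allot. */
static struct slot *sfq_serve(struct slot *tail, int len)
{
	struct slot *slot = tail->next;

	if (slot->allot <= 0) {		/* used up: refill, move tail */
		tail = slot;
		slot->allot += quantum;
		return tail;
	}
	slot->backlog -= len;
	slot->allot -= len;
	return tail;
}

int main(void)
{
	struct slot a = { quantum, 3000, NULL }, b = { quantum, 3000, &a };
	a.next = &b;			/* two active flows, a is "tail" */
	struct slot *tail = &a;
	for (int i = 0; i < 6; i++)
		tail = sfq_serve(tail, 1000);
	printf("a backlog %d, b backlog %d\n", a.backlog, b.backlog);
	return 0;
}
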
net/sched/sch_skbprio.c
101
lp = q->lowest_prio;
net/sched/sch_skbprio.c
103
q->qstats[prio].drops++;
net/sched/sch_skbprio.c
104
q->qstats[prio].overlimits++;
net/sched/sch_skbprio.c
110
q->qstats[prio].backlog += qdisc_pkt_len(skb);
net/sched/sch_skbprio.c
113
lp_qdisc = &q->qdiscs[lp];
net/sched/sch_skbprio.c
119
q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
net/sched/sch_skbprio.c
120
q->qstats[lp].drops++;
net/sched/sch_skbprio.c
121
q->qstats[lp].overlimits++;
net/sched/sch_skbprio.c
125
if (q->lowest_prio == q->highest_prio) {
net/sched/sch_skbprio.c
126
q->lowest_prio = prio;
net/sched/sch_skbprio.c
127
q->highest_prio = prio;
net/sched/sch_skbprio.c
129
q->lowest_prio = calc_new_low_prio(q);
net/sched/sch_skbprio.c
133
if (prio > q->highest_prio)
net/sched/sch_skbprio.c
134
q->highest_prio = prio;
net/sched/sch_skbprio.c
141
struct skbprio_sched_data *q = qdisc_priv(sch);
net/sched/sch_skbprio.c
142
struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio];
net/sched/sch_skbprio.c
148
sch->q.qlen--;
net/sched/sch_skbprio.c
152
q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);
net/sched/sch_skbprio.c
156
if (q->lowest_prio == q->highest_prio) {
net/sched/sch_skbprio.c
157
q->highest_prio = 0;
net/sched/sch_skbprio.c
158
q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
net/sched/sch_skbprio.c
160
q->highest_prio = calc_new_high_prio(q);
net/sched/sch_skbprio.c
181
struct skbprio_sched_data *q = qdisc_priv(sch);
net/sched/sch_skbprio.c
186
__skb_queue_head_init(&q->qdiscs[prio]);
net/sched/sch_skbprio.c
188
memset(&q->qstats, 0, sizeof(q->qstats));
net/sched/sch_skbprio.c
189
q->highest_prio = 0;
net/sched/sch_skbprio.c
190
q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
net/sched/sch_skbprio.c
212
struct skbprio_sched_data *q = qdisc_priv(sch);
net/sched/sch_skbprio.c
216
__skb_queue_purge(&q->qdiscs[prio]);
net/sched/sch_skbprio.c
218
memset(&q->qstats, 0, sizeof(q->qstats));
net/sched/sch_skbprio.c
219
q->highest_prio = 0;
net/sched/sch_skbprio.c
220
q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
net/sched/sch_skbprio.c
225
struct skbprio_sched_data *q = qdisc_priv(sch);
net/sched/sch_skbprio.c
229
__skb_queue_purge(&q->qdiscs[prio]);
net/sched/sch_skbprio.c
252
struct skbprio_sched_data *q = qdisc_priv(sch);
net/sched/sch_skbprio.c
253
if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
net/sched/sch_skbprio.c
254
q->qstats[cl - 1].qlen) < 0)
net/sched/sch_skbprio.c
40
static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
net/sched/sch_skbprio.c
44
for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
net/sched/sch_skbprio.c
45
if (!skb_queue_empty(&q->qdiscs[prio]))
net/sched/sch_skbprio.c
53
static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
net/sched/sch_skbprio.c
57
for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
net/sched/sch_skbprio.c
58
if (!skb_queue_empty(&q->qdiscs[prio]))
net/sched/sch_skbprio.c
72
struct skbprio_sched_data *q = qdisc_priv(sch);
net/sched/sch_skbprio.c
81
qdisc = &q->qdiscs[prio];
net/sched/sch_skbprio.c
84
if (sch->q.qlen < READ_ONCE(sch->limit)) {
net/sched/sch_skbprio.c
87
q->qstats[prio].backlog += qdisc_pkt_len(skb);
net/sched/sch_skbprio.c
90
if (prio > q->highest_prio)
net/sched/sch_skbprio.c
91
q->highest_prio = prio;
net/sched/sch_skbprio.c
93
if (prio < q->lowest_prio)
net/sched/sch_skbprio.c
94
q->lowest_prio = prio;
net/sched/sch_skbprio.c
96
sch->q.qlen++;
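
The sch_skbprio.c fragments keep one FIFO per priority plus cached highest_prio/lowest_prio bounds, dropping from the lowest non-empty priority when full; the rescan helpers at source lines 40-58 are plain linear scans between the cached bounds. A small sketch of the downward rescan after the highest FIFO drains (names mirror the fragments but the model is simplified):

/* Sketch of skbprio's cached-bounds rescan. */
#include <stdio.h>

#define NPRIO 64

static int qlen[NPRIO];
static int highest_prio, lowest_prio = NPRIO - 1;

static int calc_new_high_prio(void)
{
	for (int prio = highest_prio - 1; prio >= lowest_prio; prio--)
		if (qlen[prio])
			return prio;
	return 0;	/* everything empty: reset to the default */
}

int main(void)
{
	qlen[10] = 2; qlen[40] = 1;
	lowest_prio = 10; highest_prio = 40;
	qlen[40] = 0;			/* priority-40 FIFO drained */
	highest_prio = calc_new_high_prio();
	printf("highest_prio now %d\n", highest_prio);	/* 10 */
	return 0;
}
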
net/sched/sch_taprio.c
1036
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
net/sched/sch_taprio.c
1040
int min_duration = length_to_duration(q, ETH_ZLEN);
net/sched/sch_taprio.c
1068
static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
net/sched/sch_taprio.c
1084
return fill_sched_entry(q, tb, entry, extack);
net/sched/sch_taprio.c
1087
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
net/sched/sch_taprio.c
1112
err = parse_sched_entry(q, n, entry, i, extack);
net/sched/sch_taprio.c
1127
static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
net/sched/sch_taprio.c
1148
err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
net/sched/sch_taprio.c
1168
if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
net/sched/sch_taprio.c
1173
taprio_calculate_gate_durations(q, new);
net/sched/sch_taprio.c
119
static void taprio_calculate_gate_durations(struct taprio_sched *q,
net/sched/sch_taprio.c
1210
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
1215
now = taprio_get_time(q);
net/sched/sch_taprio.c
122
struct net_device *dev = qdisc_dev(q->root);
net/sched/sch_taprio.c
1240
static void setup_first_end_time(struct taprio_sched *q,
net/sched/sch_taprio.c
1243
struct net_device *dev = qdisc_dev(q->root);
net/sched/sch_taprio.c
1258
taprio_set_budgets(q, sched, first);
net/sched/sch_taprio.c
1267
rcu_assign_pointer(q->current_entry, NULL);
net/sched/sch_taprio.c
1273
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
1276
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
net/sched/sch_taprio.c
1279
expires = hrtimer_get_expires(&q->advance_timer);
net/sched/sch_taprio.c
1289
hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
net/sched/sch_taprio.c
1293
struct taprio_sched *q,
net/sched/sch_taprio.c
1320
atomic64_set(&q->picos_per_byte, picos_per_byte);
net/sched/sch_taprio.c
1322
dev->name, (long long)atomic64_read(&q->picos_per_byte),
net/sched/sch_taprio.c
1332
struct taprio_sched *q;
net/sched/sch_taprio.c
1339
list_for_each_entry(q, &taprio_list, taprio_list) {
net/sched/sch_taprio.c
1340
if (dev != qdisc_dev(q->root))
net/sched/sch_taprio.c
1343
taprio_set_picos_per_byte(dev, q, NULL);
net/sched/sch_taprio.c
1345
stab = rtnl_dereference(q->root->stab);
net/sched/sch_taprio.c
1348
oper = rcu_dereference(q->oper_sched);
net/sched/sch_taprio.c
1350
taprio_update_queue_max_sdu(q, oper, stab);
net/sched/sch_taprio.c
1352
admin = rcu_dereference(q->admin_sched);
net/sched/sch_taprio.c
1354
taprio_update_queue_max_sdu(q, admin, stab);
net/sched/sch_taprio.c
1363
static void setup_txtime(struct taprio_sched *q,
net/sched/sch_taprio.c
1428
static void taprio_offload_config_changed(struct taprio_sched *q)
net/sched/sch_taprio.c
1432
oper = rtnl_dereference(q->oper_sched);
net/sched/sch_taprio.c
1433
admin = rtnl_dereference(q->admin_sched);
net/sched/sch_taprio.c
1435
switch_schedules(q, &admin, &oper);
net/sched/sch_taprio.c
1486
static void taprio_detect_broken_mqprio(struct taprio_sched *q)
net/sched/sch_taprio.c
1488
struct net_device *dev = qdisc_dev(q->root);
net/sched/sch_taprio.c
1494
q->broken_mqprio = caps.broken_mqprio;
net/sched/sch_taprio.c
1495
if (q->broken_mqprio)
net/sched/sch_taprio.c
1500
q->detected_mqprio = true;
net/sched/sch_taprio.c
1503
static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
net/sched/sch_taprio.c
1505
if (!q->detected_mqprio)
net/sched/sch_taprio.c
1508
if (q->broken_mqprio)
net/sched/sch_taprio.c
1515
struct taprio_sched *q,
net/sched/sch_taprio.c
1535
if (q->max_sdu[tc]) {
net/sched/sch_taprio.c
1554
mqprio_fp_to_offload(q->fp, &offload->mqprio);
net/sched/sch_taprio.c
1557
offload->max_sdu[tc] = q->max_sdu[tc];
net/sched/sch_taprio.c
1566
q->offloaded = true;
net/sched/sch_taprio.c
1581
struct taprio_sched *q,
net/sched/sch_taprio.c
1588
if (!q->offloaded)
net/sched/sch_taprio.c
1606
q->offloaded = false;
net/sched/sch_taprio.c
1624
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
1628
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
net/sched/sch_taprio.c
1658
(q->clockid != -1 && q->clockid != clockid)) {
net/sched/sch_taprio.c
1684
WRITE_ONCE(q->tk_offset, tk_offset);
net/sched/sch_taprio.c
1686
q->clockid = clockid;
net/sched/sch_taprio.c
1750
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
1761
max_sdu[tc] = q->max_sdu[tc];
net/sched/sch_taprio.c
1762
fp[tc] = q->fp[tc];
net/sched/sch_taprio.c
1773
q->max_sdu[tc] = max_sdu[tc];
net/sched/sch_taprio.c
1774
q->fp[tc] = fp[tc];
net/sched/sch_taprio.c
178
static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
net/sched/sch_taprio.c
1780
if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) {
net/sched/sch_taprio.c
181
enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);
net/sched/sch_taprio.c
1822
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
1854
if (q->flags != TAPRIO_FLAGS_INVALID && q->flags != taprio_flags) {
net/sched/sch_taprio.c
1859
q->flags = taprio_flags;
net/sched/sch_taprio.c
1862
taprio_set_picos_per_byte(dev, q, extack);
net/sched/sch_taprio.c
1864
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
net/sched/sch_taprio.c
1879
oper = rtnl_dereference(q->oper_sched);
net/sched/sch_taprio.c
1880
admin = rtnl_dereference(q->admin_sched);
net/sched/sch_taprio.c
1900
q->cur_txq[i] = mqprio->offset[i];
net/sched/sch_taprio.c
1909
err = parse_taprio_schedule(q, tb, new_admin, extack);
net/sched/sch_taprio.c
191
static ktime_t taprio_get_time(const struct taprio_sched *q)
net/sched/sch_taprio.c
1923
taprio_update_queue_max_sdu(q, new_admin, stab);
net/sched/sch_taprio.c
1925
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
net/sched/sch_taprio.c
1926
err = taprio_enable_offload(dev, q, new_admin, extack);
net/sched/sch_taprio.c
1928
err = taprio_disable_offload(dev, q, extack);
net/sched/sch_taprio.c
193
return taprio_mono_to_any(q, ktime_get());
net/sched/sch_taprio.c
1936
if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
net/sched/sch_taprio.c
1942
q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
net/sched/sch_taprio.c
1945
if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
net/sched/sch_taprio.c
1946
!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
net/sched/sch_taprio.c
1947
!hrtimer_active(&q->advance_timer)) {
net/sched/sch_taprio.c
1948
hrtimer_setup(&q->advance_timer, advance_sched, q->clockid, HRTIMER_MODE_ABS);
net/sched/sch_taprio.c
1957
setup_txtime(q, new_admin, start);
net/sched/sch_taprio.c
1959
if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
net/sched/sch_taprio.c
1961
rcu_assign_pointer(q->oper_sched, new_admin);
net/sched/sch_taprio.c
1968
admin = rcu_replace_pointer(q->admin_sched, new_admin,
net/sched/sch_taprio.c
1973
setup_first_end_time(q, new_admin, start);
net/sched/sch_taprio.c
1976
spin_lock_irqsave(&q->current_entry_lock, flags);
net/sched/sch_taprio.c
1980
admin = rcu_replace_pointer(q->admin_sched, new_admin,
net/sched/sch_taprio.c
1985
spin_unlock_irqrestore(&q->current_entry_lock, flags);
net/sched/sch_taprio.c
1987
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
net/sched/sch_taprio.c
1988
taprio_offload_config_changed(q);
net/sched/sch_taprio.c
2010
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
2014
hrtimer_cancel(&q->advance_timer);
net/sched/sch_taprio.c
2016
if (q->qdiscs) {
net/sched/sch_taprio.c
2018
if (q->qdiscs[i])
net/sched/sch_taprio.c
2019
qdisc_reset(q->qdiscs[i]);
net/sched/sch_taprio.c
2025
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
2030
list_del(&q->taprio_list);
net/sched/sch_taprio.c
2035
hrtimer_cancel(&q->advance_timer);
net/sched/sch_taprio.c
2038
taprio_disable_offload(dev, q, NULL);
net/sched/sch_taprio.c
2040
if (q->qdiscs) {
net/sched/sch_taprio.c
2042
qdisc_put(q->qdiscs[i]);
net/sched/sch_taprio.c
2044
kfree(q->qdiscs);
net/sched/sch_taprio.c
2046
q->qdiscs = NULL;
net/sched/sch_taprio.c
2050
oper = rtnl_dereference(q->oper_sched);
net/sched/sch_taprio.c
2051
admin = rtnl_dereference(q->admin_sched);
net/sched/sch_taprio.c
2059
taprio_cleanup_broken_mqprio(q);
net/sched/sch_taprio.c
2065
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
2069
spin_lock_init(&q->current_entry_lock);
net/sched/sch_taprio.c
2071
hrtimer_setup(&q->advance_timer, advance_sched, CLOCK_TAI, HRTIMER_MODE_ABS);
net/sched/sch_taprio.c
2073
q->root = sch;
net/sched/sch_taprio.c
2078
q->clockid = -1;
net/sched/sch_taprio.c
2079
q->flags = TAPRIO_FLAGS_INVALID;
net/sched/sch_taprio.c
2081
list_add(&q->taprio_list, &taprio_list);
net/sched/sch_taprio.c
209
static void switch_schedules(struct taprio_sched *q,
net/sched/sch_taprio.c
2093
q->qdiscs = kzalloc_objs(q->qdiscs[0], dev->num_tx_queues);
net/sched/sch_taprio.c
2094
if (!q->qdiscs)
net/sched/sch_taprio.c
2116
q->qdiscs[i] = qdisc;
net/sched/sch_taprio.c
2120
q->fp[tc] = TC_FP_EXPRESS;
net/sched/sch_taprio.c
2122
taprio_detect_broken_mqprio(q);
net/sched/sch_taprio.c
2129
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
213
rcu_assign_pointer(q->oper_sched, *admin);
net/sched/sch_taprio.c
2138
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
net/sched/sch_taprio.c
2139
struct Qdisc *qdisc = q->qdiscs[ntx];
net/sched/sch_taprio.c
214
rcu_assign_pointer(q->admin_sched, NULL);
net/sched/sch_taprio.c
2179
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
2196
*old = q->qdiscs[cl - 1];
net/sched/sch_taprio.c
2197
if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
net/sched/sch_taprio.c
2205
q->qdiscs[cl - 1] = new;
net/sched/sch_taprio.c
2282
struct taprio_sched *q,
net/sched/sch_taprio.c
2300
if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc]))
net/sched/sch_taprio.c
2381
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
2396
if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
net/sched/sch_taprio.c
2397
nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
net/sched/sch_taprio.c
2400
if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
net/sched/sch_taprio.c
2403
if (q->txtime_delay &&
net/sched/sch_taprio.c
2404
nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
net/sched/sch_taprio.c
2409
oper = rtnl_dereference(q->oper_sched);
net/sched/sch_taprio.c
2410
admin = rtnl_dereference(q->admin_sched);
net/sched/sch_taprio.c
2412
if (oper && taprio_dump_tc_entries(skb, q, oper))
net/sched/sch_taprio.c
2449
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
2456
return q->qdiscs[ntx];
net/sched/sch_taprio.c
257
static int length_to_duration(struct taprio_sched *q, int len)
net/sched/sch_taprio.c
259
return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
net/sched/sch_taprio.c
262
static int duration_to_length(struct taprio_sched *q, u64 duration)
net/sched/sch_taprio.c
264
return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
net/sched/sch_taprio.c
271
static void taprio_update_queue_max_sdu(struct taprio_sched *q,
net/sched/sch_taprio.c
275
struct net_device *dev = qdisc_dev(q->root);
net/sched/sch_taprio.c
283
max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;
net/sched/sch_taprio.c
293
max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
net/sched/sch_taprio.c
335
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
342
packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));
net/sched/sch_taprio.c
402
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
408
sched = rcu_dereference(q->oper_sched);
net/sched/sch_taprio.c
409
admin = rcu_dereference(q->admin_sched);
net/sched/sch_taprio.c
419
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
net/sched/sch_taprio.c
450
return taprio_mono_to_any(q, skb->skb_mstamp_ns);
net/sched/sch_taprio.c
471
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
478
now = taprio_get_time(q);
net/sched/sch_taprio.c
479
minimum_time = ktime_add_ns(now, q->txtime_delay);
net/sched/sch_taprio.c
481
tcp_tstamp = get_tcp_tstamp(q, skb);
net/sched/sch_taprio.c
485
admin = rcu_dereference(q->admin_sched);
net/sched/sch_taprio.c
486
sched = rcu_dereference(q->oper_sched);
net/sched/sch_taprio.c
488
switch_schedules(q, &admin, &sched);
net/sched/sch_taprio.c
497
packet_transmit_time = length_to_duration(q, len);
net/sched/sch_taprio.c
543
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
553
sched = rcu_dereference(q->oper_sched);
net/sched/sch_taprio.c
564
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
570
} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
net/sched/sch_taprio.c
577
sch->q.qlen++;
net/sched/sch_taprio.c
630
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
636
child = q->qdiscs[queue];
net/sched/sch_taprio.c
662
static void taprio_set_budgets(struct taprio_sched *q,
net/sched/sch_taprio.c
666
struct net_device *dev = qdisc_dev(q->root);
net/sched/sch_taprio.c
676
atomic64_read(&q->picos_per_byte));
net/sched/sch_taprio.c
710
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
712
struct Qdisc *child = q->qdiscs[txq];
net/sched/sch_taprio.c
723
if (TXTIME_ASSIST_IS_ENABLED(q->flags))
net/sched/sch_taprio.c
737
guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));
net/sched/sch_taprio.c
758
sch->q.qlen--;
net/sched/sch_taprio.c
780
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
787
int first_txq = q->cur_txq[tc];
net/sched/sch_taprio.c
793
skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
net/sched/sch_taprio.c
796
taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);
net/sched/sch_taprio.c
798
if (q->cur_txq[tc] >= dev->num_tx_queues)
net/sched/sch_taprio.c
799
q->cur_txq[tc] = first_txq;
net/sched/sch_taprio.c
803
} while (q->cur_txq[tc] != first_txq);
net/sched/sch_taprio.c
834
struct taprio_sched *q = qdisc_priv(sch);
net/sched/sch_taprio.c
840
entry = rcu_dereference(q->current_entry);
net/sched/sch_taprio.c
860
if (q->broken_mqprio)
net/sched/sch_taprio.c
920
struct taprio_sched *q = container_of(timer, struct taprio_sched,
net/sched/sch_taprio.c
922
struct net_device *dev = qdisc_dev(q->root);
net/sched/sch_taprio.c
926
struct Qdisc *sch = q->root;
net/sched/sch_taprio.c
930
spin_lock(&q->current_entry_lock);
net/sched/sch_taprio.c
931
entry = rcu_dereference_protected(q->current_entry,
net/sched/sch_taprio.c
932
lockdep_is_held(&q->current_entry_lock));
net/sched/sch_taprio.c
933
oper = rcu_dereference_protected(q->oper_sched,
net/sched/sch_taprio.c
934
lockdep_is_held(&q->current_entry_lock));
net/sched/sch_taprio.c
935
admin = rcu_dereference_protected(q->admin_sched,
net/sched/sch_taprio.c
936
lockdep_is_held(&q->current_entry_lock));
net/sched/sch_taprio.c
939
switch_schedules(q, &admin, &oper);
net/sched/sch_taprio.c
979
switch_schedules(q, &admin, &oper);
net/sched/sch_taprio.c
983
taprio_set_budgets(q, oper, next);
net/sched/sch_taprio.c
986
rcu_assign_pointer(q->current_entry, next);
net/sched/sch_taprio.c
987
spin_unlock(&q->current_entry_lock);
net/sched/sch_taprio.c
989
hrtimer_set_expires(&q->advance_timer, end_time);
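The taprio conversions quoted above (length_to_duration and duration_to_length, sch_taprio.c lines 257 and 262) turn byte counts into wire time and back through a stored picoseconds-per-byte figure. A standalone sketch of the same arithmetic, assuming PSEC_PER_NSEC is 1000 as in the kernel headers and folding the atomic64_t field into a plain integer:

    /* Minimal userspace sketch of taprio's length <-> duration
     * conversion; field and helper names are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define PSEC_PER_NSEC 1000ULL

    /* picoseconds needed per byte at a given link speed (bits/s) */
    static uint64_t picos_per_byte(uint64_t speed_bps)
    {
            /* 8 bits per byte, 1e12 picoseconds per second */
            return (8ULL * 1000000000000ULL) / speed_bps;
    }

    static uint64_t length_to_duration_ns(uint64_t ppb, uint64_t len)
    {
            return (len * ppb) / PSEC_PER_NSEC;          /* bytes -> ns */
    }

    static uint64_t duration_to_length(uint64_t ppb, uint64_t dur_ns)
    {
            return (dur_ns * PSEC_PER_NSEC) / ppb;       /* ns -> bytes */
    }

    int main(void)
    {
            uint64_t ppb = picos_per_byte(1000000000ULL); /* 1 Gb/s: 8000 ps/byte */
            printf("1500 bytes take %llu ns\n",
                   (unsigned long long)length_to_duration_ns(ppb, 1500));
            printf("12000 ns fit %llu bytes\n",
                   (unsigned long long)duration_to_length(ppb, 12000));
            return 0;
    }

At 1 Gb/s a 1500-byte frame costs 12000 ns, which is the budget figure the gate-interval code above works with.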
net/sched/sch_tbf.c
144
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
154
qopt.replace_params.rate = q->rate;
net/sched/sch_tbf.c
155
qopt.replace_params.max_size = q->max_size;
net/sched/sch_tbf.c
208
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
225
ret = qdisc_enqueue(segs, q->qdisc, to_free);
net/sched/sch_tbf.c
234
sch->q.qlen += nb;
net/sched/sch_tbf.c
249
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
253
if (qdisc_pkt_len(skb) > q->max_size) {
net/sched/sch_tbf.c
255
skb_gso_validate_mac_len(skb, q->max_size))
net/sched/sch_tbf.c
259
ret = qdisc_enqueue(skb, q->qdisc, to_free);
net/sched/sch_tbf.c
267
sch->q.qlen++;
net/sched/sch_tbf.c
271
static bool tbf_peak_present(const struct tbf_sched_data *q)
net/sched/sch_tbf.c
273
return q->peak.rate_bytes_ps;
net/sched/sch_tbf.c
278
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
281
skb = q->qdisc->ops->peek(q->qdisc);
net/sched/sch_tbf.c
290
toks = min_t(s64, now - q->t_c, q->buffer);
net/sched/sch_tbf.c
292
if (tbf_peak_present(q)) {
net/sched/sch_tbf.c
293
ptoks = toks + q->ptokens;
net/sched/sch_tbf.c
294
if (ptoks > q->mtu)
net/sched/sch_tbf.c
295
ptoks = q->mtu;
net/sched/sch_tbf.c
296
ptoks -= (s64) psched_l2t_ns(&q->peak, len);
net/sched/sch_tbf.c
298
toks += q->tokens;
net/sched/sch_tbf.c
299
if (toks > q->buffer)
net/sched/sch_tbf.c
300
toks = q->buffer;
net/sched/sch_tbf.c
301
toks -= (s64) psched_l2t_ns(&q->rate, len);
net/sched/sch_tbf.c
304
skb = qdisc_dequeue_peeked(q->qdisc);
net/sched/sch_tbf.c
308
q->t_c = now;
net/sched/sch_tbf.c
309
q->tokens = toks;
net/sched/sch_tbf.c
310
q->ptokens = ptoks;
net/sched/sch_tbf.c
312
sch->q.qlen--;
net/sched/sch_tbf.c
317
qdisc_watchdog_schedule_ns(&q->watchdog,
net/sched/sch_tbf.c
338
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
340
qdisc_reset(q->qdisc);
net/sched/sch_tbf.c
341
q->t_c = ktime_get_ns();
net/sched/sch_tbf.c
342
q->tokens = q->buffer;
net/sched/sch_tbf.c
343
q->ptokens = q->mtu;
net/sched/sch_tbf.c
344
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_tbf.c
361
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
438
if (q->qdisc != &noop_qdisc) {
net/sched/sch_tbf.c
439
err = fifo_set_limit(q->qdisc, qopt->limit);
net/sched/sch_tbf.c
456
qdisc_purge_queue(q->qdisc);
net/sched/sch_tbf.c
457
old = q->qdisc;
net/sched/sch_tbf.c
458
q->qdisc = child;
net/sched/sch_tbf.c
460
q->limit = qopt->limit;
net/sched/sch_tbf.c
462
q->mtu = mtu;
net/sched/sch_tbf.c
464
q->mtu = PSCHED_TICKS2NS(qopt->mtu);
net/sched/sch_tbf.c
465
q->max_size = max_size;
net/sched/sch_tbf.c
467
q->buffer = buffer;
net/sched/sch_tbf.c
469
q->buffer = PSCHED_TICKS2NS(qopt->buffer);
net/sched/sch_tbf.c
470
q->tokens = q->buffer;
net/sched/sch_tbf.c
471
q->ptokens = q->mtu;
net/sched/sch_tbf.c
473
memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
net/sched/sch_tbf.c
474
memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
net/sched/sch_tbf.c
488
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
490
qdisc_watchdog_init(&q->watchdog, sch);
net/sched/sch_tbf.c
491
q->qdisc = &noop_qdisc;
net/sched/sch_tbf.c
496
q->t_c = ktime_get_ns();
net/sched/sch_tbf.c
503
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
505
qdisc_watchdog_cancel(&q->watchdog);
net/sched/sch_tbf.c
507
qdisc_put(q->qdisc);
net/sched/sch_tbf.c
512
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
525
opt.limit = q->limit;
net/sched/sch_tbf.c
526
psched_ratecfg_getrate(&opt.rate, &q->rate);
net/sched/sch_tbf.c
527
if (tbf_peak_present(q))
net/sched/sch_tbf.c
528
psched_ratecfg_getrate(&opt.peakrate, &q->peak);
net/sched/sch_tbf.c
531
opt.mtu = PSCHED_NS2TICKS(q->mtu);
net/sched/sch_tbf.c
532
opt.buffer = PSCHED_NS2TICKS(q->buffer);
net/sched/sch_tbf.c
535
if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
net/sched/sch_tbf.c
536
nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
net/sched/sch_tbf.c
539
if (tbf_peak_present(q) &&
net/sched/sch_tbf.c
540
q->peak.rate_bytes_ps >= (1ULL << 32) &&
net/sched/sch_tbf.c
541
nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
net/sched/sch_tbf.c
555
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
558
tcm->tcm_info = q->qdisc->handle;
net/sched/sch_tbf.c
566
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
571
*old = qdisc_replace(sch, new, &q->qdisc);
net/sched/sch_tbf.c
579
struct tbf_sched_data *q = qdisc_priv(sch);
net/sched/sch_tbf.c
580
return q->qdisc;
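The tbf_dequeue fragments above implement the classic token-bucket test: credit accrues with elapsed time, is capped at the burst buffer, and a packet may leave only when the credit covers its byte cost. A single-rate sketch of that accounting (the peak-rate ptokens path and psched_l2t_ns() are simplified away; names and the ns-per-byte cost are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct tbf {
            int64_t tokens;        /* current credit, ns */
            int64_t buffer;        /* max credit (burst), ns */
            int64_t t_c;           /* last checkpoint, ns */
            uint64_t ns_per_byte;
    };

    static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }

    /* Returns 1 if a packet of `len` bytes may pass at time `now`. */
    static int tbf_may_send(struct tbf *q, int64_t now, uint32_t len)
    {
            int64_t toks = min64(now - q->t_c, q->buffer);

            toks += q->tokens;
            if (toks > q->buffer)
                    toks = q->buffer;
            toks -= (int64_t)(len * q->ns_per_byte);

            if (toks >= 0) {
                    q->t_c = now;
                    q->tokens = toks;
                    return 1;
            }
            return 0;   /* caller would arm a watchdog for -toks ns */
    }

    int main(void)
    {
            struct tbf q = { .tokens = 0, .buffer = 100000, .t_c = 0,
                             .ns_per_byte = 8 };          /* ~1 Gb/s */
            printf("%d\n", tbf_may_send(&q, 50000, 1500)); /* 1: credit covers it */
            printf("%d\n", tbf_may_send(&q, 50001, 9000)); /* 0: must wait */
            return 0;
    }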
net/sched/sch_teql.c
102
struct net_device *m = qdisc_dev(q);
net/sched/sch_teql.c
110
sch->q.qlen = dat->q.qlen + q->q.qlen;
net/sched/sch_teql.c
126
skb_queue_purge(&dat->q);
net/sched/sch_teql.c
132
struct Qdisc *q, *prev;
net/sched/sch_teql.c
142
q = NEXT_SLAVE(prev);
net/sched/sch_teql.c
143
if (q == sch) {
net/sched/sch_teql.c
144
NEXT_SLAVE(prev) = NEXT_SLAVE(q);
net/sched/sch_teql.c
145
if (q == master->slaves) {
net/sched/sch_teql.c
146
master->slaves = NEXT_SLAVE(q);
net/sched/sch_teql.c
147
if (q == master->slaves) {
net/sched/sch_teql.c
157
skb_queue_purge(&dat->q);
net/sched/sch_teql.c
161
} while ((prev = q) != master->slaves);
net/sched/sch_teql.c
170
struct teql_sched_data *q = qdisc_priv(sch);
net/sched/sch_teql.c
183
q->m = m;
net/sched/sch_teql.c
185
skb_queue_head_init(&q->q);
net/sched/sch_teql.c
207
q->next = NEXT_SLAVE(m->slaves);
net/sched/sch_teql.c
210
q->next = sch;
net/sched/sch_teql.c
282
struct Qdisc *start, *q;
net/sched/sch_teql.c
294
q = start;
net/sched/sch_teql.c
295
if (!q)
net/sched/sch_teql.c
299
struct net_device *slave = qdisc_dev(q);
net/sched/sch_teql.c
302
if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q)
net/sched/sch_teql.c
320
master->slaves = NEXT_SLAVE(q);
net/sched/sch_teql.c
332
master->slaves = NEXT_SLAVE(q);
net/sched/sch_teql.c
339
} while ((q = NEXT_SLAVE(q)) != start);
net/sched/sch_teql.c
360
struct Qdisc *q;
net/sched/sch_teql.c
370
q = m->slaves;
net/sched/sch_teql.c
372
struct net_device *slave = qdisc_dev(q);
net/sched/sch_teql.c
392
} while ((q = NEXT_SLAVE(q)) != m->slaves);
net/sched/sch_teql.c
420
struct Qdisc *q;
net/sched/sch_teql.c
422
q = m->slaves;
net/sched/sch_teql.c
423
if (q) {
net/sched/sch_teql.c
425
if (new_mtu > qdisc_dev(q)->mtu)
net/sched/sch_teql.c
427
} while ((q = NEXT_SLAVE(q)) != m->slaves);
net/sched/sch_teql.c
66
struct sk_buff_head q;
net/sched/sch_teql.c
69
#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
net/sched/sch_teql.c
79
struct teql_sched_data *q = qdisc_priv(sch);
net/sched/sch_teql.c
81
if (q->q.qlen < READ_ONCE(dev->tx_queue_len)) {
net/sched/sch_teql.c
82
__skb_queue_tail(&q->q, skb);
net/sched/sch_teql.c
95
struct Qdisc *q;
net/sched/sch_teql.c
97
skb = __skb_dequeue(&dat->q);
net/sched/sch_teql.c
99
q = rcu_dereference_bh(dat_queue->qdisc);
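The sch_teql fragments above keep the slave qdiscs on a circular singly linked ring (NEXT_SLAVE), and removal walks it with a prev cursor, splices the match out, and repoints the head if it referenced the removed node. A generic sketch of that unlink, reconstructed from the quoted lines:

    #include <stddef.h>

    struct node { struct node *next; };

    /* Remove `sch` from the circular ring whose head pointer is *slaves. */
    static void ring_remove(struct node **slaves, struct node *sch)
    {
            struct node *q, *prev = *slaves;

            do {
                    q = prev->next;
                    if (q == sch) {
                            prev->next = q->next;
                            if (q == *slaves) {
                                    *slaves = q->next;
                                    if (q == *slaves)   /* it was the last node */
                                            *slaves = NULL;
                            }
                            return;
                    }
            } while ((prev = q) != *slaves);
    }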
net/sctp/inqueue.c
241
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
net/sctp/inqueue.c
243
INIT_WORK(&q->immediate, callback);
net/sctp/inqueue.c
72
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
net/sctp/inqueue.c
85
list_add_tail(&chunk->list, &q->in_chunk_list);
net/sctp/inqueue.c
88
q->immediate.func(&q->immediate);
net/sctp/output.c
678
struct sctp_outq *q = &asoc->outqueue;
net/sctp/output.c
694
inflight = q->outstanding_bytes;
net/sctp/output.c
742
if (chunk->skb->len + q->out_qlen > transport->pathmtu -
net/sctp/outqueue.c
1012
error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
net/sctp/outqueue.c
1031
if (!list_empty(&ctx->q->retransmit))
net/sctp/outqueue.c
1071
if (!list_empty(&ctx->q->retransmit) &&
net/sctp/outqueue.c
1084
while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
net/sctp/outqueue.c
1090
sctp_sched_dequeue_done(ctx->q, chunk);
net/sctp/outqueue.c
1097
sctp_outq_head_data(ctx->q, chunk);
net/sctp/outqueue.c
1104
__func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
net/sctp/outqueue.c
1121
sctp_outq_head_data(ctx->q, chunk);
net/sctp/outqueue.c
1139
sctp_sched_dequeue_done(ctx->q, chunk);
net/sctp/outqueue.c
1175
ctx->q->asoc->base.sk->sk_err = -error;
net/sctp/outqueue.c
1192
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
net/sctp/outqueue.c
1195
.q = q,
net/sctp/outqueue.c
1198
.asoc = q->asoc,
net/sctp/outqueue.c
1214
if (q->asoc->src_out_of_asoc_ok)
net/sctp/outqueue.c
1248
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
net/sctp/outqueue.c
1250
struct sctp_association *asoc = q->asoc;
net/sctp/outqueue.c
1330
sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
net/sctp/outqueue.c
1338
sctp_check_transmitted(q, &transport->transmitted,
net/sctp/outqueue.c
1362
sctp_mark_missing(q, &transport->transmitted, transport,
net/sctp/outqueue.c
1372
list_for_each_safe(lchunk, temp, &q->sacked) {
net/sctp/outqueue.c
1392
outstanding = q->outstanding_bytes;
net/sctp/outqueue.c
1401
asoc->stream.si->generate_ftsn(q, sack_ctsn);
net/sctp/outqueue.c
1408
return sctp_outq_is_empty(q);
net/sctp/outqueue.c
1415
int sctp_outq_is_empty(const struct sctp_outq *q)
net/sctp/outqueue.c
1417
return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
net/sctp/outqueue.c
1418
list_empty(&q->retransmit);
net/sctp/outqueue.c
1435
static void sctp_check_transmitted(struct sctp_outq *q,
net/sctp/outqueue.c
1464
sctp_insert_list(&q->abandoned, lchunk);
net/sctp/outqueue.c
1469
if (transmitted_queue != &q->retransmit &&
net/sctp/outqueue.c
1474
q->outstanding_bytes -= sctp_data_size(tchunk);
net/sctp/outqueue.c
1521
q->asoc->peer.primary_path->cacc.
net/sctp/outqueue.c
1558
&q->sacked);
net/sctp/outqueue.c
1655
q->outstanding_bytes -= bytes_acked + migrate_bytes;
net/sctp/outqueue.c
1671
if (!q->asoc->peer.rwnd &&
net/sctp/outqueue.c
1673
(sack_ctsn+2 == q->asoc->next_tsn) &&
net/sctp/outqueue.c
1674
q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
net/sctp/outqueue.c
1678
q->asoc->overall_error_count = 0;
net/sctp/outqueue.c
1708
static void sctp_mark_missing(struct sctp_outq *q,
net/sctp/outqueue.c
1717
struct sctp_association *asoc = q->asoc;
net/sctp/outqueue.c
1762
sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
net/sctp/outqueue.c
1821
void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
net/sctp/outqueue.c
1823
struct sctp_association *asoc = q->asoc;
net/sctp/outqueue.c
1865
list_for_each_safe(lchunk, temp, &q->abandoned) {
net/sctp/outqueue.c
191
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
net/sctp/outqueue.c
1921
list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
net/sctp/outqueue.c
193
memset(q, 0, sizeof(struct sctp_outq));
net/sctp/outqueue.c
195
q->asoc = asoc;
net/sctp/outqueue.c
196
INIT_LIST_HEAD(&q->out_chunk_list);
net/sctp/outqueue.c
197
INIT_LIST_HEAD(&q->control_chunk_list);
net/sctp/outqueue.c
198
INIT_LIST_HEAD(&q->retransmit);
net/sctp/outqueue.c
199
INIT_LIST_HEAD(&q->sacked);
net/sctp/outqueue.c
200
INIT_LIST_HEAD(&q->abandoned);
net/sctp/outqueue.c
206
static void __sctp_outq_teardown(struct sctp_outq *q)
net/sctp/outqueue.c
213
list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
net/sctp/outqueue.c
219
sctp_chunk_fail(chunk, q->error);
net/sctp/outqueue.c
225
list_for_each_safe(lchunk, temp, &q->sacked) {
net/sctp/outqueue.c
229
sctp_chunk_fail(chunk, q->error);
net/sctp/outqueue.c
234
list_for_each_safe(lchunk, temp, &q->retransmit) {
net/sctp/outqueue.c
238
sctp_chunk_fail(chunk, q->error);
net/sctp/outqueue.c
243
list_for_each_safe(lchunk, temp, &q->abandoned) {
net/sctp/outqueue.c
247
sctp_chunk_fail(chunk, q->error);
net/sctp/outqueue.c
252
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
net/sctp/outqueue.c
253
sctp_sched_dequeue_done(q, chunk);
net/sctp/outqueue.c
256
sctp_chunk_fail(chunk, q->error);
net/sctp/outqueue.c
261
list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
net/sctp/outqueue.c
267
void sctp_outq_teardown(struct sctp_outq *q)
net/sctp/outqueue.c
269
__sctp_outq_teardown(q);
net/sctp/outqueue.c
270
sctp_outq_init(q->asoc, q);
net/sctp/outqueue.c
274
void sctp_outq_free(struct sctp_outq *q)
net/sctp/outqueue.c
277
__sctp_outq_teardown(q);
net/sctp/outqueue.c
281
void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
net/sctp/outqueue.c
283
struct net *net = q->asoc->base.net;
net/sctp/outqueue.c
285
pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
net/sctp/outqueue.c
295
__func__, q, chunk, chunk && chunk->chunk_hdr ?
net/sctp/outqueue.c
299
sctp_outq_tail_data(q, chunk);
net/sctp/outqueue.c
308
list_add_tail(&chunk->list, &q->control_chunk_list);
net/sctp/outqueue.c
312
if (!q->cork)
net/sctp/outqueue.c
313
sctp_outq_flush(q, 0, gfp);
net/sctp/outqueue.c
385
struct sctp_outq *q = &asoc->outqueue;
net/sctp/outqueue.c
389
q->sched->unsched_all(&asoc->stream);
net/sctp/outqueue.c
391
list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
net/sctp/outqueue.c
399
sctp_sched_dequeue_common(q, chk);
net/sctp/outqueue.c
417
q->sched->sched_all(&asoc->stream);
net/sctp/outqueue.c
43
static void sctp_check_transmitted(struct sctp_outq *q,
net/sctp/outqueue.c
450
void sctp_retransmit_mark(struct sctp_outq *q,
net/sctp/outqueue.c
465
sctp_insert_list(&q->abandoned, lchunk);
net/sctp/outqueue.c
476
q->outstanding_bytes -= sctp_data_size(chunk);
net/sctp/outqueue.c
477
q->asoc->peer.rwnd += sctp_data_size(chunk);
net/sctp/outqueue.c
497
q->asoc->peer.rwnd += sctp_data_size(chunk);
net/sctp/outqueue.c
498
q->outstanding_bytes -= sctp_data_size(chunk);
net/sctp/outqueue.c
50
static void sctp_mark_missing(struct sctp_outq *q,
net/sctp/outqueue.c
524
sctp_insert_list(&q->retransmit, lchunk);
net/sctp/outqueue.c
537
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
net/sctp/outqueue.c
540
struct net *net = q->asoc->base.net;
net/sctp/outqueue.c
56
static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
net/sctp/outqueue.c
560
q->fast_rtx = 1;
net/sctp/outqueue.c
573
sctp_retransmit_mark(q, transport, reason);
net/sctp/outqueue.c
580
q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
net/sctp/outqueue.c
587
sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
net/sctp/outqueue.c
59
static inline void sctp_outq_head_data(struct sctp_outq *q,
net/sctp/outqueue.c
598
static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
net/sctp/outqueue.c
610
lqueue = &q->retransmit;
net/sctp/outqueue.c
611
fast_rtx = q->fast_rtx;
net/sctp/outqueue.c
640
sctp_insert_list(&q->abandoned,
net/sctp/outqueue.c
65
list_add(&ch->list, &q->out_chunk_list);
net/sctp/outqueue.c
66
q->out_qlen += ch->skb->len;
net/sctp/outqueue.c
69
oute = SCTP_SO(&q->asoc->stream, stream)->ext;
net/sctp/outqueue.c
725
q->asoc->stats.rtxchunks++;
net/sctp/outqueue.c
74
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
net/sctp/outqueue.c
753
q->fast_rtx = 0;
net/sctp/outqueue.c
759
void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
net/sctp/outqueue.c
76
return q->sched->dequeue(q);
net/sctp/outqueue.c
761
if (q->cork)
net/sctp/outqueue.c
762
q->cork = 0;
net/sctp/outqueue.c
764
sctp_outq_flush(q, 0, gfp);
net/sctp/outqueue.c
788
struct sctp_outq *q;
net/sctp/outqueue.c
80
static inline void sctp_outq_tail_data(struct sctp_outq *q,
net/sctp/outqueue.c
86
list_add_tail(&ch->list, &q->out_chunk_list);
net/sctp/outqueue.c
87
q->out_qlen += ch->skb->len;
net/sctp/outqueue.c
885
list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
net/sctp/outqueue.c
90
oute = SCTP_SO(&q->asoc->stream, stream)->ext;
net/sctp/outqueue.c
963
list_add(&chunk->list, &ctx->q->control_chunk_list);
net/sctp/socket.c
172
struct sctp_outq *q = &asoc->outqueue;
net/sctp/socket.c
180
list_for_each_entry(chunk, &q->retransmit, transmitted_list)
net/sctp/socket.c
183
list_for_each_entry(chunk, &q->sacked, transmitted_list)
net/sctp/socket.c
186
list_for_each_entry(chunk, &q->abandoned, transmitted_list)
net/sctp/socket.c
189
list_for_each_entry(chunk, &q->out_chunk_list, list)
net/sctp/stream_interleave.c
1098
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
net/sctp/stream_interleave.c
1101
struct sctp_association *asoc = q->asoc;
net/sctp/stream_interleave.c
1114
list_for_each_safe(lchunk, temp, &q->abandoned) {
net/sctp/stream_interleave.c
1150
list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
net/sctp/stream_sched.c
235
void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
net/sctp/stream_sched.c
238
!q->asoc->peer.intl_capable) {
net/sctp/stream_sched.c
247
sout = SCTP_SO(&q->asoc->stream, sid);
net/sctp/stream_sched.c
248
q->asoc->stream.out_curr = sout;
net/sctp/stream_sched.c
252
q->asoc->stream.out_curr = NULL;
net/sctp/stream_sched.c
253
q->sched->dequeue_done(q, ch);
net/sctp/stream_sched.c
257
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch)
net/sctp/stream_sched.c
261
q->out_qlen -= ch->skb->len;
net/sctp/stream_sched.c
53
static void sctp_sched_fcfs_enqueue(struct sctp_outq *q,
net/sctp/stream_sched.c
58
static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q)
net/sctp/stream_sched.c
60
struct sctp_stream *stream = &q->asoc->stream;
net/sctp/stream_sched.c
64
if (list_empty(&q->out_chunk_list))
net/sctp/stream_sched.c
71
entry = q->out_chunk_list.next;
net/sctp/stream_sched.c
75
sctp_sched_dequeue_common(q, ch);
net/sctp/stream_sched.c
81
static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q,
net/sctp/stream_sched_fc.c
107
stream = &q->asoc->stream;
net/sctp/stream_sched_fc.c
111
static struct sctp_chunk *sctp_sched_fc_dequeue(struct sctp_outq *q)
net/sctp/stream_sched_fc.c
113
struct sctp_stream *stream = &q->asoc->stream;
net/sctp/stream_sched_fc.c
118
if (list_empty(&q->out_chunk_list))
net/sctp/stream_sched_fc.c
128
sctp_sched_dequeue_common(q, ch);
net/sctp/stream_sched_fc.c
132
static void sctp_sched_fc_dequeue_done(struct sctp_outq *q,
net/sctp/stream_sched_fc.c
135
struct sctp_stream *stream = &q->asoc->stream;
net/sctp/stream_sched_fc.c
98
static void sctp_sched_fc_enqueue(struct sctp_outq *q,
net/sctp/stream_sched_prio.c
215
static void sctp_sched_prio_enqueue(struct sctp_outq *q,
net/sctp/stream_sched_prio.c
224
stream = &q->asoc->stream;
net/sctp/stream_sched_prio.c
228
static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q)
net/sctp/stream_sched_prio.c
230
struct sctp_stream *stream = &q->asoc->stream;
net/sctp/stream_sched_prio.c
236
if (list_empty(&q->out_chunk_list))
net/sctp/stream_sched_prio.c
250
sctp_sched_dequeue_common(q, ch);
net/sctp/stream_sched_prio.c
256
static void sctp_sched_prio_dequeue_done(struct sctp_outq *q,
net/sctp/stream_sched_prio.c
267
soute = SCTP_SO(&q->asoc->stream, sid)->ext;
net/sctp/stream_sched_rr.c
106
stream = &q->asoc->stream;
net/sctp/stream_sched_rr.c
110
static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q)
net/sctp/stream_sched_rr.c
112
struct sctp_stream *stream = &q->asoc->stream;
net/sctp/stream_sched_rr.c
117
if (list_empty(&q->out_chunk_list))
net/sctp/stream_sched_rr.c
127
sctp_sched_dequeue_common(q, ch);
net/sctp/stream_sched_rr.c
133
static void sctp_sched_rr_dequeue_done(struct sctp_outq *q,
net/sctp/stream_sched_rr.c
141
soute = SCTP_SO(&q->asoc->stream, sid)->ext;
net/sctp/stream_sched_rr.c
143
sctp_sched_rr_next_stream(&q->asoc->stream);
net/sctp/stream_sched_rr.c
146
sctp_sched_rr_unsched(&q->asoc->stream, soute);
net/sctp/stream_sched_rr.c
97
static void sctp_sched_rr_enqueue(struct sctp_outq *q,
net/smc/smc_llc.c
1865
struct smc_llc_qentry *qentry, *q;
net/smc/smc_llc.c
1868
list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
net/sunrpc/auth_gss/auth_gss.c
181
const void *q;
net/sunrpc/auth_gss/auth_gss.c
225
q = (const void *)((const char *)p + seclen);
net/sunrpc/auth_gss/auth_gss.c
226
if (unlikely(q > end || q < p)) {
net/sunrpc/auth_gss/auth_gss.c
238
if (q == end) {
net/sunrpc/auth_gss/auth_gss.c
239
p = q;
net/sunrpc/auth_gss/auth_gss.c
244
p = simple_get_netobj(q, end, &ctx->gc_acceptor);
net/sunrpc/auth_gss/auth_gss_internal.h
18
const void *q = (const void *)((const char *)p + len);
net/sunrpc/auth_gss/auth_gss_internal.h
19
if (unlikely(q > end || q < p))
net/sunrpc/auth_gss/auth_gss_internal.h
22
return q;
net/sunrpc/auth_gss/auth_gss_internal.h
28
const void *q;
net/sunrpc/auth_gss/auth_gss_internal.h
34
q = (const void *)((const char *)p + len);
net/sunrpc/auth_gss/auth_gss_internal.h
35
if (unlikely(q > end || q < p))
net/sunrpc/auth_gss/auth_gss_internal.h
44
return q;
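Both auth_gss helpers above advance a cursor by a caller-supplied length and reject the result when it passes end or wraps below p; the second test is what defends against an attacker-controlled overflowing length. A minimal sketch of the pattern with a hypothetical helper name (it relies on flat pointer comparison, as the kernel code does):

    #include <stddef.h>
    #include <string.h>

    /* Copy `len` bytes from a bounded buffer, returning the advanced
     * cursor or NULL if the read would escape [p, end). */
    static const void *get_bytes(const void *p, const void *end,
                                 void *res, size_t len)
    {
            const void *q = (const void *)((const char *)p + len);

            if (q > end || q < p)   /* past the end, or wrapped around */
                    return NULL;
            memcpy(res, p, len);
            return q;
    }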
net/sunrpc/cache.c
1019
for (cq= &rp->q; &cq->list != &cd->queue;
net/sunrpc/cache.c
1023
container_of(cq, struct cache_request, q);
net/sunrpc/cache.c
1047
rp->q.reader = 1;
net/sunrpc/cache.c
1050
list_add(&rp->q.list, &cd->queue);
net/sunrpc/cache.c
1070
for (cq = &rp->q; &cq->list != &cd->queue;
net/sunrpc/cache.c
1076
struct cache_request, q);
net/sunrpc/cache.c
1081
list_del(&cr->q.list);
net/sunrpc/cache.c
1088
list_del(&rp->q.list);
net/sunrpc/cache.c
1119
cr = container_of(cq, struct cache_request, q);
net/sunrpc/cache.c
1127
list_move(&cr->q.list, &dequeued);
net/sunrpc/cache.c
1131
cr = list_entry(dequeued.next, struct cache_request, q.list);
net/sunrpc/cache.c
1132
list_del(&cr->q.list);
net/sunrpc/cache.c
1250
crq->q.reader = 0;
net/sunrpc/cache.c
1257
list_add_tail(&crq->q.list, &detail->queue);
net/sunrpc/cache.c
813
struct cache_queue q;
net/sunrpc/cache.c
820
struct cache_queue q;
net/sunrpc/cache.c
852
while (rp->q.list.next != &cd->queue &&
net/sunrpc/cache.c
853
list_entry(rp->q.list.next, struct cache_queue, list)
net/sunrpc/cache.c
855
struct list_head *next = rp->q.list.next;
net/sunrpc/cache.c
856
list_move(&rp->q.list, next);
net/sunrpc/cache.c
858
if (rp->q.list.next == &cd->queue) {
net/sunrpc/cache.c
864
rq = container_of(rp->q.list.next, struct cache_request, q.list);
net/sunrpc/cache.c
865
WARN_ON_ONCE(rq->q.reader);
net/sunrpc/cache.c
880
list_move(&rp->q.list, &rq->q.list);
net/sunrpc/cache.c
892
list_move(&rp->q.list, &rq->q.list);
net/sunrpc/cache.c
904
list_del(&rq->q.list);
net/sunrpc/cache.c
993
for (cq= &rp->q; &cq->list != &cd->queue;
net/sunrpc/sched.c
1210
struct workqueue_struct *q)
net/sunrpc/sched.c
1212
if (q != NULL) {
net/sunrpc/sched.c
1214
queue_work(q, &task->u.tk_work);
net/sunrpc/sched.c
1219
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
net/sunrpc/sched.c
1223
rpc_final_put_task(task, q);
net/sunrpc/sched.c
147
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
net/sunrpc/sched.c
151
list_for_each_entry(t, q, u.tk_wait.list) {
net/sunrpc/sched.c
156
task->u.tk_wait.list.next = q;
net/sunrpc/sched.c
162
list_add_tail(&task->u.tk_wait.list, q);
net/sunrpc/sched.c
171
struct list_head *q;
net/sunrpc/sched.c
183
q = t->u.tk_wait.list.next;
net/sunrpc/sched.c
184
list_add_tail(&t->u.tk_wait.list, q);
net/sunrpc/sched.c
384
static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
net/sunrpc/sched.c
388
trace_rpc_task_sleep(task, q);
net/sunrpc/sched.c
390
__rpc_add_wait_queue(q, task, queue_priority);
net/sunrpc/sched.c
393
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
net/sunrpc/sched.c
399
__rpc_do_sleep_on_priority(q, task, queue_priority);
net/sunrpc/sched.c
402
static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
net/sunrpc/sched.c
409
__rpc_do_sleep_on_priority(q, task, queue_priority);
net/sunrpc/sched.c
410
__rpc_add_timer(q, task, timeout);
net/sunrpc/sched.c
432
void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
net/sunrpc/sched.c
443
spin_lock(&q->lock);
net/sunrpc/sched.c
444
__rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
net/sunrpc/sched.c
445
spin_unlock(&q->lock);
net/sunrpc/sched.c
449
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
net/sunrpc/sched.c
461
spin_lock(&q->lock);
net/sunrpc/sched.c
462
__rpc_sleep_on_priority(q, task, task->tk_priority);
net/sunrpc/sched.c
463
spin_unlock(&q->lock);
net/sunrpc/sched.c
467
void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
net/sunrpc/sched.c
477
spin_lock(&q->lock);
net/sunrpc/sched.c
478
__rpc_sleep_on_priority_timeout(q, task, timeout, priority);
net/sunrpc/sched.c
479
spin_unlock(&q->lock);
net/sunrpc/sched.c
483
void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
net/sunrpc/sched.c
494
spin_lock(&q->lock);
net/sunrpc/sched.c
495
__rpc_sleep_on_priority(q, task, priority);
net/sunrpc/sched.c
496
spin_unlock(&q->lock);
net/sunrpc/sched.c
607
struct list_head *q;
net/sunrpc/sched.c
613
q = &queue->tasks[RPC_NR_PRIORITY - 1];
net/sunrpc/sched.c
614
if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) {
net/sunrpc/sched.c
615
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
net/sunrpc/sched.c
622
q = &queue->tasks[queue->priority];
net/sunrpc/sched.c
623
if (!list_empty(q) && queue->nr) {
net/sunrpc/sched.c
625
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
net/sunrpc/sched.c
633
if (q == &queue->tasks[0])
net/sunrpc/sched.c
634
q = &queue->tasks[queue->maxpriority];
net/sunrpc/sched.c
636
q = q - 1;
net/sunrpc/sched.c
637
if (!list_empty(q)) {
net/sunrpc/sched.c
638
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
net/sunrpc/sched.c
641
} while (q != &queue->tasks[queue->priority]);
net/sunrpc/sched.c
647
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
net/sunrpc/svc_xprt.c
1131
struct llist_node *q, **t1, *t2;
net/sunrpc/svc_xprt.c
1133
q = lwq_dequeue_all(&pool->sp_xprts);
net/sunrpc/svc_xprt.c
1134
lwq_for_each_safe(xprt, t1, t2, &q, xpt_ready) {
net/sunrpc/svc_xprt.c
1142
if (q)
net/sunrpc/svc_xprt.c
1143
lwq_enqueue_batch(q, &pool->sp_xprts);
net/sunrpc/xdr.c
1080
__be32 *q;
net/sunrpc/xdr.c
1086
q = p + (nbytes >> 2);
net/sunrpc/xdr.c
1087
if (unlikely(q > xdr->end || q < p))
net/sunrpc/xdr.c
1089
xdr->p = q;
net/sunrpc/xdr.c
1454
__be32 *q = p + nwords;
net/sunrpc/xdr.c
1456
if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
net/sunrpc/xdr.c
1458
xdr->p = q;
net/unix/af_unix.c
499
static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
net/unix/af_unix.c
505
u = container_of(q, struct unix_sock, peer_wake);
net/unix/af_unix.c
508
q);
net/x25/x25_in.c
211
static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
net/x25/x25_in.c
421
int queued = 0, frametype, ns, nr, q, d, m;
net/x25/x25_in.c
426
frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
net/x25/x25_in.c
436
queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
net/x25/x25_subr.c
261
int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
net/x25/x25_subr.c
271
*ns = *nr = *q = *d = *m = 0;
net/x25/x25_subr.c
316
*q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
net/x25/x25_subr.c
325
*q = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
net/xdp/xsk.c
1146
struct xsk_queue *q;
net/xdp/xsk.c
1151
q = xskq_create(entries, umem_queue);
net/xdp/xsk.c
1152
if (!q)
net/xdp/xsk.c
1157
WRITE_ONCE(*queue, q);
net/xdp/xsk.c
1489
struct xsk_queue **q;
net/xdp/xsk.c
1502
q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
net/xdp/xsk.c
1503
err = xsk_init_queue(entries, q, false);
net/xdp/xsk.c
1556
struct xsk_queue **q;
net/xdp/xsk.c
1570
q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
net/xdp/xsk.c
1572
err = xsk_init_queue(entries, q, true);
net/xdp/xsk.c
1756
struct xsk_queue *q = NULL;
net/xdp/xsk.c
1762
q = READ_ONCE(xs->rx);
net/xdp/xsk.c
1764
q = READ_ONCE(xs->tx);
net/xdp/xsk.c
1769
q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
net/xdp/xsk.c
1772
q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
net/xdp/xsk.c
1776
if (!q)
net/xdp/xsk.c
1781
if (size > q->ring_vmalloc_size)
net/xdp/xsk.c
1784
return remap_vmalloc_range(vma, q->ring, 0);
net/xdp/xsk_queue.c
14
static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
net/xdp/xsk_queue.c
20
return struct_size(umem_ring, desc, q->nentries);
net/xdp/xsk_queue.c
21
return struct_size(rxtx_ring, desc, q->nentries);
net/xdp/xsk_queue.c
26
struct xsk_queue *q;
net/xdp/xsk_queue.c
29
q = kzalloc_obj(*q);
net/xdp/xsk_queue.c
30
if (!q)
net/xdp/xsk_queue.c
33
q->nentries = nentries;
net/xdp/xsk_queue.c
34
q->ring_mask = nentries - 1;
net/xdp/xsk_queue.c
36
size = xskq_get_ring_size(q, umem_queue);
net/xdp/xsk_queue.c
43
kfree(q);
net/xdp/xsk_queue.c
49
q->ring = vmalloc_user(size);
net/xdp/xsk_queue.c
50
if (!q->ring) {
net/xdp/xsk_queue.c
51
kfree(q);
net/xdp/xsk_queue.c
55
q->ring_vmalloc_size = size;
net/xdp/xsk_queue.c
56
return q;
net/xdp/xsk_queue.c
59
void xskq_destroy(struct xsk_queue *q)
net/xdp/xsk_queue.c
61
if (!q)
net/xdp/xsk_queue.c
64
vfree(q->ring);
net/xdp/xsk_queue.c
65
kfree(q);
net/xdp/xsk_queue.h
125
static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
net/xdp/xsk_queue.h
127
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
net/xdp/xsk_queue.h
128
u32 idx = cached_cons & q->ring_mask;
net/xdp/xsk_queue.h
133
static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
net/xdp/xsk_queue.h
135
if (q->cached_cons != q->cached_prod) {
net/xdp/xsk_queue.h
136
__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
net/xdp/xsk_queue.h
222
static inline bool xskq_has_descs(struct xsk_queue *q)
net/xdp/xsk_queue.h
224
return q->cached_cons != q->cached_prod;
net/xdp/xsk_queue.h
227
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
net/xdp/xsk_queue.h
232
q->invalid_descs++;
net/xdp/xsk_queue.h
238
static inline bool xskq_cons_read_desc(struct xsk_queue *q,
net/xdp/xsk_queue.h
242
if (q->cached_cons != q->cached_prod) {
net/xdp/xsk_queue.h
243
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
net/xdp/xsk_queue.h
244
u32 idx = q->cached_cons & q->ring_mask;
net/xdp/xsk_queue.h
247
return xskq_cons_is_valid_desc(q, desc, pool);
net/xdp/xsk_queue.h
250
q->queue_empty_descs++;
net/xdp/xsk_queue.h
254
static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
net/xdp/xsk_queue.h
256
q->cached_cons += cnt;
net/xdp/xsk_queue.h
259
static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
net/xdp/xsk_queue.h
262
parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
net/xdp/xsk_queue.h
267
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
net/xdp/xsk_queue.h
270
u32 cached_cons = q->cached_cons, nb_entries = 0;
net/xdp/xsk_queue.h
277
while (cached_cons != q->cached_prod && nb_entries < max) {
net/xdp/xsk_queue.h
278
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
net/xdp/xsk_queue.h
279
u32 idx = cached_cons & q->ring_mask;
net/xdp/xsk_queue.h
284
parse_desc(q, pool, &descs[nb_entries], &parsed);
net/xdp/xsk_queue.h
303
xskq_cons_release_n(q, cached_cons - q->cached_cons);
net/xdp/xsk_queue.h
309
static inline void __xskq_cons_release(struct xsk_queue *q)
net/xdp/xsk_queue.h
311
smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matchees A */
net/xdp/xsk_queue.h
314
static inline void __xskq_cons_peek(struct xsk_queue *q)
net/xdp/xsk_queue.h
317
q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
net/xdp/xsk_queue.h
320
static inline void xskq_cons_get_entries(struct xsk_queue *q)
net/xdp/xsk_queue.h
322
__xskq_cons_release(q);
net/xdp/xsk_queue.h
323
__xskq_cons_peek(q);
net/xdp/xsk_queue.h
326
static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
net/xdp/xsk_queue.h
328
u32 entries = q->cached_prod - q->cached_cons;
net/xdp/xsk_queue.h
333
__xskq_cons_peek(q);
net/xdp/xsk_queue.h
334
entries = q->cached_prod - q->cached_cons;
net/xdp/xsk_queue.h
339
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
net/xdp/xsk_queue.h
341
if (q->cached_prod == q->cached_cons)
net/xdp/xsk_queue.h
342
xskq_cons_get_entries(q);
net/xdp/xsk_queue.h
343
return xskq_cons_read_addr_unchecked(q, addr);
net/xdp/xsk_queue.h
346
static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
net/xdp/xsk_queue.h
350
if (q->cached_prod == q->cached_cons)
net/xdp/xsk_queue.h
351
xskq_cons_get_entries(q);
net/xdp/xsk_queue.h
352
return xskq_cons_read_desc(q, desc, pool);
net/xdp/xsk_queue.h
359
static inline void xskq_cons_release(struct xsk_queue *q)
net/xdp/xsk_queue.h
361
q->cached_cons++;
net/xdp/xsk_queue.h
364
static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
net/xdp/xsk_queue.h
366
q->cached_cons -= cnt;
net/xdp/xsk_queue.h
369
static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
net/xdp/xsk_queue.h
372
return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
net/xdp/xsk_queue.h
377
static inline u32 xskq_get_prod(struct xsk_queue *q)
net/xdp/xsk_queue.h
379
return READ_ONCE(q->ring->producer);
net/xdp/xsk_queue.h
382
static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
net/xdp/xsk_queue.h
384
u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);
net/xdp/xsk_queue.h
390
q->cached_cons = READ_ONCE(q->ring->consumer);
net/xdp/xsk_queue.h
391
free_entries = q->nentries - (q->cached_prod - q->cached_cons);
net/xdp/xsk_queue.h
396
static inline bool xskq_prod_is_full(struct xsk_queue *q)
net/xdp/xsk_queue.h
398
return xskq_prod_nb_free(q, 1) ? false : true;
net/xdp/xsk_queue.h
401
static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
net/xdp/xsk_queue.h
403
q->cached_prod -= cnt;
net/xdp/xsk_queue.h
406
static inline int xskq_prod_reserve(struct xsk_queue *q)
net/xdp/xsk_queue.h
408
if (xskq_prod_is_full(q))
net/xdp/xsk_queue.h
412
q->cached_prod++;
net/xdp/xsk_queue.h
416
static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
net/xdp/xsk_queue.h
418
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
net/xdp/xsk_queue.h
420
if (xskq_prod_is_full(q))
net/xdp/xsk_queue.h
424
ring->desc[q->cached_prod++ & q->ring_mask] = addr;
net/xdp/xsk_queue.h
428
static inline void xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr)
net/xdp/xsk_queue.h
430
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
net/xdp/xsk_queue.h
432
ring->desc[idx & q->ring_mask] = addr;
net/xdp/xsk_queue.h
435
static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
net/xdp/xsk_queue.h
438
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
net/xdp/xsk_queue.h
442
cached_prod = q->cached_prod;
net/xdp/xsk_queue.h
444
ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
net/xdp/xsk_queue.h
445
q->cached_prod = cached_prod;
net/xdp/xsk_queue.h
448
static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
net/xdp/xsk_queue.h
451
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
net/xdp/xsk_queue.h
454
if (xskq_prod_is_full(q))
net/xdp/xsk_queue.h
458
idx = q->cached_prod++ & q->ring_mask;
net/xdp/xsk_queue.h
466
static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
net/xdp/xsk_queue.h
468
smp_store_release(&q->ring->producer, idx); /* B, matches C */
net/xdp/xsk_queue.h
471
static inline void xskq_prod_submit(struct xsk_queue *q)
net/xdp/xsk_queue.h
473
__xskq_prod_submit(q, q->cached_prod);
net/xdp/xsk_queue.h
476
static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
net/xdp/xsk_queue.h
478
__xskq_prod_submit(q, q->ring->producer + nb_entries);
net/xdp/xsk_queue.h
481
static inline bool xskq_prod_is_empty(struct xsk_queue *q)
net/xdp/xsk_queue.h
484
return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
net/xdp/xsk_queue.h
489
static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
net/xdp/xsk_queue.h
491
return q ? q->invalid_descs : 0;
net/xdp/xsk_queue.h
494
static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
net/xdp/xsk_queue.h
496
return q ? q->queue_empty_descs : 0;
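The xsk_queue accessors above form a single-producer/single-consumer ring: each side publishes its index with smp_store_release() and the other side observes it with smp_load_acquire(), which is what the A/B/C/D pairing comments in the source record. A userspace sketch of the same protocol in C11 atomics; the ring size and field names are illustrative:

    #include <stdatomic.h>
    #include <stdint.h>

    #define RING_SIZE 64u                   /* power of two, as in xsk */
    #define RING_MASK (RING_SIZE - 1)

    struct ring {
            _Atomic uint32_t producer;
            _Atomic uint32_t consumer;
            uint64_t desc[RING_SIZE];
    };

    /* Producer: write the slot first, then publish with release so the
     * consumer's acquire load sees the descriptor fully written. */
    static int ring_produce(struct ring *r, uint64_t val)
    {
            uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
            uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_acquire);

            if (prod - cons == RING_SIZE)
                    return -1;                          /* full */
            r->desc[prod & RING_MASK] = val;
            atomic_store_explicit(&r->producer, prod + 1,
                                  memory_order_release); /* B, matches C */
            return 0;
    }

    /* Consumer: acquire the producer index, read the slot, then release
     * the consumer index so the producer may reuse it. */
    static int ring_consume(struct ring *r, uint64_t *val)
    {
            uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
            uint32_t prod = atomic_load_explicit(&r->producer,
                                                 memory_order_acquire); /* C, matches B */

            if (cons == prod)
                    return -1;                          /* empty */
            *val = r->desc[cons & RING_MASK];
            atomic_store_explicit(&r->consumer, cons + 1,
                                  memory_order_release); /* D, matches A */
            return 0;
    }

The kernel version additionally caches both indices (cached_prod, cached_cons) so the hot path touches the shared cachelines only when its local view runs dry, as the xskq_cons_peek/release pairs above show.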
net/xfrm/xfrm_iptfs.c
2525
u64 q;
net/xfrm/xfrm_iptfs.c
2569
q = (u64)xc->max_queue_size * 95;
net/xfrm/xfrm_iptfs.c
2570
do_div(q, 100);
net/xfrm/xfrm_iptfs.c
2571
xtfs->ecn_queue_size = (u32)q;
net/xfrm/xfrm_iptfs.c
2601
u64 q;
net/xfrm/xfrm_iptfs.c
2604
q = xtfs->drop_time_ns;
net/xfrm/xfrm_iptfs.c
2605
do_div(q, NSECS_IN_USEC);
net/xfrm/xfrm_iptfs.c
2606
ret = nla_put_u32(skb, XFRMA_IPTFS_DROP_TIME, q);
net/xfrm/xfrm_iptfs.c
2619
q = xtfs->init_delay_ns;
net/xfrm/xfrm_iptfs.c
2620
do_div(q, NSECS_IN_USEC);
net/xfrm/xfrm_iptfs.c
2621
ret = nla_put_u32(skb, XFRMA_IPTFS_INIT_DELAY, q);
samples/v4l/v4l2-pci-skeleton.c
747
struct vb2_queue *q;
samples/v4l/v4l2-pci-skeleton.c
806
q = &skel->queue;
samples/v4l/v4l2-pci-skeleton.c
807
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
samples/v4l/v4l2-pci-skeleton.c
808
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
samples/v4l/v4l2-pci-skeleton.c
809
q->dev = &pdev->dev;
samples/v4l/v4l2-pci-skeleton.c
810
q->drv_priv = skel;
samples/v4l/v4l2-pci-skeleton.c
811
q->buf_struct_size = sizeof(struct skel_buffer);
samples/v4l/v4l2-pci-skeleton.c
812
q->ops = &skel_qops;
samples/v4l/v4l2-pci-skeleton.c
813
q->mem_ops = &vb2_dma_contig_memops;
samples/v4l/v4l2-pci-skeleton.c
814
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
samples/v4l/v4l2-pci-skeleton.c
820
q->min_queued_buffers = 2;
samples/v4l/v4l2-pci-skeleton.c
830
q->lock = &skel->lock;
samples/v4l/v4l2-pci-skeleton.c
835
q->gfp_flags = GFP_DMA32;
samples/v4l/v4l2-pci-skeleton.c
836
ret = vb2_queue_init(q);
samples/v4l/v4l2-pci-skeleton.c
861
vdev->queue = q;
scripts/asn1_compiler.c
349
char *line, *nl, *start, *p, *q;
scripts/asn1_compiler.c
381
q = p + 2;
scripts/asn1_compiler.c
382
while ((q = memchr(q, '-', nl - q))) {
scripts/asn1_compiler.c
383
if (q[1] == '-') {
scripts/asn1_compiler.c
385
q += 2;
scripts/asn1_compiler.c
386
memmove(p, q, nl - q);
scripts/asn1_compiler.c
389
q++;
scripts/asn1_compiler.c
417
q = p + 1;
scripts/asn1_compiler.c
418
while (q < nl && (isalnum(*q) || *q == '-' || *q == '_'))
scripts/asn1_compiler.c
419
q++;
scripts/asn1_compiler.c
420
tokens[tix].size = q - p;
scripts/asn1_compiler.c
421
p = q;
scripts/asn1_compiler.c
458
q = p + 1;
scripts/asn1_compiler.c
459
while (q < nl && (isdigit(*q)))
scripts/asn1_compiler.c
460
q++;
scripts/asn1_compiler.c
461
tokens[tix].size = q - p;
scripts/asn1_compiler.c
462
p = q;
scripts/basic/fixdep.c
191
const char *q, *r;
scripts/basic/fixdep.c
200
q = p;
scripts/basic/fixdep.c
201
while (isalnum(*q) || *q == '_')
scripts/basic/fixdep.c
202
q++;
scripts/basic/fixdep.c
203
if (str_ends_with(p, q - p, "_MODULE"))
scripts/basic/fixdep.c
204
r = q - 7;
scripts/basic/fixdep.c
206
r = q;
scripts/basic/fixdep.c
209
p = q;
scripts/basic/fixdep.c
267
char *q, saved_c;
scripts/basic/fixdep.c
324
q = p;
scripts/basic/fixdep.c
325
while (*q != ' ' && *q != '\t' && *q != '\n' && *q != '#' && *q != ':') {
scripts/basic/fixdep.c
326
if (*q == '\\') {
scripts/basic/fixdep.c
331
if (*(q + 1) == '\n')
scripts/basic/fixdep.c
335
if (*(q + 1) == '#' || *(q + 1) == ':') {
scripts/basic/fixdep.c
336
memmove(p + 1, p, q - p);
scripts/basic/fixdep.c
340
q++;
scripts/basic/fixdep.c
343
if (*q == '\0')
scripts/basic/fixdep.c
345
q++;
scripts/basic/fixdep.c
350
p = q;
scripts/basic/fixdep.c
354
saved_c = *q;
scripts/basic/fixdep.c
355
*q = '\0';
scripts/basic/fixdep.c
382
} else if (!is_ignored_file(p, q - p) &&
scripts/basic/fixdep.c
383
!in_hashtable(p, q - p, file_hashtab)) {
scripts/basic/fixdep.c
388
if (need_parse && !is_no_parse_file(p, q - p)) {
scripts/basic/fixdep.c
397
*q = saved_c;
scripts/basic/fixdep.c
398
p = q;
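The fixdep scanner above walks an identifier with a second cursor and, when the token ends in "_MODULE", trims the 7-byte suffix so both spellings map to the same config dependency. A sketch with a hypothetical ends_with() standing in for the tool's str_ends_with():

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int ends_with(const char *s, size_t n, const char *suffix)
    {
            size_t sn = strlen(suffix);
            return n >= sn && memcmp(s + n - sn, suffix, sn) == 0;
    }

    /* Length of the config name at p, minus any _MODULE tail. */
    static size_t config_name_len(const char *p)
    {
            const char *q = p, *r;

            while (isalnum((unsigned char)*q) || *q == '_')
                    q++;
            if (ends_with(p, (size_t)(q - p), "_MODULE"))
                    r = q - 7;              /* strlen("_MODULE") == 7 */
            else
                    r = q;
            return (size_t)(r - p);
    }

    int main(void)
    {
            printf("%zu\n", config_name_len("USB_MODULE)")); /* 3 -> "USB" */
            return 0;
    }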
scripts/dtc/data.c
60
char *q;
scripts/dtc/data.c
65
q = d.val;
scripts/dtc/data.c
72
q[d.len++] = c;
scripts/dtc/data.c
75
q[d.len++] = '\0';
scripts/dtc/libfdt/fdt_ro.c
263
const char *q = memchr(path, '/', end - p);
scripts/dtc/libfdt/fdt_ro.c
265
if (!q)
scripts/dtc/libfdt/fdt_ro.c
266
q = end;
scripts/dtc/libfdt/fdt_ro.c
268
p = fdt_get_alias_namelen(fdt, p, q - p);
scripts/dtc/libfdt/fdt_ro.c
273
p = q;
scripts/dtc/libfdt/fdt_ro.c
277
const char *q;
scripts/dtc/libfdt/fdt_ro.c
284
q = memchr(p, '/', end - p);
scripts/dtc/libfdt/fdt_ro.c
285
if (! q)
scripts/dtc/libfdt/fdt_ro.c
286
q = end;
scripts/dtc/libfdt/fdt_ro.c
288
offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
scripts/dtc/libfdt/fdt_ro.c
292
p = q;
scripts/kconfig/preprocess.c
463
const char *q;
scripts/kconfig/preprocess.c
477
q = p;
scripts/kconfig/preprocess.c
478
while (*q) {
scripts/kconfig/preprocess.c
479
if (*q == '(') {
scripts/kconfig/preprocess.c
481
} else if (*q == ')') {
scripts/kconfig/preprocess.c
485
q++;
scripts/kconfig/preprocess.c
488
if (!*q)
scripts/kconfig/preprocess.c
492
*str = q + 1;
scripts/kconfig/preprocess.c
494
return eval_clause(p, q - p, argc, argv);
security/integrity/evm/evm_main.c
1118
struct list_head *pos, *q;
security/integrity/evm/evm_main.c
1135
list_for_each_safe(pos, q, &evm_config_xattrnames)
security/keys/keyctl_pkey.c
42
char *c = params->info, *p, *q;
security/keys/keyctl_pkey.c
53
q = args[0].from;
security/keys/keyctl_pkey.c
54
if (!q[0])
security/keys/keyctl_pkey.c
59
params->encoding = q;
security/keys/keyctl_pkey.c
63
params->hash_algo = q;
security/selinux/hooks.c
2628
char *p, *q;
security/selinux/hooks.c
2632
for (p = q = arg; p < from + len; p++) {
security/selinux/hooks.c
2635
*q++ = c;
security/selinux/hooks.c
2637
arg = kmemdup_nul(arg, q - arg, GFP_KERNEL);
security/selinux/hooks.c
3756
struct qstr q;
security/selinux/hooks.c
3760
q.name = kn_name;
security/selinux/hooks.c
3761
q.hash_len = hashlen_string(kn_dir, kn_name);
security/selinux/hooks.c
3764
parent_sid, secclass, &q,
sound/core/misc.c
43
const struct snd_pci_quirk *q;
sound/core/misc.c
45
for (q = list; q->subvendor || q->subdevice; q++) {
sound/core/misc.c
46
if (q->subvendor != vendor)
sound/core/misc.c
48
if (!q->subdevice ||
sound/core/misc.c
49
(device & q->subdevice_mask) == q->subdevice)
sound/core/misc.c
50
return q;
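The snd_pci_quirk lookup is almost complete in the quoted lines: the table is sentinel-terminated, a zero subdevice acts as a wildcard, and subdevice_mask lets one entry match a whole device family. Restated as a standalone program with a reduced struct and made-up table entries:

    #include <stdint.h>
    #include <stdio.h>

    struct quirk {
            uint16_t subvendor;
            uint16_t subdevice;
            uint16_t subdevice_mask;
            const char *name;
    };

    static const struct quirk *quirk_lookup(const struct quirk *list,
                                            uint16_t vendor, uint16_t device)
    {
            const struct quirk *q;

            for (q = list; q->subvendor || q->subdevice; q++) {
                    if (q->subvendor != vendor)
                            continue;
                    if (!q->subdevice ||
                        (device & q->subdevice_mask) == q->subdevice)
                            return q;
            }
            return NULL;
    }

    int main(void)
    {
            static const struct quirk table[] = {
                    { 0x1043, 0x1300, 0xff00, "ASUS family" }, /* made-up entries */
                    { 0x17aa, 0x0000, 0x0000, "any Lenovo"  },
                    { 0 }                                      /* sentinel */
            };
            const struct quirk *q = quirk_lookup(table, 0x1043, 0x13a7);
            printf("%s\n", q ? q->name : "no match");          /* ASUS family */
            return 0;
    }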
sound/core/pcm_lib.c
1008
diff = q * den - num;
sound/core/pcm_lib.c
580
unsigned int q;
sound/core/pcm_lib.c
583
q = div32(a, b, &r);
sound/core/pcm_lib.c
585
++q;
sound/core/pcm_lib.c
586
return q;
sound/core/pcm_lib.c
847
unsigned int q = i->min;
sound/core/pcm_lib.c
849
if (q == 0)
sound/core/pcm_lib.c
850
q = 1;
sound/core/pcm_lib.c
851
den = div_up(num, q);
sound/core/pcm_lib.c
862
diff = num - q * den;
sound/core/pcm_lib.c
886
unsigned int q = i->max;
sound/core/pcm_lib.c
888
if (q == 0) {
sound/core/pcm_lib.c
892
den = div_down(num, q);
sound/core/pcm_lib.c
903
diff = q * den - num;
sound/core/pcm_lib.c
963
unsigned int q = i->min;
sound/core/pcm_lib.c
965
num = mul(q, den);
sound/core/pcm_lib.c
976
diff = num - q * den;
sound/core/pcm_lib.c
995
unsigned int q = i->max;
sound/core/pcm_lib.c
997
num = mul(q, den);
sound/core/seq/oss/seq_oss_device.h
122
void snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf);
sound/core/seq/oss/seq_oss_event.c
102
return note_on_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev);
sound/core/seq/oss/seq_oss_event.c
110
q->n.chn, 0, q->n.note, ev);
sound/core/seq/oss/seq_oss_event.c
121
extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
sound/core/seq/oss/seq_oss_event.c
125
switch (q->e.cmd) {
sound/core/seq/oss/seq_oss_event.c
127
return note_off_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev);
sound/core/seq/oss/seq_oss_event.c
130
return note_on_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev);
sound/core/seq/oss/seq_oss_event.c
133
return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_PGMCHANGE,
sound/core/seq/oss/seq_oss_event.c
134
q->e.chn, 0, q->e.p1, ev);
sound/core/seq/oss/seq_oss_event.c
137
return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_CHANPRESS,
sound/core/seq/oss/seq_oss_event.c
138
q->e.chn, 0, q->e.p1, ev);
sound/core/seq/oss/seq_oss_event.c
142
val = (char)q->e.p1;
sound/core/seq/oss/seq_oss_event.c
144
return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_CONTROLLER,
sound/core/seq/oss/seq_oss_event.c
145
q->e.chn, CTL_PAN, val, ev);
sound/core/seq/oss/seq_oss_event.c
148
val = ((short)q->e.p3 << 8) | (short)q->e.p2;
sound/core/seq/oss/seq_oss_event.c
149
switch (q->e.p1) {
sound/core/seq/oss/seq_oss_event.c
152
return set_control_event(dp, q->e.dev,
sound/core/seq/oss/seq_oss_event.c
154
q->e.chn, 0, val, ev);
sound/core/seq/oss/seq_oss_event.c
157
return set_control_event(dp, q->e.dev,
sound/core/seq/oss/seq_oss_event.c
159
q->e.chn, 0, val*128/100, ev);
sound/core/seq/oss/seq_oss_event.c
161
return set_control_event(dp, q->e.dev,
sound/core/seq/oss/seq_oss_event.c
163
q->e.chn, q->e.p1, val, ev);
sound/core/seq/oss/seq_oss_event.c
167
return snd_seq_oss_synth_raw_event(dp, q->e.dev, q->c, ev);
sound/core/seq/oss/seq_oss_event.c
175
chn_voice_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
sound/core/seq/oss/seq_oss_event.c
177
if (q->v.chn >= 32)
sound/core/seq/oss/seq_oss_event.c
179
switch (q->v.cmd) {
sound/core/seq/oss/seq_oss_event.c
181
return note_on_event(dp, q->v.dev, q->v.chn, q->v.note, q->v.parm, ev);
sound/core/seq/oss/seq_oss_event.c
184
return note_off_event(dp, q->v.dev, q->v.chn, q->v.note, q->v.parm, ev);
sound/core/seq/oss/seq_oss_event.c
187
return set_note_event(dp, q->v.dev, SNDRV_SEQ_EVENT_KEYPRESS,
sound/core/seq/oss/seq_oss_event.c
188
q->v.chn, q->v.note, q->v.parm, ev);
sound/core/seq/oss/seq_oss_event.c
196
chn_common_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
sound/core/seq/oss/seq_oss_event.c
198
if (q->l.chn >= 32)
sound/core/seq/oss/seq_oss_event.c
200
switch (q->l.cmd) {
sound/core/seq/oss/seq_oss_event.c
202
return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PGMCHANGE,
sound/core/seq/oss/seq_oss_event.c
203
q->l.chn, 0, q->l.p1, ev);
sound/core/seq/oss/seq_oss_event.c
206
return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CONTROLLER,
sound/core/seq/oss/seq_oss_event.c
207
q->l.chn, q->l.p1, q->l.val, ev);
sound/core/seq/oss/seq_oss_event.c
211
return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PITCHBEND,
sound/core/seq/oss/seq_oss_event.c
212
q->l.chn, 0, q->l.val - 8192, ev);
sound/core/seq/oss/seq_oss_event.c
215
return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CHANPRESS,
sound/core/seq/oss/seq_oss_event.c
216
q->l.chn, 0, q->l.val, ev);
sound/core/seq/oss/seq_oss_event.c
22
static int extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
sound/core/seq/oss/seq_oss_event.c
223
timing_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
sound/core/seq/oss/seq_oss_event.c
225
switch (q->t.cmd) {
sound/core/seq/oss/seq_oss_event.c
228
return set_echo_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
233
tmp.echo = (q->t.time << 8) | SEQ_ECHO;
sound/core/seq/oss/seq_oss_event.c
249
return snd_seq_oss_timer_tempo(dp->timer, q->t.time);
sound/core/seq/oss/seq_oss_event.c
258
local_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
sound/core/seq/oss/seq_oss_event.c
27
static int old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
sound/core/seq/oss/seq_oss_event.c
42
snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
sound/core/seq/oss/seq_oss_event.c
44
switch (q->s.code) {
sound/core/seq/oss/seq_oss_event.c
46
return extended_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
49
return chn_voice_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
52
return chn_common_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
55
return timing_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
58
return local_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
61
return snd_seq_oss_synth_sysex(dp, q->x.dev, q->x.buf, ev);
sound/core/seq/oss/seq_oss_event.c
69
if (snd_seq_oss_midi_open(dp, q->s.dev, SNDRV_SEQ_OSS_FILE_WRITE))
sound/core/seq/oss/seq_oss_event.c
71
if (snd_seq_oss_midi_filemode(dp, q->s.dev) & SNDRV_SEQ_OSS_FILE_WRITE)
sound/core/seq/oss/seq_oss_event.c
72
return snd_seq_oss_midi_putc(dp, q->s.dev, q->s.parm1, ev);
sound/core/seq/oss/seq_oss_event.c
78
return set_echo_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
83
return snd_seq_oss_synth_raw_event(dp, q->c[1], q->c, ev);
sound/core/seq/oss/seq_oss_event.c
88
return old_event(dp, q, ev);
sound/core/seq/oss/seq_oss_event.c
95
old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev)
sound/core/seq/oss/seq_oss_event.c
97
switch (q->s.code) {
sound/core/seq/oss/seq_oss_event.c
99
return note_off_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev);
sound/core/seq/oss/seq_oss_event.h
94
int snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
sound/core/seq/oss/seq_oss_event.h
95
int snd_seq_oss_process_timer_event(struct seq_oss_timer *rec, union evrec *q);
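The seq_oss_event.c entries above all funnel through one tagged union: each view of union evrec (q->s, q->e, q->v, q->l, q->t) evidently starts with a command or code byte at the same offset, so snd_seq_oss_process_event() can switch on q->s.code and hand the record to a per-class handler. A minimal standalone sketch of that dispatch pattern, with illustrative type and handler names that are not the kernel's:

    #include <stdio.h>

    union evt {
        struct { unsigned char code; } s;                       /* shared header view */
        struct { unsigned char code, dev, chn, note, vel; } n;  /* note record view */
        struct { unsigned char code, dev; short val; } c;       /* control record view */
    };

    enum { EV_NOTE = 1, EV_CTRL = 2 };

    static int handle_note(const union evt *q)
    {
        printf("note %d on chn %d\n", q->n.note, q->n.chn);
        return 0;
    }

    static int handle_ctrl(const union evt *q)
    {
        printf("ctrl val %d\n", q->c.val);
        return 0;
    }

    static int process_event(const union evt *q)
    {
        switch (q->s.code) {    /* the code byte overlays the same offset in every view */
        case EV_NOTE: return handle_note(q);
        case EV_CTRL: return handle_ctrl(q);
        default:      return -1;  /* unknown record class */
        }
    }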
sound/core/seq/oss/seq_oss_readq.c
100
result = snd_seq_oss_readq_put_event(q, &rec);
sound/core/seq/oss/seq_oss_readq.c
123
int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
sound/core/seq/oss/seq_oss_readq.c
127
.readq = q,
sound/core/seq/oss/seq_oss_readq.c
141
snd_seq_oss_readq_put_event(struct seq_oss_readq *q, union evrec *ev)
sound/core/seq/oss/seq_oss_readq.c
143
guard(spinlock_irqsave)(&q->lock);
sound/core/seq/oss/seq_oss_readq.c
144
if (q->qlen >= q->maxlen - 1)
sound/core/seq/oss/seq_oss_readq.c
147
memcpy(&q->q[q->tail], ev, sizeof(*ev));
sound/core/seq/oss/seq_oss_readq.c
148
q->tail = (q->tail + 1) % q->maxlen;
sound/core/seq/oss/seq_oss_readq.c
149
q->qlen++;
sound/core/seq/oss/seq_oss_readq.c
152
wake_up(&q->midi_sleep);
sound/core/seq/oss/seq_oss_readq.c
163
snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec)
sound/core/seq/oss/seq_oss_readq.c
165
if (q->qlen == 0)
sound/core/seq/oss/seq_oss_readq.c
167
memcpy(rec, &q->q[q->head], sizeof(*rec));
sound/core/seq/oss/seq_oss_readq.c
175
snd_seq_oss_readq_wait(struct seq_oss_readq *q)
sound/core/seq/oss/seq_oss_readq.c
177
wait_event_interruptible_timeout(q->midi_sleep,
sound/core/seq/oss/seq_oss_readq.c
178
(q->qlen > 0 || q->head == q->tail),
sound/core/seq/oss/seq_oss_readq.c
179
q->pre_event_timeout);
sound/core/seq/oss/seq_oss_readq.c
187
snd_seq_oss_readq_free(struct seq_oss_readq *q)
sound/core/seq/oss/seq_oss_readq.c
189
if (q->qlen > 0) {
sound/core/seq/oss/seq_oss_readq.c
190
q->head = (q->head + 1) % q->maxlen;
sound/core/seq/oss/seq_oss_readq.c
191
q->qlen--;
sound/core/seq/oss/seq_oss_readq.c
200
snd_seq_oss_readq_poll(struct seq_oss_readq *q, struct file *file, poll_table *wait)
sound/core/seq/oss/seq_oss_readq.c
202
poll_wait(file, &q->midi_sleep, wait);
sound/core/seq/oss/seq_oss_readq.c
203
return q->qlen;
sound/core/seq/oss/seq_oss_readq.c
210
snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *q, unsigned long curt, int seq_mode)
sound/core/seq/oss/seq_oss_readq.c
212
if (curt != q->input_time) {
sound/core/seq/oss/seq_oss_readq.c
218
snd_seq_oss_readq_put_event(q, &rec);
sound/core/seq/oss/seq_oss_readq.c
224
snd_seq_oss_readq_put_event(q, &rec);
sound/core/seq/oss/seq_oss_readq.c
227
q->input_time = curt;
sound/core/seq/oss/seq_oss_readq.c
238
snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf)
sound/core/seq/oss/seq_oss_readq.c
241
(waitqueue_active(&q->midi_sleep) ? "sleeping":"running"),
sound/core/seq/oss/seq_oss_readq.c
242
q->qlen, q->input_time);
sound/core/seq/oss/seq_oss_readq.c
35
struct seq_oss_readq *q;
sound/core/seq/oss/seq_oss_readq.c
37
q = kzalloc_obj(*q);
sound/core/seq/oss/seq_oss_readq.c
38
if (!q)
sound/core/seq/oss/seq_oss_readq.c
41
q->q = kzalloc_objs(union evrec, maxlen);
sound/core/seq/oss/seq_oss_readq.c
42
if (!q->q) {
sound/core/seq/oss/seq_oss_readq.c
43
kfree(q);
sound/core/seq/oss/seq_oss_readq.c
47
q->maxlen = maxlen;
sound/core/seq/oss/seq_oss_readq.c
48
q->qlen = 0;
sound/core/seq/oss/seq_oss_readq.c
49
q->head = q->tail = 0;
sound/core/seq/oss/seq_oss_readq.c
50
init_waitqueue_head(&q->midi_sleep);
sound/core/seq/oss/seq_oss_readq.c
51
spin_lock_init(&q->lock);
sound/core/seq/oss/seq_oss_readq.c
52
q->pre_event_timeout = SNDRV_SEQ_OSS_MAX_TIMEOUT;
sound/core/seq/oss/seq_oss_readq.c
53
q->input_time = (unsigned long)-1;
sound/core/seq/oss/seq_oss_readq.c
55
return q;
sound/core/seq/oss/seq_oss_readq.c
62
snd_seq_oss_readq_delete(struct seq_oss_readq *q)
sound/core/seq/oss/seq_oss_readq.c
64
if (q) {
sound/core/seq/oss/seq_oss_readq.c
65
kfree(q->q);
sound/core/seq/oss/seq_oss_readq.c
66
kfree(q);
sound/core/seq/oss/seq_oss_readq.c
74
snd_seq_oss_readq_clear(struct seq_oss_readq *q)
sound/core/seq/oss/seq_oss_readq.c
76
if (q->qlen) {
sound/core/seq/oss/seq_oss_readq.c
77
q->qlen = 0;
sound/core/seq/oss/seq_oss_readq.c
78
q->head = q->tail = 0;
sound/core/seq/oss/seq_oss_readq.c
81
wake_up(&q->midi_sleep);
sound/core/seq/oss/seq_oss_readq.c
82
q->input_time = (unsigned long)-1;
sound/core/seq/oss/seq_oss_readq.c
89
snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, int len)
sound/core/seq/oss/seq_oss_readq.h
19
union evrec *q;
sound/core/seq/oss/seq_oss_readq.h
30
void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
sound/core/seq/oss/seq_oss_readq.h
34
int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
sound/core/seq/oss/seq_oss_readq.h
38
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
sound/core/seq/oss/seq_oss_readq.h
39
void snd_seq_oss_readq_wait(struct seq_oss_readq *q);
sound/core/seq/oss/seq_oss_readq.h
40
void snd_seq_oss_readq_free(struct seq_oss_readq *q);
sound/core/seq/oss/seq_oss_readq.h
42
#define snd_seq_oss_readq_lock(q, flags) spin_lock_irqsave(&(q)->lock, flags)
sound/core/seq/oss/seq_oss_readq.h
43
#define snd_seq_oss_readq_unlock(q, flags) spin_unlock_irqrestore(&(q)->lock, flags)
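The seq_oss_readq.c entries above describe a classic fixed-size ring buffer: head and tail advance modulo maxlen, a qlen counter tracks the fill level, put refuses when one slot short of full, and pick/free split "peek the head" from "consume the head". A minimal userspace sketch of that shape (locking and the midi_sleep wakeups are omitted; the 32-byte record size is illustrative):

    #include <string.h>

    struct readq {
        char (*q)[32];                  /* record slots, allocated elsewhere */
        int maxlen, qlen, head, tail;
    };

    static int readq_put(struct readq *rq, const char *rec)
    {
        if (rq->qlen >= rq->maxlen - 1)
            return -1;                  /* queue (almost) full, as in put_event */
        memcpy(rq->q[rq->tail], rec, sizeof(rq->q[0]));
        rq->tail = (rq->tail + 1) % rq->maxlen;
        rq->qlen++;
        return 0;
    }

    static int readq_pick(struct readq *rq, char *rec)
    {
        if (rq->qlen == 0)
            return -1;                  /* nothing queued */
        memcpy(rec, rq->q[rq->head], sizeof(rq->q[0]));
        return 0;                       /* head is only advanced by readq_free() */
    }

    static void readq_free(struct readq *rq)
    {
        if (rq->qlen > 0) {             /* consume the record pick() exposed */
            rq->head = (rq->head + 1) % rq->maxlen;
            rq->qlen--;
        }
    }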
sound/core/seq/oss/seq_oss_writeq.c
106
q->sync_event_put = 1;
sound/core/seq/oss/seq_oss_writeq.c
110
wait_event_interruptible_timeout(q->sync_sleep, ! q->sync_event_put, HZ);
sound/core/seq/oss/seq_oss_writeq.c
113
q->sync_event_put = 0;
sound/core/seq/oss/seq_oss_writeq.c
114
if (! q->sync_event_put || q->sync_time >= time)
sound/core/seq/oss/seq_oss_writeq.c
123
snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time)
sound/core/seq/oss/seq_oss_writeq.c
125
guard(spinlock_irqsave)(&q->sync_lock);
sound/core/seq/oss/seq_oss_writeq.c
126
q->sync_time = time;
sound/core/seq/oss/seq_oss_writeq.c
127
q->sync_event_put = 0;
sound/core/seq/oss/seq_oss_writeq.c
128
wake_up(&q->sync_sleep);
sound/core/seq/oss/seq_oss_writeq.c
136
snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q)
sound/core/seq/oss/seq_oss_writeq.c
139
pool.client = q->dp->cseq;
sound/core/seq/oss/seq_oss_writeq.c
140
snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);
sound/core/seq/oss/seq_oss_writeq.c
149
snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val)
sound/core/seq/oss/seq_oss_writeq.c
152
pool.client = q->dp->cseq;
sound/core/seq/oss/seq_oss_writeq.c
153
snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool);
sound/core/seq/oss/seq_oss_writeq.c
155
snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool);
sound/core/seq/oss/seq_oss_writeq.c
27
struct seq_oss_writeq *q;
sound/core/seq/oss/seq_oss_writeq.c
30
q = kzalloc_obj(*q);
sound/core/seq/oss/seq_oss_writeq.c
31
if (!q)
sound/core/seq/oss/seq_oss_writeq.c
33
q->dp = dp;
sound/core/seq/oss/seq_oss_writeq.c
34
q->maxlen = maxlen;
sound/core/seq/oss/seq_oss_writeq.c
35
spin_lock_init(&q->sync_lock);
sound/core/seq/oss/seq_oss_writeq.c
36
q->sync_event_put = 0;
sound/core/seq/oss/seq_oss_writeq.c
37
q->sync_time = 0;
sound/core/seq/oss/seq_oss_writeq.c
38
init_waitqueue_head(&q->sync_sleep);
sound/core/seq/oss/seq_oss_writeq.c
47
return q;
sound/core/seq/oss/seq_oss_writeq.c
54
snd_seq_oss_writeq_delete(struct seq_oss_writeq *q)
sound/core/seq/oss/seq_oss_writeq.c
56
if (q) {
sound/core/seq/oss/seq_oss_writeq.c
57
snd_seq_oss_writeq_clear(q); /* to be sure */
sound/core/seq/oss/seq_oss_writeq.c
58
kfree(q);
sound/core/seq/oss/seq_oss_writeq.c
67
snd_seq_oss_writeq_clear(struct seq_oss_writeq *q)
sound/core/seq/oss/seq_oss_writeq.c
73
snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_REMOVE_EVENTS, &reset);
sound/core/seq/oss/seq_oss_writeq.c
76
snd_seq_oss_writeq_wakeup(q, 0);
sound/core/seq/oss/seq_oss_writeq.c
83
snd_seq_oss_writeq_sync(struct seq_oss_writeq *q)
sound/core/seq/oss/seq_oss_writeq.c
85
struct seq_oss_devinfo *dp = q->dp;
sound/core/seq/oss/seq_oss_writeq.c
89
if (q->sync_time >= time)
sound/core/seq/oss/seq_oss_writeq.c
92
if (! q->sync_event_put) {
sound/core/seq/oss/seq_oss_writeq.h
29
void snd_seq_oss_writeq_delete(struct seq_oss_writeq *q);
sound/core/seq/oss/seq_oss_writeq.h
30
void snd_seq_oss_writeq_clear(struct seq_oss_writeq *q);
sound/core/seq/oss/seq_oss_writeq.h
31
int snd_seq_oss_writeq_sync(struct seq_oss_writeq *q);
sound/core/seq/oss/seq_oss_writeq.h
32
void snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time);
sound/core/seq/oss/seq_oss_writeq.h
33
int snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q);
sound/core/seq/oss/seq_oss_writeq.h
34
void snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int size);
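The seq_oss_writeq.c entries above implement a flush handshake: the writer sets sync_event_put, queues an echo event, and sleeps on sync_sleep (with a timeout) until the echo returns and snd_seq_oss_writeq_wakeup() records sync_time and clears the flag. A minimal pthread sketch of the same handshake, with illustrative names and without the kernel's timeout:

    #include <pthread.h>

    struct writeq {
        pthread_mutex_t lock;
        pthread_cond_t  sync_sleep;
        int             sync_event_put;
        unsigned long   sync_time;
    };

    static void writeq_wait_sync(struct writeq *q)
    {
        pthread_mutex_lock(&q->lock);
        q->sync_event_put = 1;          /* "echo in flight" */
        while (q->sync_event_put)       /* kernel version also times out */
            pthread_cond_wait(&q->sync_sleep, &q->lock);
        pthread_mutex_unlock(&q->lock);
    }

    static void writeq_wakeup(struct writeq *q, unsigned long time)
    {
        pthread_mutex_lock(&q->lock);
        q->sync_time = time;            /* remember how far output progressed */
        q->sync_event_put = 0;
        pthread_cond_broadcast(&q->sync_sleep);
        pthread_mutex_unlock(&q->lock);
    }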
sound/core/seq/seq_clientmgr.c
1523
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_clientmgr.c
1526
if (IS_ERR(q))
sound/core/seq/seq_clientmgr.c
1527
return PTR_ERR(q);
sound/core/seq/seq_clientmgr.c
1529
info->queue = q->queue;
sound/core/seq/seq_clientmgr.c
1530
info->locked = q->locked;
sound/core/seq/seq_clientmgr.c
1531
info->owner = q->owner;
sound/core/seq/seq_clientmgr.c
1535
snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
sound/core/seq/seq_clientmgr.c
1536
strscpy(q->name, info->name, sizeof(q->name));
sound/core/seq/seq_clientmgr.c
1554
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_clientmgr.c
1557
if (q == NULL)
sound/core/seq/seq_clientmgr.c
1561
info->queue = q->queue;
sound/core/seq/seq_clientmgr.c
1562
info->owner = q->owner;
sound/core/seq/seq_clientmgr.c
1563
info->locked = q->locked;
sound/core/seq/seq_clientmgr.c
1564
strscpy(info->name, q->name, sizeof(info->name));
sound/core/seq/seq_clientmgr.c
1588
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_clientmgr.c
1590
if (! q)
sound/core/seq/seq_clientmgr.c
1592
if (q->owner != client->number)
sound/core/seq/seq_clientmgr.c
1594
strscpy(q->name, info->name, sizeof(q->name));
sound/core/seq/seq_clientmgr.c
1604
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_clientmgr.c
1607
if (q == NULL)
sound/core/seq/seq_clientmgr.c
1609
info->queue = q->queue;
sound/core/seq/seq_clientmgr.c
1610
info->owner = q->owner;
sound/core/seq/seq_clientmgr.c
1611
info->locked = q->locked;
sound/core/seq/seq_clientmgr.c
1732
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_clientmgr.c
1735
if (q == NULL)
sound/core/seq/seq_clientmgr.c
1737
guard(mutex)(&q->timer_mutex);
sound/core/seq/seq_clientmgr.c
1738
tmr = q->timer;
sound/core/seq/seq_clientmgr.c
568
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_clientmgr.c
571
if (! q)
sound/core/seq/seq_clientmgr.c
576
event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
sound/core/seq/seq_clientmgr.c
579
event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
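Several seq_clientmgr.c entries above use the kernel's scope-based cleanup helpers: declaring `struct snd_seq_queue *q __free(snd_seq_queue) = ...` drops the queue reference automatically on every return path, and no_free_ptr() (seen later in the seq_queue.c name-lookup entry) hands ownership out of the scope. A minimal sketch of the underlying __attribute__((cleanup)) mechanism that these macros wrap, with illustrative names and a plain heap buffer instead of a refcounted queue:

    #include <stdio.h>
    #include <stdlib.h>

    static void free_buf(char **p)      /* runs whenever the variable leaves scope */
    {
        free(*p);
    }
    #define __auto_free __attribute__((cleanup(free_buf)))

    static int use_scoped_buffer(void)
    {
        __auto_free char *buf = malloc(64);

        if (!buf)
            return -1;                  /* early return: nothing to clean up */
        snprintf(buf, 64, "released automatically");
        puts(buf);
        return 0;                       /* free_buf(&buf) fires on this path too */
    }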
sound/core/seq/seq_queue.c
100
q->queue = -1;
sound/core/seq/seq_queue.c
102
q->tickq = snd_seq_prioq_new();
sound/core/seq/seq_queue.c
103
q->timeq = snd_seq_prioq_new();
sound/core/seq/seq_queue.c
104
q->timer = snd_seq_timer_new();
sound/core/seq/seq_queue.c
105
if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
sound/core/seq/seq_queue.c
106
snd_seq_prioq_delete(&q->tickq);
sound/core/seq/seq_queue.c
107
snd_seq_prioq_delete(&q->timeq);
sound/core/seq/seq_queue.c
108
snd_seq_timer_delete(&q->timer);
sound/core/seq/seq_queue.c
109
kfree(q);
sound/core/seq/seq_queue.c
113
q->owner = owner;
sound/core/seq/seq_queue.c
114
q->locked = locked;
sound/core/seq/seq_queue.c
115
q->klocked = 0;
sound/core/seq/seq_queue.c
117
return q;
sound/core/seq/seq_queue.c
121
static void queue_delete(struct snd_seq_queue *q)
sound/core/seq/seq_queue.c
124
mutex_lock(&q->timer_mutex);
sound/core/seq/seq_queue.c
125
snd_seq_timer_stop(q->timer);
sound/core/seq/seq_queue.c
126
snd_seq_timer_close(q);
sound/core/seq/seq_queue.c
127
mutex_unlock(&q->timer_mutex);
sound/core/seq/seq_queue.c
129
snd_use_lock_sync(&q->use_lock);
sound/core/seq/seq_queue.c
131
snd_seq_prioq_delete(&q->tickq);
sound/core/seq/seq_queue.c
132
snd_seq_prioq_delete(&q->timeq);
sound/core/seq/seq_queue.c
133
snd_seq_timer_delete(&q->timer);
sound/core/seq/seq_queue.c
135
kfree(q);
sound/core/seq/seq_queue.c
162
struct snd_seq_queue *q;
sound/core/seq/seq_queue.c
164
q = queue_new(client, locked);
sound/core/seq/seq_queue.c
165
if (q == NULL)
sound/core/seq/seq_queue.c
167
q->info_flags = info_flags;
sound/core/seq/seq_queue.c
168
queue_use(q, client, 1);
sound/core/seq/seq_queue.c
169
snd_use_lock_use(&q->use_lock);
sound/core/seq/seq_queue.c
170
if (queue_list_add(q) < 0) {
sound/core/seq/seq_queue.c
171
snd_use_lock_free(&q->use_lock);
sound/core/seq/seq_queue.c
172
queue_delete(q);
sound/core/seq/seq_queue.c
175
return q;
sound/core/seq/seq_queue.c
181
struct snd_seq_queue *q;
sound/core/seq/seq_queue.c
185
q = queue_list_remove(queueid, client);
sound/core/seq/seq_queue.c
186
if (q == NULL)
sound/core/seq/seq_queue.c
188
queue_delete(q);
sound/core/seq/seq_queue.c
197
struct snd_seq_queue *q;
sound/core/seq/seq_queue.c
202
q = queue_list[queueid];
sound/core/seq/seq_queue.c
203
if (q)
sound/core/seq/seq_queue.c
204
snd_use_lock_use(&q->use_lock);
sound/core/seq/seq_queue.c
205
return q;
sound/core/seq/seq_queue.c
214
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_queue.c
217
if (q) {
sound/core/seq/seq_queue.c
218
if (strncmp(q->name, name, sizeof(q->name)) == 0)
sound/core/seq/seq_queue.c
219
return no_free_ptr(q);
sound/core/seq/seq_queue.c
230
void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
sound/core/seq/seq_queue.c
237
if (q == NULL)
sound/core/seq/seq_queue.c
241
scoped_guard(spinlock_irqsave, &q->check_lock) {
sound/core/seq/seq_queue.c
242
if (q->check_blocked) {
sound/core/seq/seq_queue.c
243
q->check_again = 1;
sound/core/seq/seq_queue.c
246
q->check_blocked = 1;
sound/core/seq/seq_queue.c
251
cur_tick = snd_seq_timer_get_cur_tick(q->timer);
sound/core/seq/seq_queue.c
253
cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
sound/core/seq/seq_queue.c
262
cur_time = snd_seq_timer_get_cur_time(q->timer, false);
sound/core/seq/seq_queue.c
264
cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
sound/core/seq/seq_queue.c
274
scoped_guard(spinlock_irqsave, &q->check_lock) {
sound/core/seq/seq_queue.c
275
if (q->check_again) {
sound/core/seq/seq_queue.c
276
q->check_again = 0;
sound/core/seq/seq_queue.c
280
q->check_blocked = 0;
sound/core/seq/seq_queue.c
294
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_queue.c
296
if (q == NULL)
sound/core/seq/seq_queue.c
302
cell->event.time.tick += q->timer->tick.cur_tick;
sound/core/seq/seq_queue.c
307
&q->timer->cur_time);
sound/core/seq/seq_queue.c
316
err = snd_seq_prioq_cell_in(q->tickq, cell);
sound/core/seq/seq_queue.c
321
err = snd_seq_prioq_cell_in(q->timeq, cell);
sound/core/seq/seq_queue.c
329
snd_seq_check_queue(q, atomic, hop);
sound/core/seq/seq_queue.c
337
static inline int check_access(struct snd_seq_queue *q, int client)
sound/core/seq/seq_queue.c
339
return (q->owner == client) || (!q->locked && !q->klocked);
sound/core/seq/seq_queue.c
345
static int queue_access_lock(struct snd_seq_queue *q, int client)
sound/core/seq/seq_queue.c
349
guard(spinlock_irqsave)(&q->owner_lock);
sound/core/seq/seq_queue.c
350
access_ok = check_access(q, client);
sound/core/seq/seq_queue.c
352
q->klocked = 1;
sound/core/seq/seq_queue.c
357
static inline void queue_access_unlock(struct snd_seq_queue *q)
sound/core/seq/seq_queue.c
359
guard(spinlock_irqsave)(&q->owner_lock);
sound/core/seq/seq_queue.c
360
q->klocked = 0;
sound/core/seq/seq_queue.c
366
struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
sound/core/seq/seq_queue.c
368
if (! q)
sound/core/seq/seq_queue.c
370
guard(spinlock_irqsave)(&q->owner_lock);
sound/core/seq/seq_queue.c
371
return check_access(q, client);
sound/core/seq/seq_queue.c
381
struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
sound/core/seq/seq_queue.c
383
if (q == NULL)
sound/core/seq/seq_queue.c
386
if (!queue_access_lock(q, client))
sound/core/seq/seq_queue.c
389
scoped_guard(spinlock_irqsave, &q->owner_lock) {
sound/core/seq/seq_queue.c
390
q->locked = locked ? 1 : 0;
sound/core/seq/seq_queue.c
391
q->owner = client;
sound/core/seq/seq_queue.c
393
queue_access_unlock(q);
sound/core/seq/seq_queue.c
442
struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(queueid);
sound/core/seq/seq_queue.c
445
if (q == NULL)
sound/core/seq/seq_queue.c
447
if (!queue_access_lock(q, client))
sound/core/seq/seq_queue.c
450
result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq,
sound/core/seq/seq_queue.c
453
result = snd_seq_timer_set_skew(q->timer, info->skew_value,
sound/core/seq/seq_queue.c
455
queue_access_unlock(q);
sound/core/seq/seq_queue.c
50
static int queue_list_add(struct snd_seq_queue *q)
sound/core/seq/seq_queue.c
501
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_queue.c
504
if (q == NULL)
sound/core/seq/seq_queue.c
506
return test_bit(client, q->clients_bitmap) ? 1 : 0;
sound/core/seq/seq_queue.c
522
struct snd_seq_queue *q = queue_list_remove(i, client);
sound/core/seq/seq_queue.c
523
if (q)
sound/core/seq/seq_queue.c
524
queue_delete(q);
sound/core/seq/seq_queue.c
531
struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
sound/core/seq/seq_queue.c
532
if (!q)
sound/core/seq/seq_queue.c
534
if (test_bit(client, q->clients_bitmap)) {
sound/core/seq/seq_queue.c
535
snd_seq_prioq_leave(q->tickq, client, 0);
sound/core/seq/seq_queue.c
536
snd_seq_prioq_leave(q->timeq, client, 0);
sound/core/seq/seq_queue.c
537
snd_seq_queue_use(q->queue, client, 0);
sound/core/seq/seq_queue.c
552
struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
sound/core/seq/seq_queue.c
553
if (!q)
sound/core/seq/seq_queue.c
555
if (test_bit(client, q->clients_bitmap) &&
sound/core/seq/seq_queue.c
557
q->queue == info->queue)) {
sound/core/seq/seq_queue.c
558
snd_seq_prioq_remove_events(q->tickq, client, info);
sound/core/seq/seq_queue.c
559
snd_seq_prioq_remove_events(q->timeq, client, info);
sound/core/seq/seq_queue.c
569
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
sound/core/seq/seq_queue.c
57
queue_list[i] = q;
sound/core/seq/seq_queue.c
577
sev.time.tick = q->timer->tick.cur_tick;
sound/core/seq/seq_queue.c
578
sev.queue = q->queue;
sound/core/seq/seq_queue.c
579
sev.data.queue.queue = q->queue;
sound/core/seq/seq_queue.c
58
q->queue = i;
sound/core/seq/seq_queue.c
592
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
sound/core/seq/seq_queue.c
598
snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
sound/core/seq/seq_queue.c
599
snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
sound/core/seq/seq_queue.c
600
if (! snd_seq_timer_start(q->timer))
sound/core/seq/seq_queue.c
601
queue_broadcast_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
605
if (! snd_seq_timer_continue(q->timer))
sound/core/seq/seq_queue.c
606
queue_broadcast_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
610
snd_seq_timer_stop(q->timer);
sound/core/seq/seq_queue.c
611
queue_broadcast_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
615
snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
sound/core/seq/seq_queue.c
616
queue_broadcast_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
620
if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
sound/core/seq/seq_queue.c
621
queue_broadcast_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
626
if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
sound/core/seq/seq_queue.c
627
queue_broadcast_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
631
if (snd_seq_timer_set_skew(q->timer,
sound/core/seq/seq_queue.c
634
queue_broadcast_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
650
struct snd_seq_queue *q __free(snd_seq_queue) =
sound/core/seq/seq_queue.c
653
if (q == NULL)
sound/core/seq/seq_queue.c
656
if (!queue_access_lock(q, ev->source.client))
sound/core/seq/seq_queue.c
659
snd_seq_queue_process_event(q, ev, atomic, hop);
sound/core/seq/seq_queue.c
661
queue_access_unlock(q);
sound/core/seq/seq_queue.c
679
struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(i);
sound/core/seq/seq_queue.c
68
struct snd_seq_queue *q;
sound/core/seq/seq_queue.c
680
if (!q)
sound/core/seq/seq_queue.c
683
tmr = q->timer;
sound/core/seq/seq_queue.c
689
scoped_guard(spinlock_irq, &q->owner_lock) {
sound/core/seq/seq_queue.c
690
locked = q->locked;
sound/core/seq/seq_queue.c
691
owner = q->owner;
sound/core/seq/seq_queue.c
694
snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
sound/core/seq/seq_queue.c
697
snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
sound/core/seq/seq_queue.c
698
snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
sound/core/seq/seq_queue.c
71
q = queue_list[id];
sound/core/seq/seq_queue.c
72
if (q) {
sound/core/seq/seq_queue.c
73
guard(spinlock)(&q->owner_lock);
sound/core/seq/seq_queue.c
74
if (q->owner == client) {
sound/core/seq/seq_queue.c
76
q->klocked = 1;
sound/core/seq/seq_queue.c
79
return q;
sound/core/seq/seq_queue.c
90
struct snd_seq_queue *q;
sound/core/seq/seq_queue.c
92
q = kzalloc_obj(*q);
sound/core/seq/seq_queue.c
93
if (!q)
sound/core/seq/seq_queue.c
96
spin_lock_init(&q->owner_lock);
sound/core/seq/seq_queue.c
97
spin_lock_init(&q->check_lock);
sound/core/seq/seq_queue.c
98
mutex_init(&q->timer_mutex);
sound/core/seq/seq_queue.c
99
snd_use_lock_init(&q->use_lock);
sound/core/seq/seq_queue.h
74
#define queuefree(q) snd_use_lock_free(&(q)->use_lock)
sound/core/seq/seq_queue.h
82
void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop);
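The snd_seq_check_queue() entries above show a re-entrancy guard built from two flags: a caller that finds check_blocked set just records check_again and leaves, while the current owner re-runs the drain as long as check_again keeps getting set, and only then clears check_blocked. A minimal single-lock sketch of that pattern (illustrative names; the kernel uses a spinlock and scoped guards):

    #include <pthread.h>

    struct checker {
        pthread_mutex_t lock;
        int blocked, again;
    };

    static void drain(void) { /* deliver due events; stands in for the prioq scan */ }

    static void check_queue(struct checker *c)
    {
        pthread_mutex_lock(&c->lock);
        if (c->blocked) {               /* someone else is draining: leave a note */
            c->again = 1;
            pthread_mutex_unlock(&c->lock);
            return;
        }
        c->blocked = 1;
        pthread_mutex_unlock(&c->lock);

        for (;;) {
            drain();
            pthread_mutex_lock(&c->lock);
            if (c->again) {             /* new work arrived while draining: go again */
                c->again = 0;
                pthread_mutex_unlock(&c->lock);
                continue;
            }
            c->blocked = 0;
            pthread_mutex_unlock(&c->lock);
            break;
        }
    }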
sound/core/seq/seq_timer.c
122
struct snd_seq_queue *q = timeri->callback_data;
sound/core/seq/seq_timer.c
125
if (q == NULL)
sound/core/seq/seq_timer.c
127
tmr = q->timer;
sound/core/seq/seq_timer.c
153
snd_seq_check_queue(q, 1, 0);
sound/core/seq/seq_timer.c
243
int snd_seq_timer_open(struct snd_seq_queue *q)
sound/core/seq/seq_timer.c
250
tmr = q->timer;
sound/core/seq/seq_timer.c
255
sprintf(str, "sequencer queue %i", q->queue);
sound/core/seq/seq_timer.c
264
t->callback_data = q;
sound/core/seq/seq_timer.c
266
err = snd_timer_open(t, &tmr->alsa_id, q->queue);
sound/core/seq/seq_timer.c
276
err = snd_timer_open(t, &tid, q->queue);
sound/core/seq/seq_timer.c
298
int snd_seq_timer_close(struct snd_seq_queue *q)
sound/core/seq/seq_timer.c
303
tmr = q->timer;
sound/core/seq/seq_timer.c
448
struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(idx);
sound/core/seq/seq_timer.c
450
if (q == NULL)
sound/core/seq/seq_timer.c
452
scoped_guard(mutex, &q->timer_mutex) {
sound/core/seq/seq_timer.c
453
tmr = q->timer;
sound/core/seq/seq_timer.c
459
snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
sound/core/seq/seq_timer.h
110
int snd_seq_timer_open(struct snd_seq_queue *q);
sound/core/seq/seq_timer.h
111
int snd_seq_timer_close(struct snd_seq_queue *q);
sound/hda/codecs/hdmi/hdmi.c
1572
const struct snd_pci_quirk *q;
sound/hda/codecs/hdmi/hdmi.c
1583
q = snd_pci_quirk_lookup(codec->bus->pci, force_connect_list);
sound/hda/codecs/hdmi/hdmi.c
1585
if (q && q->value)
sound/hda/codecs/realtek/realtek.c
918
const struct alc_codec_rename_pci_table *q;
sound/hda/codecs/realtek/realtek.c
929
for (q = rename_pci_tbl; q->codec_vendor_id; q++) {
sound/hda/codecs/realtek/realtek.c
930
if (q->codec_vendor_id != codec->core.vendor_id)
sound/hda/codecs/realtek/realtek.c
932
if (q->pci_subvendor != codec->bus->pci->subsystem_vendor)
sound/hda/codecs/realtek/realtek.c
934
if (!q->pci_subdevice ||
sound/hda/codecs/realtek/realtek.c
935
q->pci_subdevice == codec->bus->pci->subsystem_device)
sound/hda/codecs/realtek/realtek.c
936
return alc_codec_rename(codec, q->name);
sound/hda/codecs/realtek/realtek.c
990
const struct snd_pci_quirk *q;
sound/hda/codecs/realtek/realtek.c
991
q = snd_pci_quirk_lookup(codec->bus->pci, beep_allow_list);
sound/hda/codecs/realtek/realtek.c
992
if (q)
sound/hda/codecs/realtek/realtek.c
993
return q->value;
sound/hda/common/auto_parser.c
1011
const struct hda_quirk *q;
sound/hda/common/auto_parser.c
1059
q = hda_quirk_lookup_id(vendor, device, quirk);
sound/hda/common/auto_parser.c
1060
if (q) {
sound/hda/common/auto_parser.c
1067
for (q = quirk; q->subvendor || q->subdevice; q++) {
sound/hda/common/auto_parser.c
1069
if (!codec->bus->pci || q->match_codec_ssid) {
sound/hda/common/auto_parser.c
1070
if (hda_quirk_match(codec_vendor, codec_device, q)) {
sound/hda/common/auto_parser.c
1075
if (hda_quirk_match(pci_vendor, pci_device, q)) {
sound/hda/common/auto_parser.c
1083
q = hda_quirk_lookup_id(codec_vendor, codec_device, quirk);
sound/hda/common/auto_parser.c
1084
if (q) {
sound/hda/common/auto_parser.c
1092
id = q->value;
sound/hda/common/auto_parser.c
1094
name = q->name;
sound/hda/common/auto_parser.c
1098
type, q->subvendor, q->subdevice);
sound/hda/common/auto_parser.c
968
static bool hda_quirk_match(u16 vendor, u16 device, const struct hda_quirk *q)
sound/hda/common/auto_parser.c
970
if (q->subvendor != vendor)
sound/hda/common/auto_parser.c
972
return !q->subdevice ||
sound/hda/common/auto_parser.c
973
(device & q->subdevice_mask) == q->subdevice;
sound/hda/common/auto_parser.c
980
const struct hda_quirk *q;
sound/hda/common/auto_parser.c
982
for (q = list; q->subvendor || q->subdevice; q++) {
sound/hda/common/auto_parser.c
983
if (hda_quirk_match(vendor, device, q))
sound/hda/common/auto_parser.c
984
return q;
sound/hda/common/codec.c
1162
struct hda_cvt_setup *q);
sound/hda/common/codec.c
1197
struct hda_cvt_setup *q)
sound/hda/common/codec.c
1200
hda_nid_t nid = q->nid;
sound/hda/common/codec.c
1202
if (q->stream_tag || q->channel_id)
sound/hda/common/codec.c
1204
if (q->format_id)
sound/hda/common/codec.c
1207
memset(q, 0, sizeof(*q));
sound/hda/common/codec.c
1208
q->nid = nid;
sound/hda/controllers/intel.c
1524
const struct snd_pci_quirk *q;
sound/hda/controllers/intel.c
1537
q = snd_pci_quirk_lookup(chip->pci, position_fix_list);
sound/hda/controllers/intel.c
1538
if (q) {
sound/hda/controllers/intel.c
1541
q->value, q->subvendor, q->subdevice);
sound/hda/controllers/intel.c
1542
return q->value;
sound/hda/controllers/intel.c
1621
const struct snd_pci_quirk *q;
sound/hda/controllers/intel.c
1625
q = snd_pci_quirk_lookup(chip->pci, probe_mask_list);
sound/hda/controllers/intel.c
1626
if (q) {
sound/hda/controllers/intel.c
1629
q->value, q->subvendor, q->subdevice);
sound/hda/controllers/intel.c
1630
chip->codec_probe_mask = q->value;
sound/hda/controllers/intel.c
1662
const struct snd_pci_quirk *q;
sound/hda/controllers/intel.c
1669
q = snd_pci_quirk_lookup(chip->pci, msi_deny_list);
sound/hda/controllers/intel.c
1670
if (q) {
sound/hda/controllers/intel.c
1673
q->subvendor, q->subdevice, q->value);
sound/hda/controllers/intel.c
1674
chip->msi = q->value;
sound/hda/controllers/intel.c
2308
const struct snd_pci_quirk *q;
sound/hda/controllers/intel.c
2310
q = snd_pci_quirk_lookup(chip->pci, power_save_denylist);
sound/hda/controllers/intel.c
2311
if (q && val) {
sound/hda/controllers/intel.c
2313
q->subvendor, q->subdevice);
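The HDA and PCI entries above (hdmi, realtek, auto_parser, intel, atiixp, nm256) share one idiom: a static table keyed by PCI subsystem vendor/device, probed once with snd_pci_quirk_lookup() or hda_quirk_lookup_id(), with q->value carrying the per-machine override (position fix, probe mask, MSI on/off, and so on). A standalone sketch of that table walk, assuming the usual conventions that a zero subdevice matches the whole vendor and a zeroed entry terminates the table (the auto_parser variant additionally masks the device with q->subdevice_mask before comparing):

    #include <stdio.h>

    struct quirk { unsigned short subvendor, subdevice; int value; };

    static const struct quirk quirks[] = {
        { 0x1028, 0x0470, 1 },          /* illustrative entries, not real hardware */
        { 0x103c, 0x0000, 2 },          /* subdevice 0: match every device of the vendor */
        { 0, 0, 0 }                     /* terminator */
    };

    static const struct quirk *quirk_lookup(unsigned short sv, unsigned short sd)
    {
        const struct quirk *q;

        for (q = quirks; q->subvendor || q->subdevice; q++)
            if (q->subvendor == sv && (!q->subdevice || q->subdevice == sd))
                return q;
        return NULL;
    }

    int main(void)
    {
        const struct quirk *q = quirk_lookup(0x103c, 0x1234);

        printf("override: %d\n", q ? q->value : -1);   /* prints 2 */
        return 0;
    }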
sound/pci/ac97/ac97_codec.c
2932
const struct quirk_table *q;
sound/pci/ac97/ac97_codec.c
2935
q = &applicable_quirks[i];
sound/pci/ac97/ac97_codec.c
2936
if (q->name && ! strcmp(typestr, q->name))
sound/pci/atiixp.c
548
const struct snd_pci_quirk *q;
sound/pci/atiixp.c
550
q = snd_pci_quirk_lookup(pci, atiixp_quirks);
sound/pci/atiixp.c
551
if (q) {
sound/pci/atiixp.c
553
snd_pci_quirk_name(q), q->value);
sound/pci/atiixp.c
554
return q->value;
sound/pci/emu10k1/memory.c
169
struct snd_emu10k1_memblk *q;
sound/pci/emu10k1/memory.c
174
q = get_emu10k1_memblk(p, mapped_link);
sound/pci/emu10k1/memory.c
175
start_page = q->mapped_page + q->pages;
sound/pci/emu10k1/memory.c
181
q = get_emu10k1_memblk(p, mapped_link);
sound/pci/emu10k1/memory.c
182
end_page = q->mapped_page;
sound/pci/emu10k1/memory.c
443
struct snd_emu10k1_memblk *q;
sound/pci/emu10k1/memory.c
448
q = get_emu10k1_memblk(p, mem.list);
sound/pci/emu10k1/memory.c
449
if (q->last_page == first_page)
sound/pci/emu10k1/memory.c
455
q = get_emu10k1_memblk(p, mem.list);
sound/pci/emu10k1/memory.c
456
if (q->first_page == last_page)
sound/pci/nm256/nm256.c
1557
const struct snd_pci_quirk *q;
sound/pci/nm256/nm256.c
1559
q = snd_pci_quirk_lookup(pci, nm256_quirks);
sound/pci/nm256/nm256.c
1560
if (q) {
sound/pci/nm256/nm256.c
1562
snd_pci_quirk_name(q));
sound/pci/nm256/nm256.c
1563
switch (q->value) {
sound/sh/aica.c
216
int q, err, period_offset;
sound/sh/aica.c
225
for (q = 0; q < channels; q++) {
sound/sh/aica.c
229
(AICA_BUFFER_SIZE * q) /
sound/sh/aica.c
233
AICA_CHANNEL0_OFFSET + q * CHANNEL_OFFSET +
sound/soc/codecs/rt5645.c
33
#define QUIRK_INV_JD1_1(q) ((q) & 1)
sound/soc/codecs/rt5645.c
34
#define QUIRK_LEVEL_IRQ(q) (((q) >> 1) & 1)
sound/soc/codecs/rt5645.c
35
#define QUIRK_IN2_DIFF(q) (((q) >> 2) & 1)
sound/soc/codecs/rt5645.c
36
#define QUIRK_INV_HP_POL(q) (((q) >> 3) & 1)
sound/soc/codecs/rt5645.c
37
#define QUIRK_JD_MODE(q) (((q) >> 4) & 7)
sound/soc/codecs/rt5645.c
38
#define QUIRK_DMIC1_DATA_PIN(q) (((q) >> 8) & 3)
sound/soc/codecs/rt5645.c
39
#define QUIRK_DMIC2_DATA_PIN(q) (((q) >> 12) & 3)
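The rt5645.c macros above pack several small settings into one quirk word: single bits at positions 0 through 3, a 3-bit jack-detect mode at bit 4, and two 2-bit DMIC pin selectors at bits 8 and 12. A quick demonstration of encoding and decoding such a word; the macro definitions are copied from the entries above, the example value is made up:

    #include <stdio.h>

    #define QUIRK_INV_JD1_1(q)       ((q) & 1)
    #define QUIRK_JD_MODE(q)         (((q) >> 4) & 7)
    #define QUIRK_DMIC1_DATA_PIN(q)  (((q) >> 8) & 3)

    int main(void)
    {
        unsigned long q = 1 | (5 << 4) | (2 << 8);   /* inv-JD1, JD mode 5, pin 2 */

        printf("inv_jd1_1=%lu jd_mode=%lu dmic1_pin=%lu\n",
               QUIRK_INV_JD1_1(q), QUIRK_JD_MODE(q), QUIRK_DMIC1_DATA_PIN(q));
        return 0;
    }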
sound/soc/codecs/tas2552.c
187
unsigned int d, q, t;
sound/soc/codecs/tas2552.c
199
q = d / (t + 1);
sound/soc/codecs/tas2552.c
200
d = q + ((9999 - pll_clkin % 10000) * (d / t - q)) / 10000;
sound/soc/codecs/tas2781-i2c.c
552
struct bulk_reg_val *q = (struct bulk_reg_val *)tas2563_cali_start_reg;
sound/soc/codecs/tas2781-i2c.c
581
q[j].val[0]);
sound/soc/codecs/tas2781-i2c.c
584
q[j].val, 4);
tools/accounting/delaytop.c
169
SORT_FIELD(irq, q, MODE_DEFAULT),
tools/arch/x86/include/asm/asm.h
29
inst##q##__VA_ARGS__)
tools/bootconfig/main.c
27
char q;
tools/bootconfig/main.c
33
q = '\'';
tools/bootconfig/main.c
35
q = '"';
tools/bootconfig/main.c
36
printf("%c%s%c%s", q, val, q, xbc_node_is_array(node) ? ", " : eol);
tools/hv/hv_fcopy_uio_daemon.c
117
char *q, *p;
tools/hv/hv_fcopy_uio_daemon.c
131
while ((q = strchr(p, '/')) != NULL) {
tools/hv/hv_fcopy_uio_daemon.c
132
if (q == p) {
tools/hv/hv_fcopy_uio_daemon.c
136
*q = '\0';
tools/hv/hv_fcopy_uio_daemon.c
149
p = q + 1;
tools/hv/hv_fcopy_uio_daemon.c
150
*q = '/';
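The hv_fcopy_uio_daemon.c entries above walk a path with strchr('/'), temporarily writing '\0' at each separator so the prefix can be created, then restoring the '/' and moving on. A minimal sketch of that mkdir-parents loop; the mkdir call and mode are my assumption about what happens between the quoted lines, and error handling is pared down:

    #include <string.h>
    #include <sys/stat.h>

    static void make_parents(char *path)    /* path must be writable */
    {
        char *p = path, *q;

        while ((q = strchr(p, '/')) != NULL) {
            if (q == p) {                   /* leading or doubled '/': empty component */
                p = q + 1;
                continue;
            }
            *q = '\0';                      /* cut the string at this component */
            mkdir(path, 0755);              /* may already exist; ignored here */
            *q = '/';                       /* restore the separator */
            p = q + 1;
        }
    }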
tools/hv/hv_kvp_daemon.c
503
char *value, *q;
tools/hv/hv_kvp_daemon.c
517
q = p;
tools/hv/hv_kvp_daemon.c
523
*q++ = *p++;
tools/hv/hv_kvp_daemon.c
528
*q++ = *p++;
tools/hv/hv_kvp_daemon.c
531
*q = 0;
tools/include/linux/math64.h
10
u64 q;
tools/include/linux/math64.h
12
asm ("mulq %2; divq %3" : "=a" (q)
tools/include/linux/math64.h
16
return q;
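The tools/include math64.h entry above computes a 64-bit quotient of a 64x64-bit product by chaining x86-64 mulq and divq, so the 128-bit intermediate never touches C. On compilers that provide __int128 the same thing can be written portably; a sketch (the divisor must be nonzero and the quotient must fit in 64 bits, the same precondition under which divq would not fault):

    #include <stdint.h>

    static inline uint64_t mul_u64_u64_div_u64_portable(uint64_t a, uint64_t b,
                                                        uint64_t c)
    {
        /* full 128-bit product, then narrowing division */
        return (uint64_t)(((unsigned __int128)a * b) / c);
    }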
tools/net/ynl/samples/tc.c
11
static void tc_qdisc_print(struct tc_getqdisc_rsp *q)
tools/net/ynl/samples/tc.c
16
name = if_indextoname(q->_hdr.tcm_ifindex, ifname);
tools/net/ynl/samples/tc.c
20
if (q->_len.kind) {
tools/net/ynl/samples/tc.c
21
printf("%s ", q->kind);
tools/net/ynl/samples/tc.c
23
if (q->options._present.fq_codel) {
tools/net/ynl/samples/tc.c
27
fq_codel = &q->options.fq_codel;
tools/net/ynl/samples/tc.c
28
stats = q->stats2.app.fq_codel;
tools/net/ynl/samples/tc.c
35
if (q->stats2.app._len.fq_codel)
tools/perf/bench/sched-pipe.c
58
char *q;
tools/perf/bench/sched-pipe.c
66
q = strchr(p, ',');
tools/perf/bench/sched-pipe.c
67
if (q == NULL) {
tools/perf/bench/sched-pipe.c
71
*q = '\0';
tools/perf/bench/sched-pipe.c
74
cgrp_names[1] = strdup(q + 1);
tools/perf/builtin-ftrace.c
859
char *p, *q;
tools/perf/builtin-ftrace.c
868
for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) {
tools/perf/builtin-ftrace.c
869
*q = '\0';
tools/perf/util/annotate.c
2456
char *regname, *q;
tools/perf/util/annotate.c
2467
q = strpbrk(regname, ",) ");
tools/perf/util/annotate.c
2468
if (q)
tools/perf/util/annotate.c
2469
*q = '\0';
tools/perf/util/bpf-filter.c
208
char *buf, *p, *q;
tools/perf/util/bpf-filter.c
222
tgid = strtol(p + 6, &q, 0);
tools/perf/util/bpf-filter.c
224
if (*q != '\n')
tools/perf/util/demangle-java.c
101
buf[rlen++] = *q;
tools/perf/util/demangle-java.c
111
buf[rlen++] = *q;
tools/perf/util/demangle-java.c
117
buf[rlen++] = *q;
tools/perf/util/demangle-java.c
124
if (isalpha(*(q + 1)) && mode == MODE_CLASS)
tools/perf/util/demangle-java.c
137
buf[rlen++] = *q;
tools/perf/util/demangle-java.c
50
const char *q;
tools/perf/util/demangle-java.c
55
for (q = str; q != end; q++) {
tools/perf/util/demangle-java.c
60
switch (*q) {
tools/perf/util/demangle-java.c
73
buf[rlen++] = *q;
tools/perf/util/demangle-java.c
86
rlen += scnprintf(buf + rlen, maxlen - rlen, "%s", base_types[*q - 'A']);
tools/perf/util/demangle-java.c
92
buf[rlen++] = *q;
tools/perf/util/header.c
444
char *q = skip_spaces(r);
tools/perf/util/header.c
446
if (q != (p+1))
tools/perf/util/header.c
447
while ((*r++ = *q++));
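The perf header.c entries above appear to collapse runs of blanks in place: skip_spaces() finds the next non-blank q, and `while ((*r++ = *q++));` slides the remainder of the string, terminating NUL included, down over the gap. A standalone sketch of that squeeze with a local skip_spaces(); the surrounding loop and the space-only trigger are my reconstruction, not the kernel's exact code:

    #include <ctype.h>
    #include <stdio.h>

    static char *skip_spaces(char *s)
    {
        while (isspace((unsigned char)*s))
            s++;
        return s;
    }

    static void squeeze_spaces(char *s)
    {
        char *p;

        for (p = s; *p; p++) {
            if (*p == ' ' && p[1] == ' ') {       /* run of blanks starts here */
                char *r = p + 1, *q = skip_spaces(r);

                while ((*r++ = *q++))             /* copy tail down, NUL included */
                    ;
            }
        }
    }

    int main(void)
    {
        char buf[] = "Intel(R)    Xeon(R)  CPU";

        squeeze_spaces(buf);
        puts(buf);                                /* "Intel(R) Xeon(R) CPU" */
        return 0;
    }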
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4248
unsigned char *q = buf_a + len_a - MAX_PADDING;
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4251
for (i = MAX_PADDING; i; i--, p++, q++) {
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4252
if (*p != *q)
tools/perf/util/pfm.c
41
char *p, *q, *p_orig;
tools/perf/util/pfm.c
52
for (q = p; strsep(&p, ",{}"); q = p) {
tools/perf/util/pfm.c
64
if (*q == '\0') {
tools/perf/util/pfm.c
78
ret = pfm_get_perf_event_encoding(q, PFM_PLM0|PFM_PLM3,
tools/perf/util/pfm.c
89
&attr, q, /*metric_id=*/NULL,
tools/perf/util/s390-cpumsf.c
200
struct auxtrace_queue *q;
tools/perf/util/s390-cpumsf.c
206
q = &sf->queues.queue_array[sample->cpu];
tools/perf/util/s390-cpumsf.c
207
sfq = q->priv;
tools/perf/util/srcline.c
216
char *q = strim(p);
tools/perf/util/srcline.c
219
if (!strcasecmp(q, a2l_style_names[j])) {
tools/perf/util/srcline.c
226
pr_warning("Unknown addr2line style: %s\n", q);
tools/perf/util/synthetic-events.c
2512
char *p, *q;
tools/perf/util/synthetic-events.c
2518
for (q = synth; (p = strsep(&q, ",")); p = q) {
tools/power/cpupower/utils/helpers/bitmask.c
105
static const char *nexttoken(const char *q, int sep)
tools/power/cpupower/utils/helpers/bitmask.c
107
if (q)
tools/power/cpupower/utils/helpers/bitmask.c
108
q = strchr(q, sep);
tools/power/cpupower/utils/helpers/bitmask.c
109
if (q)
tools/power/cpupower/utils/helpers/bitmask.c
110
q++;
tools/power/cpupower/utils/helpers/bitmask.c
111
return q;
tools/power/cpupower/utils/helpers/bitmask.c
194
const char *p, *q;
tools/power/cpupower/utils/helpers/bitmask.c
198
q = buf;
tools/power/cpupower/utils/helpers/bitmask.c
199
while (p = q, q = nexttoken(q, ','), p) {
tools/sched_ext/include/scx/common.bpf.h
736
u64 q = x / r;
tools/sched_ext/include/scx/common.bpf.h
737
if (r <= q)
tools/sched_ext/include/scx/common.bpf.h
739
r = (r + q) >> 1;
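The sched_ext common.bpf.h fragments above are the core of a Babylonian (Newton) integer square root: keep averaging the guess r with x/r until the guess stops overshooting, i.e. until r <= x/r. The surrounding code is not shown in the index, so this standalone loop is a reconstruction around the three quoted lines:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t isqrt(uint64_t x)
    {
        uint64_t r = x, q;

        if (x < 2)
            return x;
        for (;;) {
            q = x / r;
            if (r <= q)             /* r*r <= x: converged from above */
                break;
            r = (r + q) >> 1;       /* next guess: average of r and x/r */
        }
        return r;                   /* floor(sqrt(x)) */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)isqrt(1000000));  /* prints 1000 */
        return 0;
    }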
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1211
const char *p = log_buf, *q;
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1220
q = strstr(p, buf);
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1221
if (!q) {
tools/testing/selftests/bpf/prog_tests/reg_bounds.c
1225
p = strstr(q, specs[i].reg_upper);
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
109
sch->q.qlen = 0;
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
26
if (sch->q.qlen == sch->limit)
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
35
sch->q.qlen++;
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
72
sch->q.qlen--;
tools/testing/selftests/bpf/progs/bpf_qdisc_fifo.c
93
bpf_for(i, 0, sch->q.qlen) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
132
private(D) struct fq_bpf_data q;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
247
flow->credit = q.initial_quantum,
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
280
hash = bpf_skb_get_hash(skb) & q.orphan_mask;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
287
hash = bpf_skb_get_hash(skb) & q.orphan_mask;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
304
return (s64)skb->tstamp > (s64)(q.ktime_cache + q.horizon);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
317
if (sch->q.qlen >= sch->limit)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
321
time_to_send = q.ktime_cache = bpf_ktime_get_ns();
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
324
q.ktime_cache = bpf_ktime_get_ns();
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
326
if (q.horizon_drop)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
329
skb->tstamp = q.ktime_cache + q.horizon;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
344
if (flow->qlen >= q.flow_plimit) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
353
if ((s64)(jiffies - (flow_copy->age + q.flow_refill_delay)) > 0) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
354
if (flow_copy->credit < q.quantum)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
355
flow_copy->credit = q.quantum;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
359
&q.new_flow_cnt);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
384
sch->q.qlen++;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
408
q.time_next_delayed_flow = flow->time_next_packet;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
422
fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
431
if (q.time_next_delayed_flow > flow->time_next_packet)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
432
q.time_next_delayed_flow = flow->time_next_packet;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
447
if (q.time_next_delayed_flow > now)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
450
sample = (unsigned long)(now - q.time_next_delayed_flow);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
451
q.unthrottle_latency_ns -= q.unthrottle_latency_ns >> 3;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
452
q.unthrottle_latency_ns += sample >> 3;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
454
q.time_next_delayed_flow = ~0ULL;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
472
if (q.new_flow_cnt) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
475
cnt = &q.new_flow_cnt;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
476
} else if (q.old_flow_cnt) {
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
479
cnt = &q.old_flow_cnt;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
481
if (q.time_next_delayed_flow != ~0ULL)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
482
ctx->expire = q.time_next_delayed_flow;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
492
flow->credit += q.quantum;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
493
fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
503
fq_flows_add_tail(&fq_old_flows, &fq_old_flows_lock, flow, &q.old_flow_cnt);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
595
if (!sch->q.qlen)
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
602
q.ktime_cache = cb_ctx.now = bpf_ktime_get_ns();
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
603
fq_check_throttled(q.ktime_cache);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
612
sch->q.qlen--;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
619
bpf_qdisc_watchdog_schedule(sch, cb_ctx.expire, q.timer_slack);
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
698
sch->q.qlen = 0;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
708
q.new_flow_cnt = 0;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
709
q.old_flow_cnt = 0;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
727
q.initial_quantum = 10 * psched_mtu;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
728
q.quantum = 2 * psched_mtu;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
729
q.flow_refill_delay = 40;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
730
q.flow_plimit = 100;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
731
q.horizon = 10ULL * NSEC_PER_SEC;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
732
q.horizon_drop = 1;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
733
q.orphan_mask = 1024 - 1;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
734
q.timer_slack = 10 * NSEC_PER_USEC;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
735
q.time_next_delayed_flow = ~0ULL;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
736
q.unthrottle_latency_ns = 0ULL;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
737
q.new_flow_cnt = 0;
tools/testing/selftests/bpf/progs/bpf_qdisc_fq.c
738
q.old_flow_cnt = 0;
tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
241
void (*q)(int);
tools/testing/selftests/bpf/progs/test_map_lock.c
39
struct array_elem *q;
tools/testing/selftests/bpf/progs/test_map_lock.c
51
q = bpf_map_lookup_elem(&array_map, &key);
tools/testing/selftests/bpf/progs/test_map_lock.c
52
if (!q)
tools/testing/selftests/bpf/progs/test_map_lock.c
54
bpf_spin_lock(&q->lock);
tools/testing/selftests/bpf/progs/test_map_lock.c
56
q->var[i] = rnd;
tools/testing/selftests/bpf/progs/test_map_lock.c
57
bpf_spin_unlock(&q->lock);
tools/testing/selftests/bpf/progs/test_spin_lock.c
55
struct bpf_vqueue *q;
tools/testing/selftests/bpf/progs/test_spin_lock.c
80
q = bpf_map_lookup_elem(&vqueue, &key);
tools/testing/selftests/bpf/progs/test_spin_lock.c
81
if (!q)
tools/testing/selftests/bpf/progs/test_spin_lock.c
84
bpf_spin_lock(&q->lock);
tools/testing/selftests/bpf/progs/test_spin_lock.c
85
q->credit += CREDIT_PER_NS(curtime - q->lasttime, q->rate);
tools/testing/selftests/bpf/progs/test_spin_lock.c
86
q->lasttime = curtime;
tools/testing/selftests/bpf/progs/test_spin_lock.c
87
if (q->credit > max_credit)
tools/testing/selftests/bpf/progs/test_spin_lock.c
88
q->credit = max_credit;
tools/testing/selftests/bpf/progs/test_spin_lock.c
89
q->credit -= pkt_len;
tools/testing/selftests/bpf/progs/test_spin_lock.c
90
credit = q->credit;
tools/testing/selftests/bpf/progs/test_spin_lock.c
91
bpf_spin_unlock(&q->lock);
tools/testing/selftests/bpf/test_verifier.c
1308
const char *p, *q;
tools/testing/selftests/bpf/test_verifier.c
1325
q = strstr(log, needle);
tools/testing/selftests/bpf/test_verifier.c
1326
if (!q) {
tools/testing/selftests/bpf/test_verifier.c
1331
log = q + len;
tools/testing/selftests/bpf/trace_helpers.c
253
struct procmap_query q;
tools/testing/selftests/bpf/trace_helpers.c
256
memset(&q, 0, sizeof(q));
tools/testing/selftests/bpf/trace_helpers.c
257
q.size = sizeof(q);
tools/testing/selftests/bpf/trace_helpers.c
258
q.query_flags = query_flags;
tools/testing/selftests/bpf/trace_helpers.c
259
q.query_addr = (__u64)addr;
tools/testing/selftests/bpf/trace_helpers.c
260
q.vma_name_addr = (__u64)path_buf;
tools/testing/selftests/bpf/trace_helpers.c
261
q.vma_name_size = sizeof(path_buf);
tools/testing/selftests/bpf/trace_helpers.c
262
q.build_id_addr = (__u64)build_id_buf;
tools/testing/selftests/bpf/trace_helpers.c
263
q.build_id_size = sizeof(build_id_buf);
tools/testing/selftests/bpf/trace_helpers.c
265
err = ioctl(fd, PROCMAP_QUERY, &q);
tools/testing/selftests/bpf/trace_helpers.c
277
(long)addr, (long)q.vma_start, (long)q.vma_end,
tools/testing/selftests/bpf/trace_helpers.c
278
(q.vma_flags & PROCMAP_QUERY_VMA_READABLE) ? 'r' : '-',
tools/testing/selftests/bpf/trace_helpers.c
279
(q.vma_flags & PROCMAP_QUERY_VMA_WRITABLE) ? 'w' : '-',
tools/testing/selftests/bpf/trace_helpers.c
280
(q.vma_flags & PROCMAP_QUERY_VMA_EXECUTABLE) ? 'x' : '-',
tools/testing/selftests/bpf/trace_helpers.c
281
(q.vma_flags & PROCMAP_QUERY_VMA_SHARED) ? 's' : 'p',
tools/testing/selftests/bpf/trace_helpers.c
282
(long)q.vma_offset, q.dev_major, q.dev_minor, (long)q.inode,
tools/testing/selftests/bpf/trace_helpers.c
283
q.vma_name_size ? path_buf : "",
tools/testing/selftests/bpf/trace_helpers.c
284
q.build_id_size ? "YES" : "NO",
tools/testing/selftests/bpf/trace_helpers.c
285
q.build_id_size);
tools/testing/selftests/bpf/trace_helpers.c
288
*start = q.vma_start;
tools/testing/selftests/bpf/trace_helpers.c
289
*offset = q.vma_offset;
tools/testing/selftests/bpf/trace_helpers.c
290
*flags = q.vma_flags;
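The trace_helpers.c and proc selftest entries above use the PROCMAP_QUERY ioctl on /proc/<pid>/maps to resolve the VMA covering an address without parsing the text file: zero the struct, set q.size so the kernel can version it, point vma_name_addr/vma_name_size at a buffer, and read the vma_* fields back on success. A trimmed sketch of that call sequence, using only fields that appear in the entries; the linux/fs.h header location assumes a recent kernel's UAPI:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>       /* struct procmap_query, PROCMAP_QUERY (recent kernels) */

    int query_vma(unsigned long addr)
    {
        struct procmap_query q;
        char path[1024];
        int fd = open("/proc/self/maps", O_RDONLY);

        if (fd < 0)
            return -1;
        memset(&q, 0, sizeof(q));
        q.size = sizeof(q);                 /* lets the kernel version the struct */
        q.query_addr = addr;
        q.query_flags = 0;                  /* 0 = exact covering VMA only */
        q.vma_name_addr = (__u64)(unsigned long)path;
        q.vma_name_size = sizeof(path);
        if (ioctl(fd, PROCMAP_QUERY, &q) == 0)
            printf("vma %llx-%llx %s\n",
                   (unsigned long long)q.vma_start,
                   (unsigned long long)q.vma_end,
                   q.vma_name_size ? path : "");
        close(fd);
        return 0;
    }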
tools/testing/selftests/cgroup/test_hugetlb_memcg.c
25
char *p, *q, *path = "/proc/meminfo", *tag = "Hugepagesize:";
tools/testing/selftests/cgroup/test_hugetlb_memcg.c
55
val = strtol(p, &q, 0);
tools/testing/selftests/cgroup/test_hugetlb_memcg.c
56
if (*q != ' ') {
tools/testing/selftests/mm/hmm-tests.c
782
char *p, *q;
tools/testing/selftests/mm/hmm-tests.c
812
val = strtol(p, &q, 0);
tools/testing/selftests/mm/hmm-tests.c
813
if (*q != ' ') {
tools/testing/selftests/net/tcp_ao/lib/repair.c
43
struct tcp_sock_queue *q)
tools/testing/selftests/net/tcp_ao/lib/repair.c
51
len = sizeof(q->seq);
tools/testing/selftests/net/tcp_ao/lib/repair.c
52
ret = getsockopt(sk, SOL_TCP, TCP_QUEUE_SEQ, &q->seq, &len);
tools/testing/selftests/net/tcp_ao/lib/repair.c
53
if (ret || len != sizeof(q->seq))
tools/testing/selftests/net/tcp_ao/lib/repair.c
57
q->buf = NULL;
tools/testing/selftests/net/tcp_ao/lib/repair.c
61
q->buf = malloc(qlen);
tools/testing/selftests/net/tcp_ao/lib/repair.c
62
if (q->buf == NULL)
tools/testing/selftests/net/tcp_ao/lib/repair.c
64
ret = recv(sk, q->buf, qlen, MSG_PEEK | MSG_DONTWAIT);
tools/testing/selftests/net/tcp_ao/lib/setup.c
185
unsigned int q = stage_nr;
tools/testing/selftests/net/tcp_ao/lib/setup.c
188
stage_threads[q]++;
tools/testing/selftests/net/tcp_ao/lib/setup.c
189
if (stage_threads[q] == nr_threads) {
tools/testing/selftests/net/tcp_ao/lib/setup.c
194
while (stage_threads[q] < nr_threads)
tools/testing/selftests/powerpc/copyloops/exc_validate.c
103
q = p + page_size - MAX_LEN;
tools/testing/selftests/powerpc/copyloops/exc_validate.c
109
do_one_test(q+dst, q+src, len);
tools/testing/selftests/powerpc/copyloops/exc_validate.c
82
static char *p, *q;
tools/testing/selftests/proc/proc-maps-race.c
325
struct procmap_query q;
tools/testing/selftests/proc/proc-maps-race.c
327
memset(&q, 0, sizeof(q));
tools/testing/selftests/proc/proc-maps-race.c
328
q.size = sizeof(q);
tools/testing/selftests/proc/proc-maps-race.c
330
q.query_addr = (unsigned long long)addr;
tools/testing/selftests/proc/proc-maps-race.c
331
q.query_flags = 0;
tools/testing/selftests/proc/proc-maps-race.c
332
if (ioctl(maps_fd, PROCMAP_QUERY, &q))
tools/testing/selftests/proc/proc-maps-race.c
335
*vma_start = q.vma_start;
tools/testing/selftests/proc/proc-maps-race.c
336
*vma_end = q.vma_end;
tools/testing/selftests/proc/proc-pid-vm.c
503
struct procmap_query q;
tools/testing/selftests/proc/proc-pid-vm.c
512
memset(&q, 0, sizeof(q));
tools/testing/selftests/proc/proc-pid-vm.c
513
q.size = sizeof(q);
tools/testing/selftests/proc/proc-pid-vm.c
514
q.query_addr = VADDR;
tools/testing/selftests/proc/proc-pid-vm.c
515
q.query_flags = 0;
tools/testing/selftests/proc/proc-pid-vm.c
516
q.vma_name_addr = (__u64)(unsigned long)path_buf;
tools/testing/selftests/proc/proc-pid-vm.c
517
q.vma_name_size = sizeof(path_buf);
tools/testing/selftests/proc/proc-pid-vm.c
519
err = ioctl(fd, PROCMAP_QUERY, &q);
tools/testing/selftests/proc/proc-pid-vm.c
522
assert(q.query_addr == VADDR);
tools/testing/selftests/proc/proc-pid-vm.c
523
assert(q.query_flags == 0);
tools/testing/selftests/proc/proc-pid-vm.c
525
assert(q.vma_flags == (PROCMAP_QUERY_VMA_READABLE | PROCMAP_QUERY_VMA_EXECUTABLE));
tools/testing/selftests/proc/proc-pid-vm.c
526
assert(q.vma_start == VADDR);
tools/testing/selftests/proc/proc-pid-vm.c
527
assert(q.vma_end == VADDR + PAGE_SIZE);
tools/testing/selftests/proc/proc-pid-vm.c
528
assert(q.vma_page_size == PAGE_SIZE);
tools/testing/selftests/proc/proc-pid-vm.c
530
assert(q.vma_offset == 0);
tools/testing/selftests/proc/proc-pid-vm.c
531
assert(q.inode == st.st_ino);
tools/testing/selftests/proc/proc-pid-vm.c
532
assert(q.dev_major == MAJOR(st.st_dev));
tools/testing/selftests/proc/proc-pid-vm.c
533
assert(q.dev_minor == MINOR(st.st_dev));
tools/testing/selftests/proc/proc-pid-vm.c
537
assert(q.vma_name_size == strlen(exp_path_buf) + 1);
tools/testing/selftests/proc/proc-pid-vm.c
541
memset(&q, 0, sizeof(q));
tools/testing/selftests/proc/proc-pid-vm.c
542
q.size = sizeof(q);
tools/testing/selftests/proc/proc-pid-vm.c
543
q.query_addr = VADDR - 1;
tools/testing/selftests/proc/proc-pid-vm.c
544
q.query_flags = 0; /* exact match */
tools/testing/selftests/proc/proc-pid-vm.c
546
err = ioctl(fd, PROCMAP_QUERY, &q);
tools/testing/selftests/proc/proc-pid-vm.c
551
memset(&q, 0, sizeof(q));
tools/testing/selftests/proc/proc-pid-vm.c
552
q.size = sizeof(q);
tools/testing/selftests/proc/proc-pid-vm.c
553
q.query_addr = VADDR - 1;
tools/testing/selftests/proc/proc-pid-vm.c
554
q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;
tools/testing/selftests/proc/proc-pid-vm.c
556
err = ioctl(fd, PROCMAP_QUERY, &q);
tools/testing/selftests/proc/proc-pid-vm.c
559
assert(q.query_addr == VADDR - 1);
tools/testing/selftests/proc/proc-pid-vm.c
560
assert(q.query_flags == PROCMAP_QUERY_COVERING_OR_NEXT_VMA);
tools/testing/selftests/proc/proc-pid-vm.c
561
assert(q.vma_start == VADDR);
tools/testing/selftests/proc/proc-pid-vm.c
562
assert(q.vma_end == VADDR + PAGE_SIZE);
tools/testing/selftests/proc/proc-pid-vm.c
565
memset(&q, 0, sizeof(q));
tools/testing/selftests/proc/proc-pid-vm.c
566
q.size = sizeof(q);
tools/testing/selftests/proc/proc-pid-vm.c
567
q.query_addr = VADDR + PAGE_SIZE; /* point right after the VMA */
tools/testing/selftests/proc/proc-pid-vm.c
568
q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;
tools/testing/selftests/proc/proc-pid-vm.c
570
err = ioctl(fd, PROCMAP_QUERY, &q);
tools/testing/selftests/proc/proc-pid-vm.c
575
memset(&q, 0, sizeof(q));
tools/testing/selftests/proc/proc-pid-vm.c
576
q.size = sizeof(q);
tools/testing/selftests/proc/proc-pid-vm.c
577
q.query_addr = VADDR;
tools/testing/selftests/proc/proc-pid-vm.c
578
q.query_flags = PROCMAP_QUERY_VMA_WRITABLE;
tools/testing/selftests/proc/proc-pid-vm.c
580
err = ioctl(fd, PROCMAP_QUERY, &q);
tools/testing/selftests/timers/mqueue-lat.c
61
mqd_t q;
tools/testing/selftests/timers/mqueue-lat.c
66
q = mq_open("/foo", O_CREAT | O_RDONLY, 0666, NULL);
tools/testing/selftests/timers/mqueue-lat.c
67
if (q < 0) {
tools/testing/selftests/timers/mqueue-lat.c
71
mq_getattr(q, &attr);
tools/testing/selftests/timers/mqueue-lat.c
84
ret = mq_timedreceive(q, buf, sizeof(buf), NULL, &target);
tools/testing/selftests/timers/mqueue-lat.c
92
mq_close(q);
tools/testing/selftests/ublk/batch.c
138
struct ublk_queue *q = &t->dev->q[0];
tools/testing/selftests/ublk/batch.c
150
if (ublk_queue_use_auto_zc(q)) {
tools/testing/selftests/ublk/batch.c
151
if (ublk_queue_auto_zc_fallback(q))
tools/testing/selftests/ublk/batch.c
153
} else if (!ublk_queue_no_buf(q))
tools/testing/selftests/ublk/batch.c
278
struct ublk_queue *q,
tools/testing/selftests/ublk/batch.c
291
ublk_init_batch_cmd(t, q->q_id, sqe, UBLK_U_IO_FETCH_IO_CMDS, 2, nr_elem,
tools/testing/selftests/ublk/batch.c
308
struct ublk_queue *q = &t->dev->q[i];
tools/testing/selftests/ublk/batch.c
311
ublk_batch_queue_fetch(t, q, j++);
tools/testing/selftests/ublk/batch.c
312
ublk_batch_queue_fetch(t, q, j++);
tools/testing/selftests/ublk/batch.c
318
struct ublk_queue *q,
tools/testing/selftests/ublk/batch.c
330
if ((end - start) / 2 > q->q_depth) {
tools/testing/selftests/ublk/batch.c
344
if (tag >= q->q_depth)
tools/testing/selftests/ublk/batch.c
347
if (q->tgt_ops->queue_io)
tools/testing/selftests/ublk/batch.c
348
q->tgt_ops->queue_io(t, q, tag);
tools/testing/selftests/ublk/batch.c
354
static int __ublk_batch_queue_prep_io_cmds(struct ublk_thread *t, struct ublk_queue *q)
tools/testing/selftests/ublk/batch.c
356
unsigned short nr_elem = q->q_depth;
tools/testing/selftests/ublk/batch.c
366
ublk_assert(nr_elem == q->q_depth);
tools/testing/selftests/ublk/batch.c
371
struct ublk_io *io = &q->ios[i];
tools/testing/selftests/ublk/batch.c
376
if (ublk_queue_use_auto_zc(q))
tools/testing/selftests/ublk/batch.c
377
elem->buf_index = ublk_batch_io_buf_idx(t, q, i);
tools/testing/selftests/ublk/batch.c
378
else if (!ublk_queue_no_buf(q))
tools/testing/selftests/ublk/batch.c
385
ublk_init_batch_cmd(t, q->q_id, sqe, UBLK_U_IO_PREP_IO_CMDS,
tools/testing/selftests/ublk/batch.c
391
int ublk_batch_queue_prep_io_cmds(struct ublk_thread *t, struct ublk_queue *q)
tools/testing/selftests/ublk/batch.c
395
pthread_spin_lock(&q->lock);
tools/testing/selftests/ublk/batch.c
396
if (q->flags & UBLKS_Q_PREPARED)
tools/testing/selftests/ublk/batch.c
398
ret = __ublk_batch_queue_prep_io_cmds(t, q);
tools/testing/selftests/ublk/batch.c
400
q->flags |= UBLKS_Q_PREPARED;
tools/testing/selftests/ublk/batch.c
402
pthread_spin_unlock(&q->lock);
tools/testing/selftests/ublk/batch.c
429
struct ublk_queue *q;
tools/testing/selftests/ublk/batch.c
442
q = &t->dev->q[q_id];
tools/testing/selftests/ublk/batch.c
443
buf_idx = ublk_compl_batch_fetch(t, q, cqe);
tools/testing/selftests/ublk/batch.c
450
ublk_batch_queue_fetch(t, q, buf_idx);
tools/testing/selftests/ublk/batch.c
523
void ublk_batch_complete_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/batch.c
526
unsigned q_t_idx = ublk_queue_idx_in_thread(t, q);
tools/testing/selftests/ublk/batch.c
529
struct ublk_io *io = &q->ios[tag];
tools/testing/selftests/ublk/batch.c
534
ublk_assert(q->q_id == cb->q_id);
tools/testing/selftests/ublk/batch.c
538
elem->buf_index = ublk_batch_io_buf_idx(t, q, tag);
tools/testing/selftests/ublk/batch.c
541
if (!ublk_queue_no_buf(q))
tools/testing/selftests/ublk/fault_inject.c
43
struct ublk_queue *q, int tag)
tools/testing/selftests/ublk/fault_inject.c
45
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
tools/testing/selftests/ublk/fault_inject.c
48
.tv_nsec = (long long)q->dev->private_data,
tools/testing/selftests/ublk/fault_inject.c
53
sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);
tools/testing/selftests/ublk/fault_inject.c
55
ublk_queued_tgt_io(t, q, tag, 1);
tools/testing/selftests/ublk/fault_inject.c
61
struct ublk_queue *q,
tools/testing/selftests/ublk/fault_inject.c
65
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
tools/testing/selftests/ublk/fault_inject.c
70
if (ublk_completed_tgt_io(t, q, tag))
tools/testing/selftests/ublk/fault_inject.c
71
ublk_complete_io(t, q, tag, iod->nr_sectors << 9);
tools/testing/selftests/ublk/file_backed.c
101
ret = loop_queue_flush_io(t, q, iod, tag);
tools/testing/selftests/ublk/file_backed.c
109
ret = loop_queue_tgt_rw_io(t, q, iod, tag);
tools/testing/selftests/ublk/file_backed.c
121
static int ublk_loop_queue_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/file_backed.c
124
int queued = loop_queue_tgt_io(t, q, tag);
tools/testing/selftests/ublk/file_backed.c
126
ublk_queued_tgt_io(t, q, tag, queued);
tools/testing/selftests/ublk/file_backed.c
130
static void ublk_loop_io_done(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/file_backed.c
135
struct ublk_io *io = ublk_get_io(q, tag);
tools/testing/selftests/ublk/file_backed.c
143
? ublk_integrity_data_len(q, cqe->res)
tools/testing/selftests/ublk/file_backed.c
154
if (ublk_completed_tgt_io(t, q, tag))
tools/testing/selftests/ublk/file_backed.c
155
ublk_complete_io(t, q, tag, io->result);
tools/testing/selftests/ublk/file_backed.c
16
static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/file_backed.c
23
io_uring_prep_fsync(sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/, IORING_FSYNC_DATASYNC);
tools/testing/selftests/ublk/file_backed.c
26
sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
30
static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/file_backed.c
34
unsigned zc = ublk_queue_use_zc(q);
tools/testing/selftests/ublk/file_backed.c
35
unsigned auto_zc = ublk_queue_use_auto_zc(q);
tools/testing/selftests/ublk/file_backed.c
37
struct ublk_io *io = ublk_get_io(q, tag);
tools/testing/selftests/ublk/file_backed.c
42
unsigned short buf_index = ublk_io_buf_idx(t, q, tag);
tools/testing/selftests/ublk/file_backed.c
47
io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 2),
tools/testing/selftests/ublk/file_backed.c
49
ublk_integrity_len(q, len),
tools/testing/selftests/ublk/file_backed.c
50
ublk_integrity_len(q, offset));
tools/testing/selftests/ublk/file_backed.c
53
sqe[0]->user_data = build_user_data(tag, ublk_op, 1, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
61
io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/,
tools/testing/selftests/ublk/file_backed.c
69
sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
75
io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_index);
tools/testing/selftests/ublk/file_backed.c
78
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
80
io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
tools/testing/selftests/ublk/file_backed.c
85
sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
87
io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_index);
tools/testing/selftests/ublk/file_backed.c
88
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/file_backed.c
93
static int loop_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q, int tag)
tools/testing/selftests/ublk/file_backed.c
95
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
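The file_backed.c zero-copy path (listing lines 75-88) submits three SQEs per request: register the ublk request buffer, perform the backing-file read/write against it, then unregister it. io_uring_prep_buf_register()/io_uring_prep_buf_unregister() are the selftest's own helpers, so the sketch below uses plain liburing NOPs as stand-ins for all three steps and only shows the linked-chain shape; whether the real code links with IOSQE_IO_LINK exactly as shown is an assumption.

#include <liburing.h>

/* Shape of the 3-SQE zero-copy chain; NOPs stand in for the real ops.
 * Error handling (NULL sqe from a full SQ ring) is elided. */
static void queue_zc_chain(struct io_uring *ring)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);      /* 1) register request buffer */
        io_uring_prep_nop(sqe);
        sqe->flags |= IOSQE_IO_LINK;       /* run the next SQE only after this */

        sqe = io_uring_get_sqe(ring);      /* 2) backing-file read/write */
        io_uring_prep_nop(sqe);
        sqe->flags |= IOSQE_IO_LINK;

        sqe = io_uring_get_sqe(ring);      /* 3) unregister the buffer */
        io_uring_prep_nop(sqe);
}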
tools/testing/selftests/ublk/kublk.c
1076
memcpy(&ctx->shadow_dev->q, &dev->q, sizeof(dev->q));
tools/testing/selftests/ublk/kublk.c
1130
dev->q[i].dev = dev;
tools/testing/selftests/ublk/kublk.c
1131
dev->q[i].q_id = i;
tools/testing/selftests/ublk/kublk.c
1133
ret = ublk_queue_init(&dev->q[i], extra_flags,
tools/testing/selftests/ublk/kublk.c
1195
ublk_queue_deinit(&dev->q[i]);
tools/testing/selftests/ublk/kublk.c
1572
memcpy(&dev->q, ctx->shadow_dev->q, sizeof(dev->q));
tools/testing/selftests/ublk/kublk.c
415
static int ublk_queue_cmd_buf_sz(struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.c
417
return __ublk_queue_cmd_buf_sz(q->q_depth);
tools/testing/selftests/ublk/kublk.c
420
static void ublk_queue_deinit(struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.c
423
int nr_ios = q->q_depth;
tools/testing/selftests/ublk/kublk.c
425
if (q->io_cmd_buf)
tools/testing/selftests/ublk/kublk.c
426
munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
tools/testing/selftests/ublk/kublk.c
429
free(q->ios[i].buf_addr);
tools/testing/selftests/ublk/kublk.c
430
free(q->ios[i].integrity_buf);
tools/testing/selftests/ublk/kublk.c
449
static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags,
tools/testing/selftests/ublk/kublk.c
452
struct ublk_dev *dev = q->dev;
tools/testing/selftests/ublk/kublk.c
458
pthread_spin_init(&q->lock, PTHREAD_PROCESS_PRIVATE);
tools/testing/selftests/ublk/kublk.c
459
q->tgt_ops = dev->tgt.ops;
tools/testing/selftests/ublk/kublk.c
460
q->flags = 0;
tools/testing/selftests/ublk/kublk.c
461
q->q_depth = depth;
tools/testing/selftests/ublk/kublk.c
462
q->flags = dev->dev_info.flags;
tools/testing/selftests/ublk/kublk.c
463
q->flags |= extra_flags;
tools/testing/selftests/ublk/kublk.c
464
q->metadata_size = metadata_size;
tools/testing/selftests/ublk/kublk.c
467
q->ublk_fd = dev->fds[0];
tools/testing/selftests/ublk/kublk.c
469
cmd_buf_size = ublk_queue_cmd_buf_sz(q);
tools/testing/selftests/ublk/kublk.c
470
off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz();
tools/testing/selftests/ublk/kublk.c
471
q->io_cmd_buf = mmap(0, cmd_buf_size, PROT_READ,
tools/testing/selftests/ublk/kublk.c
473
if (q->io_cmd_buf == MAP_FAILED) {
tools/testing/selftests/ublk/kublk.c
475
q->dev->dev_info.dev_id, q->q_id);
tools/testing/selftests/ublk/kublk.c
480
integrity_size = ublk_integrity_len(q, io_buf_size);
tools/testing/selftests/ublk/kublk.c
481
for (i = 0; i < q->q_depth; i++) {
tools/testing/selftests/ublk/kublk.c
482
q->ios[i].buf_addr = NULL;
tools/testing/selftests/ublk/kublk.c
483
q->ios[i].flags = UBLKS_IO_NEED_FETCH_RQ | UBLKS_IO_FREE;
tools/testing/selftests/ublk/kublk.c
484
q->ios[i].tag = i;
tools/testing/selftests/ublk/kublk.c
487
q->ios[i].integrity_buf = malloc(integrity_size);
tools/testing/selftests/ublk/kublk.c
488
if (!q->ios[i].integrity_buf) {
tools/testing/selftests/ublk/kublk.c
490
dev->dev_info.dev_id, q->q_id, i,
tools/testing/selftests/ublk/kublk.c
497
if (ublk_queue_no_buf(q))
tools/testing/selftests/ublk/kublk.c
500
if (posix_memalign((void **)&q->ios[i].buf_addr,
tools/testing/selftests/ublk/kublk.c
503
dev->dev_info.dev_id, q->q_id, i);
tools/testing/selftests/ublk/kublk.c
510
ublk_queue_deinit(q);
tools/testing/selftests/ublk/kublk.c
512
dev->dev_info.dev_id, q->q_id);
tools/testing/selftests/ublk/kublk.c
634
const struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.c
640
if (q->tgt_ops->buf_index)
tools/testing/selftests/ublk/kublk.c
641
buf.index = q->tgt_ops->buf_index(t, q, tag);
tools/testing/selftests/ublk/kublk.c
643
buf.index = ublk_io_buf_idx(t, q, tag);
tools/testing/selftests/ublk/kublk.c
645
if (ublk_queue_auto_zc_fallback(q))
tools/testing/selftests/ublk/kublk.c
656
const struct ublk_queue *q = ublk_io_to_queue(io);
tools/testing/selftests/ublk/kublk.c
657
const struct ublksrv_io_desc *iod = ublk_get_iod(q, io->tag);
tools/testing/selftests/ublk/kublk.c
658
__u64 off = ublk_user_copy_offset(q->q_id, io->tag);
tools/testing/selftests/ublk/kublk.c
671
copied = pread(q->ublk_fd, addr, copy_len, off);
tools/testing/selftests/ublk/kublk.c
673
copied = pwrite(q->ublk_fd, addr, copy_len, off);
tools/testing/selftests/ublk/kublk.c
685
len = ublk_integrity_len(q, iod->nr_sectors << 9);
tools/testing/selftests/ublk/kublk.c
686
off = ublk_user_copy_offset(q->q_id, io->tag);
tools/testing/selftests/ublk/kublk.c
689
copied = pread(q->ublk_fd, io->integrity_buf, len, off);
tools/testing/selftests/ublk/kublk.c
691
copied = pwrite(q->ublk_fd, io->integrity_buf, len, off);
tools/testing/selftests/ublk/kublk.c
699
struct ublk_queue *q = ublk_io_to_queue(io);
tools/testing/selftests/ublk/kublk.c
720
if (ublk_queue_use_user_copy(q))
tools/testing/selftests/ublk/kublk.c
744
sqe[0]->fd = ublk_get_registered_fd(q, 0); /* dev->fds[0] */
tools/testing/selftests/ublk/kublk.c
746
if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD)
tools/testing/selftests/ublk/kublk.c
752
cmd->q_id = q->q_id;
tools/testing/selftests/ublk/kublk.c
753
if (!ublk_queue_no_buf(q) && !ublk_queue_use_user_copy(q))
tools/testing/selftests/ublk/kublk.c
758
if (ublk_queue_use_auto_zc(q))
tools/testing/selftests/ublk/kublk.c
759
ublk_set_auto_buf_reg(t, q, sqe[0], io->tag);
tools/testing/selftests/ublk/kublk.c
761
user_data = build_user_data(io->tag, _IOC_NR(cmd_op), 0, q->q_id, 0);
tools/testing/selftests/ublk/kublk.c
769
__func__, t->idx, q->q_id, io->tag, cmd_op,
tools/testing/selftests/ublk/kublk.c
776
struct ublk_queue *q;
tools/testing/selftests/ublk/kublk.c
796
q = &t->dev->q[q_id];
tools/testing/selftests/ublk/kublk.c
797
io = &q->ios[tag];
tools/testing/selftests/ublk/kublk.c
806
struct ublk_queue *q = &t->dev->q[t->idx];
tools/testing/selftests/ublk/kublk.c
807
for (i = 0; i < q->q_depth; i++) {
tools/testing/selftests/ublk/kublk.c
808
io = &q->ios[i];
tools/testing/selftests/ublk/kublk.c
826
struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.c
831
__func__, cqe->res, q->q_id,
tools/testing/selftests/ublk/kublk.c
835
if (q->tgt_ops->tgt_io_done)
tools/testing/selftests/ublk/kublk.c
836
q->tgt_ops->tgt_io_done(t, q, cqe);
tools/testing/selftests/ublk/kublk.c
840
struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.c
846
struct ublk_io *io = &q->ios[tag];
tools/testing/selftests/ublk/kublk.c
856
ublk_assert(tag < q->q_depth);
tools/testing/selftests/ublk/kublk.c
858
if (ublk_queue_use_user_copy(q))
tools/testing/selftests/ublk/kublk.c
861
if (q->tgt_ops->queue_io)
tools/testing/selftests/ublk/kublk.c
862
q->tgt_ops->queue_io(t, q, tag);
tools/testing/selftests/ublk/kublk.c
900
ublksrv_handle_tgt_cqe(t, &dev->q[q_id], cqe);
tools/testing/selftests/ublk/kublk.c
907
ublk_handle_uring_cmd(t, &dev->q[q_id], cqe);
tools/testing/selftests/ublk/kublk.c
976
struct ublk_queue *q = &t->dev->q[i];
tools/testing/selftests/ublk/kublk.c
986
ret = ublk_batch_queue_prep_io_cmds(t, q);
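Among the kublk.c entries, ublk_queue_init() (listing lines 449-512) maps the per-queue descriptor area: a read-only mmap of the ublk char device at a queue-indexed offset, into which the kernel writes one ublksrv_io_desc per tag. Below is a hedged sketch of just that step; the max-buffer-size parameter and the MAP_SHARED flag are assumptions (the listing only shows PROT_READ and the offset computation).

#include <sys/types.h>
#include <sys/mman.h>
#include <linux/ublk_cmd.h>     /* UBLKSRV_CMD_BUF_OFFSET, struct ublksrv_io_desc */

static struct ublksrv_io_desc *map_io_cmd_buf(int dev_fd, int q_id,
                                              size_t cmd_buf_size,
                                              size_t max_cmd_buf_sz)
{
        /* each queue gets its own window: base offset + q_id * per-queue max */
        off_t off = UBLKSRV_CMD_BUF_OFFSET + (off_t)q_id * max_cmd_buf_sz;
        void *buf = mmap(NULL, cmd_buf_size, PROT_READ, MAP_SHARED, dev_fd, off);

        return buf == MAP_FAILED ? NULL : (struct ublksrv_io_desc *)buf;
}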
tools/testing/selftests/ublk/kublk.h
253
struct ublk_queue q[UBLK_MAX_QUEUES];
tools/testing/selftests/ublk/kublk.h
272
static inline int ublk_queue_batch_io(const struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.h
274
return __ublk_use_batch_io(q->flags);
tools/testing/selftests/ublk/kublk.h
305
static inline size_t ublk_integrity_len(const struct ublk_queue *q, size_t len)
tools/testing/selftests/ublk/kublk.h
308
return (len >> 9) * q->metadata_size;
tools/testing/selftests/ublk/kublk.h
312
ublk_integrity_data_len(const struct ublk_queue *q, size_t integrity_len)
tools/testing/selftests/ublk/kublk.h
314
return (integrity_len / q->metadata_size) << 9;
tools/testing/selftests/ublk/kublk.h
393
static inline int ublk_get_registered_fd(struct ublk_queue *q, int fd_index)
tools/testing/selftests/ublk/kublk.h
395
if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD) {
tools/testing/selftests/ublk/kublk.h
398
return q->ublk_fd;
tools/testing/selftests/ublk/kublk.h
406
struct ublk_queue *q, int tag, int q_id, __u64 index)
tools/testing/selftests/ublk/kublk.h
409
int dev_fd = ublk_get_registered_fd(q, 0);
tools/testing/selftests/ublk/kublk.h
413
if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD)
tools/testing/selftests/ublk/kublk.h
424
struct ublk_queue *q, int tag, int q_id, __u64 index)
tools/testing/selftests/ublk/kublk.h
426
__io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
tools/testing/selftests/ublk/kublk.h
431
struct ublk_queue *q, int tag, int q_id, __u64 index)
tools/testing/selftests/ublk/kublk.h
433
__io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
tools/testing/selftests/ublk/kublk.h
442
static inline void ublk_set_io_res(struct ublk_queue *q, int tag, int res)
tools/testing/selftests/ublk/kublk.h
444
q->ios[tag].result = res;
tools/testing/selftests/ublk/kublk.h
447
static inline int ublk_get_io_res(const struct ublk_queue *q, unsigned tag)
tools/testing/selftests/ublk/kublk.h
449
return q->ios[tag].result;
tools/testing/selftests/ublk/kublk.h
458
static inline const struct ublksrv_io_desc *ublk_get_iod(const struct ublk_queue *q, int tag)
tools/testing/selftests/ublk/kublk.h
460
return &q->io_cmd_buf[tag];
tools/testing/selftests/ublk/kublk.h
472
const struct ublk_thread *t, const struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.h
476
const struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.h
479
if (ublk_queue_batch_io(q))
tools/testing/selftests/ublk/kublk.h
480
return ublk_batch_io_buf_idx(t, q, tag);
tools/testing/selftests/ublk/kublk.h
481
return q->ios[tag].buf_index;
tools/testing/selftests/ublk/kublk.h
484
static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
tools/testing/selftests/ublk/kublk.h
486
return &q->ios[tag];
tools/testing/selftests/ublk/kublk.h
490
struct ublk_queue *q, unsigned tag)
tools/testing/selftests/ublk/kublk.h
492
struct ublk_io *io = ublk_get_io(q, tag);
tools/testing/selftests/ublk/kublk.h
499
static inline bool ublk_queue_use_zc(const struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.h
501
return !!(q->flags & UBLK_F_SUPPORT_ZERO_COPY);
tools/testing/selftests/ublk/kublk.h
504
static inline bool ublk_queue_use_auto_zc(const struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.h
506
return !!(q->flags & UBLK_F_AUTO_BUF_REG);
tools/testing/selftests/ublk/kublk.h
509
static inline bool ublk_queue_auto_zc_fallback(const struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.h
511
return !!(q->flags & UBLKS_Q_AUTO_BUF_REG_FALLBACK);
tools/testing/selftests/ublk/kublk.h
514
static inline bool ublk_queue_use_user_copy(const struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.h
516
return !!(q->flags & UBLK_F_USER_COPY);
tools/testing/selftests/ublk/kublk.h
519
static inline int ublk_queue_no_buf(const struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.h
521
return ublk_queue_use_zc(q) || ublk_queue_use_auto_zc(q);
tools/testing/selftests/ublk/kublk.h
530
const struct ublk_queue *q)
tools/testing/selftests/ublk/kublk.h
534
idx = t->q_map[q->q_id];
tools/testing/selftests/ublk/kublk.h
544
const struct ublk_thread *t, const struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.h
547
return ublk_queue_idx_in_thread(t, q) * q->q_depth + tag;
tools/testing/selftests/ublk/kublk.h
551
int ublk_batch_queue_prep_io_cmds(struct ublk_thread *t, struct ublk_queue *q);
tools/testing/selftests/ublk/kublk.h
569
void ublk_batch_complete_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.h
574
static inline int ublk_complete_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.h
577
if (ublk_queue_batch_io(q)) {
tools/testing/selftests/ublk/kublk.h
578
ublk_batch_complete_io(t, q, tag, res);
tools/testing/selftests/ublk/kublk.h
581
struct ublk_io *io = &q->ios[tag];
tools/testing/selftests/ublk/kublk.h
588
static inline void ublk_queued_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/kublk.h
592
ublk_complete_io(t, q, tag, queued);
tools/testing/selftests/ublk/kublk.h
594
struct ublk_io *io = ublk_get_io(q, tag);
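The kublk.h entries around listing lines 472-481 and 544-547 show how a request's buffer index is resolved: batch-mode queues compute it from the queue's position within the serving thread (index-in-thread * q_depth + tag), while non-batch queues keep a per-io buf_index. A small sketch of that arithmetic; the q_map lookup and field names come from the listing, but the surrounding types are reduced placeholders.

struct ublk_queue {
        int q_id;
        int q_depth;
};

struct ublk_thread {
        unsigned char q_map[16];   /* queue id -> position within this thread */
};

/* batch mode: buffers are laid out per thread, q_depth slots per queue */
static unsigned short batch_io_buf_idx(const struct ublk_thread *t,
                                       const struct ublk_queue *q, int tag)
{
        return t->q_map[q->q_id] * q->q_depth + tag;
}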
tools/testing/selftests/ublk/null.c
113
if (ublk_completed_tgt_io(t, q, tag))
tools/testing/selftests/ublk/null.c
114
ublk_complete_io(t, q, tag, io->result);
tools/testing/selftests/ublk/null.c
117
static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/null.c
120
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
tools/testing/selftests/ublk/null.c
121
unsigned auto_zc = ublk_queue_use_auto_zc(q);
tools/testing/selftests/ublk/null.c
122
unsigned zc = ublk_queue_use_zc(q);
tools/testing/selftests/ublk/null.c
126
queued = null_queue_auto_zc_io(t, q, tag);
tools/testing/selftests/ublk/null.c
128
queued = null_queue_zc_io(t, q, tag);
tools/testing/selftests/ublk/null.c
130
ublk_complete_io(t, q, tag, iod->nr_sectors << 9);
tools/testing/selftests/ublk/null.c
133
ublk_queued_tgt_io(t, q, tag, queued);
tools/testing/selftests/ublk/null.c
142
const struct ublk_queue *q, int tag)
tools/testing/selftests/ublk/null.c
144
if (ublk_queue_auto_zc_fallback(q))
tools/testing/selftests/ublk/null.c
146
return ublk_io_buf_idx(t, q, tag);
tools/testing/selftests/ublk/null.c
59
static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/null.c
62
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
tools/testing/selftests/ublk/null.c
64
unsigned short buf_idx = ublk_io_buf_idx(t, q, tag);
tools/testing/selftests/ublk/null.c
68
io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
tools/testing/selftests/ublk/null.c
70
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/null.c
73
__setup_nop_io(tag, iod, sqe[1], q->q_id, buf_idx);
tools/testing/selftests/ublk/null.c
76
io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_idx);
tools/testing/selftests/ublk/null.c
77
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/null.c
83
static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/null.c
86
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
tools/testing/selftests/ublk/null.c
90
__setup_nop_io(tag, iod, sqe[0], q->q_id, ublk_io_buf_idx(t, q, tag));
tools/testing/selftests/ublk/null.c
94
static void ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/null.c
99
struct ublk_io *io = ublk_get_io(q, tag);
tools/testing/selftests/ublk/stripe.c
126
static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/stripe.c
129
const struct stripe_conf *conf = get_chunk_shift(q);
tools/testing/selftests/ublk/stripe.c
130
unsigned auto_zc = (ublk_queue_use_auto_zc(q) != 0);
tools/testing/selftests/ublk/stripe.c
131
unsigned zc = (ublk_queue_use_zc(q) != 0);
tools/testing/selftests/ublk/stripe.c
135
struct ublk_io *io = ublk_get_io(q, tag);
tools/testing/selftests/ublk/stripe.c
138
unsigned short buf_idx = ublk_io_buf_idx(t, q, tag);
tools/testing/selftests/ublk/stripe.c
146
io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
tools/testing/selftests/ublk/stripe.c
149
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/stripe.c
167
sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, q->q_id, 1);
tools/testing/selftests/ublk/stripe.c
172
io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, buf_idx);
tools/testing/selftests/ublk/stripe.c
174
tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1);
tools/testing/selftests/ublk/stripe.c
181
static int handle_flush(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/stripe.c
184
const struct stripe_conf *conf = get_chunk_shift(q);
tools/testing/selftests/ublk/stripe.c
192
sqe[i]->user_data = build_user_data(tag, UBLK_IO_OP_FLUSH, 0, q->q_id, 1);
tools/testing/selftests/ublk/stripe.c
197
static int stripe_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/stripe.c
200
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
tools/testing/selftests/ublk/stripe.c
206
ret = handle_flush(t, q, iod, tag);
tools/testing/selftests/ublk/stripe.c
214
ret = stripe_queue_tgt_rw_io(t, q, iod, tag);
tools/testing/selftests/ublk/stripe.c
225
static int ublk_stripe_queue_io(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/stripe.c
228
int queued = stripe_queue_tgt_io(t, q, tag);
tools/testing/selftests/ublk/stripe.c
230
ublk_queued_tgt_io(t, q, tag, queued);
tools/testing/selftests/ublk/stripe.c
234
static void ublk_stripe_io_done(struct ublk_thread *t, struct ublk_queue *q,
tools/testing/selftests/ublk/stripe.c
238
const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
tools/testing/selftests/ublk/stripe.c
240
struct ublk_io *io = ublk_get_io(q, tag);
tools/testing/selftests/ublk/stripe.c
266
if (ublk_completed_tgt_io(t, q, tag)) {
tools/testing/selftests/ublk/stripe.c
272
ublk_complete_io(t, q, tag, res);
tools/testing/selftests/ublk/stripe.c
28
static inline const struct stripe_conf *get_chunk_shift(const struct ublk_queue *q)
tools/testing/selftests/ublk/stripe.c
30
return (struct stripe_conf *)q->dev->private_data;
tools/tracing/latency/latency-collector.c
787
static void init_queue(struct queue *q)
tools/tracing/latency/latency-collector.c
789
q->next_prod_idx = 0;
tools/tracing/latency/latency-collector.c
790
q->next_cons_idx = 0;
tools/tracing/latency/latency-collector.c
791
mutex_init(&q->mutex, NULL);
tools/tracing/latency/latency-collector.c
792
errno = pthread_cond_init(&q->cond, NULL);
tools/tracing/latency/latency-collector.c
797
static __always_inline int queue_len(const struct queue *q)
tools/tracing/latency/latency-collector.c
799
if (q->next_prod_idx >= q->next_cons_idx)
tools/tracing/latency/latency-collector.c
800
return q->next_prod_idx - q->next_cons_idx;
tools/tracing/latency/latency-collector.c
802
return QUEUE_SIZE - q->next_cons_idx + q->next_prod_idx;
tools/tracing/latency/latency-collector.c
805
static __always_inline int queue_nr_free(const struct queue *q)
tools/tracing/latency/latency-collector.c
807
int nr_free = QUEUE_SIZE - queue_len(q);
tools/tracing/latency/latency-collector.c
823
static __always_inline void queue_push_to_back(struct queue *q,
tools/tracing/latency/latency-collector.c
826
q->entries[q->next_prod_idx] = *e;
tools/tracing/latency/latency-collector.c
827
queue_idx_inc(&q->next_prod_idx);
tools/tracing/latency/latency-collector.c
830
static __always_inline struct entry queue_pop_from_front(struct queue *q)
tools/tracing/latency/latency-collector.c
832
struct entry e = q->entries[q->next_cons_idx];
tools/tracing/latency/latency-collector.c
834
queue_idx_inc(&q->next_cons_idx);
tools/tracing/latency/latency-collector.c
838
static __always_inline void queue_cond_signal(struct queue *q)
tools/tracing/latency/latency-collector.c
840
cond_signal(&q->cond);
tools/tracing/latency/latency-collector.c
843
static __always_inline void queue_cond_wait(struct queue *q)
tools/tracing/latency/latency-collector.c
845
cond_wait(&q->cond, &q->mutex);
tools/tracing/latency/latency-collector.c
848
static __always_inline int queue_try_to_add_entry(struct queue *q,
tools/tracing/latency/latency-collector.c
853
mutex_lock(&q->mutex);
tools/tracing/latency/latency-collector.c
854
if (queue_nr_free(q) > 0) {
tools/tracing/latency/latency-collector.c
855
queue_push_to_back(q, e);
tools/tracing/latency/latency-collector.c
856
cond_signal(&q->cond);
tools/tracing/latency/latency-collector.c
859
mutex_unlock(&q->mutex);
tools/tracing/latency/latency-collector.c
863
static struct entry queue_wait_for_entry(struct queue *q)
tools/tracing/latency/latency-collector.c
867
mutex_lock(&q->mutex);
tools/tracing/latency/latency-collector.c
870
e = queue_pop_from_front(q);
tools/tracing/latency/latency-collector.c
873
queue_cond_wait(q);
tools/tracing/latency/latency-collector.c
875
mutex_unlock(&q->mutex);
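Finally, the latency-collector entries (listing lines 787-875) describe a fixed-size producer/consumer ring protected by one mutex and one condition variable, with queue_len() handling index wraparound explicitly. A compact, self-contained sketch of the same ring arithmetic and the blocking consumer; QUEUE_SIZE, the entry payload, and the modulo-style index increment are placeholders, not the tool's exact definitions.

#include <pthread.h>

#define QUEUE_SIZE 256                     /* placeholder capacity */

struct entry { long value; };

struct queue {
        struct entry entries[QUEUE_SIZE];
        int next_prod_idx, next_cons_idx;
        pthread_mutex_t mutex;
        pthread_cond_t cond;
};

static int queue_len(const struct queue *q)
{
        if (q->next_prod_idx >= q->next_cons_idx)
                return q->next_prod_idx - q->next_cons_idx;
        /* producer index wrapped past the end of the array */
        return QUEUE_SIZE - q->next_cons_idx + q->next_prod_idx;
}

static struct entry queue_wait_for_entry(struct queue *q)
{
        struct entry e;

        pthread_mutex_lock(&q->mutex);
        while (queue_len(q) < 1)           /* sleep until a producer signals */
                pthread_cond_wait(&q->cond, &q->mutex);
        e = q->entries[q->next_cons_idx];
        q->next_cons_idx = (q->next_cons_idx + 1) % QUEUE_SIZE;
        pthread_mutex_unlock(&q->mutex);
        return e;
}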