arch/alpha/kernel/smc37c669.c
2012
return rb(&SMC37c669->data_port);
arch/arc/net/bpf_jit_arcv2.c
839
static u8 arc_add_i(u8 *buf, u8 ra, u8 rb, s32 imm)
arch/arc/net/bpf_jit_arcv2.c
841
const u32 insn = OPC_ADD_I | OP_A(ra) | OP_B(rb);
arch/arc/net/bpf_jit_arcv2.c
923
static u8 arc_cmp_r(u8 *buf, u8 rb, u8 rc)
arch/arc/net/bpf_jit_arcv2.c
925
const u32 insn = OPC_CMP | OP_B(rb) | OP_C(rc);
arch/arc/net/bpf_jit_arcv2.c
940
static u8 arc_cmpz_r(u8 *buf, u8 rb, u8 rc)
arch/arc/net/bpf_jit_arcv2.c
942
const u32 insn = OPC_CMP | OP_B(rb) | OP_C(rc) | CC_equal;
arch/arc/net/bpf_jit_arcv2.c
950
static u8 arc_neg_r(u8 *buf, u8 ra, u8 rb)
arch/arc/net/bpf_jit_arcv2.c
952
const u32 insn = OPC_NEG | OP_A(ra) | OP_B(rb);
arch/arc/net/bpf_jit_arcv2.c
960
static u8 arc_mpy_r(u8 *buf, u8 ra, u8 rb, u8 rc)
arch/arc/net/bpf_jit_arcv2.c
962
const u32 insn = OPC_MPY | OP_A(ra) | OP_B(rb) | OP_C(rc);
arch/arc/net/bpf_jit_arcv2.c
970
static u8 arc_mpy_i(u8 *buf, u8 ra, u8 rb, s32 imm)
arch/arc/net/bpf_jit_arcv2.c
972
const u32 insn = OPC_MPYI | OP_A(ra) | OP_B(rb);
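Aside: the arc_*() encoders above all share one shape — OR the opcode and register fields into a 32-bit word, store it at the current JIT buffer position, and return the number of bytes emitted (hence the u8 return type). A minimal standalone sketch of that pattern; emit32() and the DEMO_* opcode and field positions are invented for illustration and are not the real bpf_jit_arcv2.c definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_OPC_ADD 0x20000000u                    /* hypothetical opcode  */
#define DEMO_OP_A(r) ((uint32_t)(r) & 0x3f)         /* assumed field layout */
#define DEMO_OP_B(r) (((uint32_t)(r) & 0x3f) << 12) /* assumed field layout */

/* Store one 32-bit instruction; a NULL buf acts as a pure sizing pass. */
static uint8_t emit32(uint8_t *buf, uint32_t insn)
{
	if (buf)
		memcpy(buf, &insn, sizeof(insn));
	return sizeof(insn);
}

/* Mirrors the shape of arc_add_i()/arc_mpy_r() quoted above. */
static uint8_t demo_add_r(uint8_t *buf, uint8_t ra, uint8_t rb)
{
	return emit32(buf, DEMO_OPC_ADD | DEMO_OP_A(ra) | DEMO_OP_B(rb));
}

int main(void)
{
	uint8_t code[4];

	printf("emitted %u bytes\n", demo_add_r(code, 1, 2));
	return 0;
}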
arch/mips/alchemy/common/usb.c
392
static inline int au1000_usb_init(unsigned long rb, int reg)
arch/mips/alchemy/common/usb.c
394
void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg);
arch/mips/alchemy/common/usb.c
425
static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg)
arch/mips/alchemy/common/usb.c
427
void __iomem *base = (void __iomem *)KSEG1ADDR(rb);
arch/mips/alchemy/common/usb.c
458
static inline int au1000_usb_control(int block, int enable, unsigned long rb,
arch/mips/alchemy/common/usb.c
465
__au1xx0_ohci_control(enable, rb, creg);
arch/openrisc/kernel/traps.c
273
unsigned int rb, op, jmp;
arch/openrisc/kernel/traps.c
280
rb = (jmp & 0x0000ffff) >> 11;
arch/openrisc/kernel/traps.c
304
regs->pc = regs->gpr[rb];
arch/openrisc/kernel/traps.c
307
regs->pc = regs->gpr[rb];
arch/openrisc/kernel/traps.c
365
unsigned int ra, rb;
arch/openrisc/kernel/traps.c
374
rb = (insn >> 11) & 0x1f;
arch/openrisc/kernel/traps.c
388
if (put_user(regs->gpr[rb], vaddr)) {
arch/openrisc/kernel/traps.c
400
*((unsigned long *)vaddr) = regs->gpr[rb];
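Aside: both openrisc fixups above recover the same operand — (jmp & 0x0000ffff) >> 11 and (insn >> 11) & 0x1f each isolate the 5-bit rB register index held in instruction bits 15..11, and the PowerPC sstep.c hits further down use the identical shift-and-mask. A standalone check of the decode, with a made-up instruction word:

#include <stdint.h>
#include <stdio.h>

static unsigned int decode_rb(uint32_t insn)
{
	return (insn >> 11) & 0x1f;	/* 5-bit register field, bits 15..11 */
}

int main(void)
{
	uint32_t insn = 0x44004800;	/* invented encoding with rB = 9 */

	printf("rb = r%u\n", decode_rb(insn));
	return 0;
}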
arch/powerpc/include/asm/kvm_book3s_64.h
302
unsigned long rb = 0, va_low, sllp;
arch/powerpc/include/asm/kvm_book3s_64.h
320
rb = (v & ~0x7fUL) << 16; /* AVA field */
arch/powerpc/include/asm/kvm_book3s_64.h
344
rb |= sllp << 5; /* AP field */
arch/powerpc/include/asm/kvm_book3s_64.h
346
rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */
arch/powerpc/include/asm/kvm_book3s_64.h
353
rb |= (va_low << b_pgshift) & 0x7ff000;
arch/powerpc/include/asm/kvm_book3s_64.h
357
rb &= ~((1ul << a_pgshift) - 1);
arch/powerpc/include/asm/kvm_book3s_64.h
364
rb |= ((va_low << aval_shift) & 0xfe);
arch/powerpc/include/asm/kvm_book3s_64.h
366
rb |= 1; /* L field */
arch/powerpc/include/asm/kvm_book3s_64.h
367
rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
arch/powerpc/include/asm/kvm_book3s_64.h
373
rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
arch/powerpc/include/asm/kvm_book3s_64.h
374
return rb;
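Aside: the compute_tlbie_rb() fragments above assemble the tlbie RB operand one field at a time — AVA from HPTE dword 0, AP/LP from the page-size encoding, B from the segment size, L flagging a large page. A simplified standalone illustration of the same shift-and-OR packing (64-bit long assumed; the input values are invented and 62 stands in for HPTE_V_SSIZE_SHIFT):

#include <stdio.h>

int main(void)
{
	unsigned long v = 0x123456789a80UL;	/* fake HPTE dword 0 */
	unsigned long va_low = 0x5a5;		/* fake low VA bits  */
	unsigned long rb;

	rb  = (v & ~0x7fUL) << 16;	/* AVA field             */
	rb |= (va_low & 0x7ff) << 12;	/* remaining bits of AVA */
	rb |= 1;			/* L field: large page   */
	rb |= (v >> 62) << 8;		/* B field: segment size */
	printf("rb = 0x%lx\n", rb);
	return 0;
}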
arch/powerpc/include/asm/kvm_host.h
391
void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
arch/powerpc/include/asm/kvm_ppc.h
1095
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
arch/powerpc/include/asm/kvm_ppc.h
1100
ea = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/include/asm/ppc-opcode.h
405
#define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \
arch/powerpc/include/asm/ppc-opcode.h
406
(0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
arch/powerpc/include/asm/ppc-opcode.h
407
#define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \
arch/powerpc/include/asm/ppc-opcode.h
408
(0x7c000224 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
arch/powerpc/include/asm/ppc-opcode.h
409
#define PPC_RAW_TLBIEL_v205(rb, l) (0x7c000224 | ___PPC_RB(rb) | (l << 21))
arch/powerpc/include/asm/ppc-opcode.h
647
#define PPC_TLBIE_5(rb, rs, ric, prs, r) \
arch/powerpc/include/asm/ppc-opcode.h
648
stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r))
arch/powerpc/include/asm/ppc-opcode.h
649
#define PPC_TLBIEL(rb,rs,ric,prs,r) \
arch/powerpc/include/asm/ppc-opcode.h
650
stringify_in_c(.long PPC_RAW_TLBIEL(rb, rs, ric, prs, r))
arch/powerpc/include/asm/ppc-opcode.h
651
#define PPC_TLBIEL_v205(rb, l) stringify_in_c(.long PPC_RAW_TLBIEL_v205(rb, l))
arch/powerpc/include/asm/trace.h
294
TP_PROTO(unsigned long lpid, unsigned long local, unsigned long rb,
arch/powerpc/include/asm/trace.h
297
TP_ARGS(lpid, local, rb, rs, ric, prs, r),
arch/powerpc/include/asm/trace.h
301
__field(unsigned long, rb)
arch/powerpc/include/asm/trace.h
311
__entry->rb = rb;
arch/powerpc/include/asm/trace.h
320
__entry->rb, __entry->rs, __entry->ric, __entry->prs,
arch/powerpc/kernel/btext.c
409
static void draw_byte_32(const unsigned char *font, unsigned int *base, int rb)
arch/powerpc/kernel/btext.c
426
base = (unsigned int *) ((char *)base + rb);
arch/powerpc/kernel/btext.c
430
static inline void draw_byte_16(const unsigned char *font, unsigned int *base, int rb)
arch/powerpc/kernel/btext.c
444
base = (unsigned int *) ((char *)base + rb);
arch/powerpc/kernel/btext.c
448
static inline void draw_byte_8(const unsigned char *font, unsigned int *base, int rb)
arch/powerpc/kernel/btext.c
460
base = (unsigned int *) ((char *)base + rb);
arch/powerpc/kernel/btext.c
469
int rb = dispDeviceRowBytes;
arch/powerpc/kernel/btext.c
475
draw_byte_32(font, (unsigned int *)base, rb);
arch/powerpc/kernel/btext.c
479
draw_byte_16(font, (unsigned int *)base, rb);
arch/powerpc/kernel/btext.c
482
draw_byte_8(font, (unsigned int *)base, rb);
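Aside: in btext.c, rb is dispDeviceRowBytes, the framebuffer scanline stride; each draw_byte_*() variant paints one font row and then steps base forward by rb bytes (hence the char * cast) rather than by one array element. A tiny standalone model of that stride walk:

#include <stdint.h>
#include <stdio.h>

/* Paint one vertical column of 'rows' scanlines, one byte per row. */
static void draw_column(uint8_t *base, int rb, int rows)
{
	for (int y = 0; y < rows; y++) {
		*base = 0xff;	/* paint one row's pixel(s) */
		base += rb;	/* advance one scanline     */
	}
}

int main(void)
{
	uint8_t fb[8 * 16] = { 0 };

	draw_column(fb, 16, 8);		/* 16-byte row stride */
	printf("fb[0]=0x%02x fb[16]=0x%02x\n", fb[0], fb[16]);
	return 0;
}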
arch/powerpc/kernel/kvm.c
367
static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
arch/powerpc/kernel/kvm.c
392
p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
arch/powerpc/kernel/time.c
919
u64 ra, rb, rc;
arch/powerpc/kernel/time.c
929
rb = ((u64)do_div(ra, divisor) << 32) + c;
arch/powerpc/kernel/time.c
932
rc = ((u64)do_div(rb, divisor) << 32) + d;
arch/powerpc/kernel/time.c
933
y = rb;
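Aside: the time.c lines above are long division in 32-bit limbs: each do_div() produces a partial quotient, and its remainder is joined to the next 32-bit limb (the "<< 32) + c" step) to form the next dividend — which is why every intermediate fits in 64 bits. A standalone model of the carry-the-remainder scheme for a 64-by-32 divide:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x12345678, lo = 0x9abcdef0, divisor = 1000;
	uint64_t ra, rb;
	uint32_t q_hi, q_lo;

	ra = hi;				/* top limb               */
	q_hi = (uint32_t)(ra / divisor);
	rb = ((ra % divisor) << 32) + lo;	/* remainder ++ next limb */
	q_lo = (uint32_t)(rb / divisor);	/* fits: remainder < divisor */

	printf("quotient = 0x%08x%08x, remainder = %u\n",
	       q_hi, q_lo, (unsigned int)(rb % divisor));
	return 0;
}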
arch/powerpc/kernel/traps.c
553
unsigned int rb;
arch/powerpc/kernel/traps.c
556
rb = (*nip >> 11) & 0x1f;
arch/powerpc/kernel/traps.c
559
regs->gpr[rb] - _IO_BASE, nip);
arch/powerpc/kernel/traps.c
886
unsigned int ra, rb, t, i, sel, instr, rc;
arch/powerpc/kernel/traps.c
922
rb = (instr >> 11) & 0x1f;
arch/powerpc/kernel/traps.c
930
ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
arch/powerpc/kvm/book3s_64_mmu.c
363
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
arch/powerpc/kvm/book3s_64_mmu.c
369
dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
arch/powerpc/kvm/book3s_64_mmu.c
371
esid = GET_ESID(rb);
arch/powerpc/kvm/book3s_64_mmu.c
372
esid_1t = GET_ESID_1T(rb);
arch/powerpc/kvm/book3s_64_mmu.c
373
slb_nr = rb & 0xfff;
arch/powerpc/kvm/book3s_64_mmu.c
384
slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
arch/powerpc/kvm/book3s_64_mmu.c
405
slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
arch/powerpc/kvm/book3s_64_mmu.c
492
u64 rb = 0, rs = 0;
arch/powerpc/kvm/book3s_64_mmu.c
515
rb |= (srnum & 0xf) << 28;
arch/powerpc/kvm/book3s_64_mmu.c
517
rb |= 1 << 27;
arch/powerpc/kvm/book3s_64_mmu.c
519
rb |= srnum;
arch/powerpc/kvm/book3s_64_mmu.c
526
kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
arch/powerpc/kvm/book3s_64_mmu_radix.c
321
unsigned long rb;
arch/powerpc/kvm/book3s_64_mmu_radix.c
338
rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
arch/powerpc/kvm/book3s_64_mmu_radix.c
340
lpid, rb);
arch/powerpc/kvm/book3s_emulate.c
1049
ulong rb = get_rb(inst);
arch/powerpc/kvm/book3s_emulate.c
1063
dar += kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_emulate.c
241
int rb = get_rb(inst);
arch/powerpc/kvm/book3s_emulate.c
328
srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
arch/powerpc/kvm/book3s_emulate.c
343
(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
arch/powerpc/kvm/book3s_emulate.c
350
ulong addr = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_emulate.c
390
kvmppc_get_gpr(vcpu, rb));
arch/powerpc/kvm/book3s_emulate.c
397
kvmppc_get_gpr(vcpu, rb));
arch/powerpc/kvm/book3s_emulate.c
412
b = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_emulate.c
428
rb_val = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_emulate.c
439
rb_val = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_emulate.c
449
ulong rb_val = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_hv.c
1537
u32 inst, rb, thr;
arch/powerpc/kvm/book3s_hv.c
1548
rb = get_rb(inst);
arch/powerpc/kvm/book3s_hv.c
1552
arg = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_hv.c
1567
arg = kvmppc_get_gpr(vcpu, rb);
arch/powerpc/kvm/book3s_hv_builtin.c
607
unsigned long rb, set;
arch/powerpc/kvm/book3s_hv_builtin.c
609
rb = PPC_BIT(52); /* IS = 2 */
arch/powerpc/kvm/book3s_hv_builtin.c
613
: : "r" (rb), "i" (0), "i" (0), "i" (0),
arch/powerpc/kvm/book3s_hv_builtin.c
615
rb += PPC_BIT(51); /* increment set number */
arch/powerpc/kvm/book3s_hv_p9_entry.c
421
unsigned long rb, set;
arch/powerpc/kvm/book3s_hv_p9_entry.c
423
rb = PPC_BIT(52); /* IS = 2 */
arch/powerpc/kvm/book3s_hv_p9_entry.c
427
: : "r" (rb), "i" (1), "i" (1), "i" (2),
arch/powerpc/kvm/book3s_hv_p9_entry.c
430
rb += PPC_BIT(51); /* increment set number */
arch/powerpc/kvm/book3s_hv_p9_entry.c
433
: : "r" (rb), "i" (1), "i" (1), "i" (0),
arch/powerpc/kvm/book3s_hv_p9_entry.c
443
: : "r" (rb), "i" (0), "i" (0), "i" (0),
arch/powerpc/kvm/book3s_hv_p9_entry.c
445
rb += PPC_BIT(51); /* increment set number */
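Aside: the book3s_hv_builtin.c and book3s_hv_p9_entry.c loops above flush every TLB congruence class: rb starts as PPC_BIT(52) (IS = 2, flush the whole set) and the set-number field one bit position higher is bumped by adding PPC_BIT(51) each iteration. A standalone model of the loop arithmetic — note PPC_BIT() counts from the most-significant bit (IBM numbering), and the set count here is shrunk from the hundreds of sets real parts have:

#include <stdio.h>

#define PPC_BIT(b)	(1UL << (63 - (b)))

int main(void)
{
	unsigned long rb = PPC_BIT(52);		/* IS = 2 */
	int set, num_sets = 4;			/* demo-sized */

	for (set = 0; set < num_sets; set++) {
		printf("tlbiel rb = 0x%04lx (set %d)\n", rb, set);
		rb += PPC_BIT(51);		/* increment set number */
	}
	return 0;
}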
arch/powerpc/kvm/book3s_hv_ras.c
57
unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
arch/powerpc/kvm/book3s_hv_ras.c
60
rb = (rb & ~0xFFFul) | i; /* insert entry number */
arch/powerpc/kvm/book3s_hv_ras.c
61
asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
arch/powerpc/kvm/book3s_hv_rm_mmu.c
1019
unsigned long rb;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
1029
rb = compute_tlbie_rb(hp0, hp1, pte_index);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
1030
do_tlbies(kvm, &rb, 1, 1, true);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
1037
unsigned long rb;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
1047
rb = compute_tlbie_rb(hp0, hp1, pte_index);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
1051
do_tlbies(kvm, &rb, 1, 1, false);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
429
unsigned long rb,rs,prs,r,ric;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
431
rb = PPC_BIT(52); /* IS = 2 */
arch/powerpc/kvm/book3s_hv_rm_mmu.c
443
: : "r"(rb), "i"(r), "i"(prs),
arch/powerpc/kvm/book3s_hv_rm_mmu.c
490
unsigned long v, r, rb;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
518
rb = compute_tlbie_rb(v, pte_r, pte_index);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
519
do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
680
unsigned long v, r, rb, mask, bits;
arch/powerpc/kvm/book3s_hv_rm_mmu.c
727
rb = compute_tlbie_rb(v, r, pte_index);
arch/powerpc/kvm/book3s_hv_rm_mmu.c
730
do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
arch/powerpc/kvm/book3s_pr.c
1498
u64 rb = sregs->u.s.ppc64.slb[i].slbe;
arch/powerpc/kvm/book3s_pr.c
1501
if (rb & SLB_ESID_V)
arch/powerpc/kvm/book3s_pr.c
1502
vcpu->arch.mmu.slbmte(vcpu, rs, rb);
arch/powerpc/kvm/book3s_pr_papr.c
113
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
arch/powerpc/kvm/book3s_pr_papr.c
114
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
arch/powerpc/kvm/book3s_pr_papr.c
155
unsigned long pteg, rb, flags;
arch/powerpc/kvm/book3s_pr_papr.c
200
rb = compute_tlbie_rb(pte[0], pte[1],
arch/powerpc/kvm/book3s_pr_papr.c
202
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
arch/powerpc/kvm/book3s_pr_papr.c
219
unsigned long rb, pteg, r, v;
arch/powerpc/kvm/book3s_pr_papr.c
246
rb = compute_tlbie_rb(v, r, pte_index);
arch/powerpc/kvm/book3s_pr_papr.c
247
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
arch/powerpc/kvm/book3s_pr_papr.c
91
unsigned long v = 0, pteg, rb;
arch/powerpc/kvm/e500_emulate.c
133
int rb = get_rb(inst);
arch/powerpc/kvm/e500_emulate.c
147
emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
arch/powerpc/kvm/e500_emulate.c
151
emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
arch/powerpc/kvm/e500_emulate.c
164
ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
arch/powerpc/kvm/e500_emulate.c
170
ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
arch/powerpc/kvm/e500_emulate.c
176
ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
arch/powerpc/kvm/e500_emulate.c
51
static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
arch/powerpc/kvm/e500_emulate.c
53
ulong param = vcpu->arch.regs.gpr[rb];
arch/powerpc/kvm/e500_emulate.c
63
static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
arch/powerpc/kvm/e500_emulate.c
65
ulong param = vcpu->arch.regs.gpr[rb];
arch/powerpc/kvm/e500_emulate.c
66
int prio = dbell2prio(rb);
arch/powerpc/lib/sstep.c
1357
unsigned int opcode, ra, rb, rc, rd, spr, u;
arch/powerpc/lib/sstep.c
1447
rb = (word >> 11) & 0x1f;
arch/powerpc/lib/sstep.c
1450
rb = (regs->ccr >> (31 - rb)) & 1;
arch/powerpc/lib/sstep.c
1451
val = (word >> (6 + ra * 2 + rb)) & 1;
arch/powerpc/lib/sstep.c
1482
rb = (word >> 11) & 0x1f;
arch/powerpc/lib/sstep.c
1534
"r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
arch/powerpc/lib/sstep.c
1540
"r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
arch/powerpc/lib/sstep.c
1546
"r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
arch/powerpc/lib/sstep.c
1629
op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
arch/powerpc/lib/sstep.c
1636
op->val = ROTATE(val, rb) & MASK32(mb, me);
arch/powerpc/lib/sstep.c
1642
rb = regs->gpr[rb] & 0x1f;
arch/powerpc/lib/sstep.c
1644
op->val = ROTATE(val, rb) & MASK32(mb, me);
arch/powerpc/lib/sstep.c
1681
sh = rb | ((word & 2) << 4);
arch/powerpc/lib/sstep.c
1701
sh = regs->gpr[rb] & 0x3f;
arch/powerpc/lib/sstep.c
1723
op->val = (val) ? val2 : regs->gpr[rb];
arch/powerpc/lib/sstep.c
1731
(int)regs->gpr[rb])))
arch/powerpc/lib/sstep.c
1736
if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
arch/powerpc/lib/sstep.c
179
int ra, rb;
arch/powerpc/lib/sstep.c
183
rb = (instr >> 11) & 0x1f;
arch/powerpc/lib/sstep.c
1839
val2 = regs->gpr[rb];
arch/powerpc/lib/sstep.c
184
ea = regs->gpr[rb];
arch/powerpc/lib/sstep.c
1852
val2 = regs->gpr[rb];
arch/powerpc/lib/sstep.c
1864
do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
arch/powerpc/lib/sstep.c
1872
regs->gpr[rb], 1);
arch/powerpc/lib/sstep.c
1877
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
arch/powerpc/lib/sstep.c
1882
regs->gpr[rb], 0);
arch/powerpc/lib/sstep.c
1887
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
arch/powerpc/lib/sstep.c
1891
op->val = regs->gpr[rb] - regs->gpr[ra];
arch/powerpc/lib/sstep.c
1896
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
arch/powerpc/lib/sstep.c
1901
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
arch/powerpc/lib/sstep.c
1910
regs->gpr[rb], regs->xer & XER_CA);
arch/powerpc/lib/sstep.c
1915
regs->gpr[rb], regs->xer & XER_CA);
arch/powerpc/lib/sstep.c
1934
op->val = regs->gpr[ra] * regs->gpr[rb];
arch/powerpc/lib/sstep.c
1944
(int) regs->gpr[rb];
arch/powerpc/lib/sstep.c
1951
op->val = regs->gpr[ra] % regs->gpr[rb];
arch/powerpc/lib/sstep.c
1955
op->val = regs->gpr[ra] + regs->gpr[rb];
arch/powerpc/lib/sstep.c
1962
(unsigned int) regs->gpr[rb];
arch/powerpc/lib/sstep.c
1966
op->val = regs->gpr[ra] / regs->gpr[rb];
arch/powerpc/lib/sstep.c
1971
(unsigned int) regs->gpr[rb];
arch/powerpc/lib/sstep.c
1976
(long int) regs->gpr[rb];
arch/powerpc/lib/sstep.c
1981
(int) regs->gpr[rb];
arch/powerpc/lib/sstep.c
1987
"r" (regs->gpr[rb]));
arch/powerpc/lib/sstep.c
1992
"r" (regs->gpr[rb]));
arch/powerpc/lib/sstep.c
2021
(long int) regs->gpr[rb];
arch/powerpc/lib/sstep.c
2028
(int) regs->gpr[rb];
arch/powerpc/lib/sstep.c
2046
op->val = regs->gpr[rd] & regs->gpr[rb];
arch/powerpc/lib/sstep.c
2050
op->val = regs->gpr[rd] & ~regs->gpr[rb];
arch/powerpc/lib/sstep.c
2058
op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
arch/powerpc/lib/sstep.c
2070
do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
arch/powerpc/lib/sstep.c
2074
op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
arch/powerpc/lib/sstep.c
2078
op->val = regs->gpr[rd] ^ regs->gpr[rb];
arch/powerpc/lib/sstep.c
2086
op->val = regs->gpr[rd] | ~regs->gpr[rb];
arch/powerpc/lib/sstep.c
2090
op->val = regs->gpr[rd] | regs->gpr[rb];
arch/powerpc/lib/sstep.c
2094
op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
arch/powerpc/lib/sstep.c
2132
sh = regs->gpr[rb] & 0x3f;
arch/powerpc/lib/sstep.c
2140
sh = regs->gpr[rb] & 0x3f;
arch/powerpc/lib/sstep.c
2149
sh = regs->gpr[rb] & 0x3f;
arch/powerpc/lib/sstep.c
2162
sh = rb;
arch/powerpc/lib/sstep.c
2175
sh = regs->gpr[rb] & 0x7f;
arch/powerpc/lib/sstep.c
2183
sh = regs->gpr[rb] & 0x7f;
arch/powerpc/lib/sstep.c
2192
sh = regs->gpr[rb] & 0x7f;
arch/powerpc/lib/sstep.c
2206
sh = rb | ((word & 2) << 4);
arch/powerpc/lib/sstep.c
2222
sh = rb | ((word & 2) << 4);
arch/powerpc/lib/sstep.c
2320
if (!((rd & 1) || rd == ra || rd == rb))
arch/powerpc/lib/sstep.c
2446
if (rb == 0)
arch/powerpc/lib/sstep.c
2447
rb = 32; /* # bytes to load */
arch/powerpc/lib/sstep.c
2448
op->type = MKOP(LOAD_MULTI, 0, rb);
arch/powerpc/lib/sstep.c
2513
if (rb == 0)
arch/powerpc/lib/sstep.c
2514
rb = 32; /* # bytes to store */
arch/powerpc/lib/sstep.c
2515
op->type = MKOP(STORE_MULTI, 0, rb);
arch/powerpc/lib/sstep.c
2563
nb = regs->gpr[rb] & 0xff;
arch/powerpc/lib/sstep.c
2612
nb = regs->gpr[rb] & 0xff;
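Aside: the rlwinm/rlwnm-style sstep.c hits above compute ROTATE(val, rb) & MASK32(mb, me): rotate a 32-bit value left by rb (taken from the instruction or a register, masked to 5 bits), then keep a contiguous bit range. A standalone reimplementation of both helpers for the non-wrapping mask case, using IBM bit numbering (bit 0 = MSB); the real helper also covers mb > me wrap-around masks:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotate32(uint32_t v, unsigned int n)
{
	n &= 31;
	return n ? (v << n) | (v >> (32 - n)) : v;
}

/* Mask covering IBM-numbered bits mb..me (mb <= me assumed). */
static uint32_t mask32(unsigned int mb, unsigned int me)
{
	uint32_t m = 0xffffffffu >> mb;

	if (me < 31)
		m ^= 0xffffffffu >> (me + 1);
	return m;
}

int main(void)
{
	/* extract byte 1: rotate left 8, keep IBM bits 24..31 */
	printf("0x%08x\n", rotate32(0x12345678u, 8) & mask32(24, 31));
	return 0;
}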
arch/powerpc/mm/book3s64/hash_native.c
130
unsigned long rb,rs,prs,r,ric;
arch/powerpc/mm/book3s64/hash_native.c
132
rb = PPC_BIT(52); /* IS = 2 */
arch/powerpc/mm/book3s64/hash_native.c
144
: : "r"(rb), "i"(r), "i"(prs),
arch/powerpc/mm/book3s64/hash_native.c
158
unsigned long rb;
arch/powerpc/mm/book3s64/hash_native.c
160
rb = ___tlbie(vpn, psize, apsize, ssize);
arch/powerpc/mm/book3s64/hash_native.c
161
trace_tlbie(0, 0, rb, 0, 0, 0, 0);
arch/powerpc/mm/book3s64/hash_utils.c
175
unsigned long rb;
arch/powerpc/mm/book3s64/hash_utils.c
177
rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
arch/powerpc/mm/book3s64/hash_utils.c
179
asm volatile("tlbiel %0" : : "r" (rb));
arch/powerpc/mm/book3s64/hash_utils.c
190
unsigned long rb;
arch/powerpc/mm/book3s64/hash_utils.c
194
rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
arch/powerpc/mm/book3s64/hash_utils.c
198
: : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
arch/powerpc/mm/book3s64/radix_tlb.c
103
unsigned long rb,rs,prs,r;
arch/powerpc/mm/book3s64/radix_tlb.c
105
rb = PPC_BIT(53); /* IS = 1 */
arch/powerpc/mm/book3s64/radix_tlb.c
106
rb |= set << PPC_BITLSHIFT(51);
arch/powerpc/mm/book3s64/radix_tlb.c
112
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
113
trace_tlbie(0, 1, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
118
unsigned long rb,rs,prs,r;
arch/powerpc/mm/book3s64/radix_tlb.c
120
rb = PPC_BIT(53); /* IS = 1 */
arch/powerpc/mm/book3s64/radix_tlb.c
126
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
127
trace_tlbie(0, 0, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
132
unsigned long rb,rs,prs,r;
arch/powerpc/mm/book3s64/radix_tlb.c
134
rb = PPC_BIT(52); /* IS = 2 */
arch/powerpc/mm/book3s64/radix_tlb.c
1379
unsigned long rb,prs,r,rs;
arch/powerpc/mm/book3s64/radix_tlb.c
1382
rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
arch/powerpc/mm/book3s64/radix_tlb.c
1392
: : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
1397
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
140
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
1406
unsigned long rb, rs, prs, r;
arch/powerpc/mm/book3s64/radix_tlb.c
1408
rb = PPC_BIT(53); /* IS = 1 */
arch/powerpc/mm/book3s64/radix_tlb.c
141
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
1414
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
1415
trace_tlbie(0, 0, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
1422
unsigned long rb, rs, prs, r;
arch/powerpc/mm/book3s64/radix_tlb.c
1424
rb = va & ~(PPC_BITMASK(52, 63));
arch/powerpc/mm/book3s64/radix_tlb.c
1425
rb |= ap << PPC_BITLSHIFT(58);
arch/powerpc/mm/book3s64/radix_tlb.c
1431
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
1432
trace_tlbie(0, 0, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
146
unsigned long rb,rs,prs,r;
arch/powerpc/mm/book3s64/radix_tlb.c
148
rb = PPC_BIT(52); /* IS = 2 */
arch/powerpc/mm/book3s64/radix_tlb.c
154
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
155
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
161
unsigned long rb,rs,prs,r;
arch/powerpc/mm/book3s64/radix_tlb.c
163
rb = va & ~(PPC_BITMASK(52, 63));
arch/powerpc/mm/book3s64/radix_tlb.c
164
rb |= ap << PPC_BITLSHIFT(58);
arch/powerpc/mm/book3s64/radix_tlb.c
170
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
171
trace_tlbie(0, 1, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
177
unsigned long rb,rs,prs,r;
arch/powerpc/mm/book3s64/radix_tlb.c
179
rb = va & ~(PPC_BITMASK(52, 63));
arch/powerpc/mm/book3s64/radix_tlb.c
180
rb |= ap << PPC_BITLSHIFT(58);
arch/powerpc/mm/book3s64/radix_tlb.c
186
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
187
trace_tlbie(0, 0, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
193
unsigned long rb,rs,prs,r;
arch/powerpc/mm/book3s64/radix_tlb.c
195
rb = va & ~(PPC_BITMASK(52, 63));
arch/powerpc/mm/book3s64/radix_tlb.c
196
rb |= ap << PPC_BITLSHIFT(58);
arch/powerpc/mm/book3s64/radix_tlb.c
202
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
arch/powerpc/mm/book3s64/radix_tlb.c
203
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
arch/powerpc/mm/book3s64/radix_tlb.c
32
unsigned long rb;
arch/powerpc/mm/book3s64/radix_tlb.c
35
rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
arch/powerpc/mm/book3s64/radix_tlb.c
39
: : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
arch/powerpc/sysdev/fsl_pci.c
949
unsigned int rd, ra, rb, d;
arch/powerpc/sysdev/fsl_pci.c
953
rb = get_rb(inst);
arch/powerpc/sysdev/fsl_pci.c
966
regs->gpr[ra] += regs->gpr[rb];
arch/powerpc/sysdev/fsl_pci.c
975
regs->gpr[ra] += regs->gpr[rb];
arch/powerpc/sysdev/fsl_pci.c
985
regs->gpr[ra] += regs->gpr[rb];
arch/powerpc/sysdev/fsl_pci.c
994
regs->gpr[ra] += regs->gpr[rb];
arch/powerpc/sysdev/mpic.c
177
struct mpic_reg_bank *rb,
arch/powerpc/sysdev/mpic.c
183
return dcr_read(rb->dhost, reg);
arch/powerpc/sysdev/mpic.c
186
return in_be32(rb->base + (reg >> 2));
arch/powerpc/sysdev/mpic.c
189
return in_le32(rb->base + (reg >> 2));
arch/powerpc/sysdev/mpic.c
194
struct mpic_reg_bank *rb,
arch/powerpc/sysdev/mpic.c
200
dcr_write(rb->dhost, reg, value);
arch/powerpc/sysdev/mpic.c
204
out_be32(rb->base + (reg >> 2), value);
arch/powerpc/sysdev/mpic.c
208
out_le32(rb->base + (reg >> 2), value);
arch/powerpc/sysdev/mpic.c
318
struct mpic_reg_bank *rb, unsigned int offset,
arch/powerpc/sysdev/mpic.c
321
rb->base = ioremap(phys_addr + offset, size);
arch/powerpc/sysdev/mpic.c
322
BUG_ON(rb->base == NULL);
arch/powerpc/sysdev/mpic.c
326
static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
arch/powerpc/sysdev/mpic.c
330
rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
arch/powerpc/sysdev/mpic.c
331
BUG_ON(!DCR_MAP_OK(rb->dhost));
arch/powerpc/sysdev/mpic.c
335
phys_addr_t phys_addr, struct mpic_reg_bank *rb,
arch/powerpc/sysdev/mpic.c
339
_mpic_map_dcr(mpic, rb, offset, size);
arch/powerpc/sysdev/mpic.c
341
_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
arch/s390/boot/ipl_report.c
24
#define for_each_rb_entry(entry, rb) \
arch/s390/boot/ipl_report.c
25
for (entry = rb->entries; \
arch/s390/boot/ipl_report.c
26
(void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
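Aside: for_each_rb_entry() above iterates variable records packed after a block header, stopping once the next record would overrun rb->len; the void-pointer arithmetic is a GNU C extension the kernel builds with. A standalone sketch using invented stand-in types:

#include <stdio.h>

struct demo_entry { unsigned int val; };
struct demo_block {
	unsigned int len;		/* total block size in bytes */
	struct demo_entry entries[];
};

#define for_each_demo_entry(entry, rb)                                    \
	for (entry = (rb)->entries;                                       \
	     (void *)entry + sizeof(*entry) <= (void *)(rb) + (rb)->len;  \
	     entry++)

int main(void)
{
	static _Alignas(struct demo_block) unsigned char
		buf[sizeof(struct demo_block) + 3 * sizeof(struct demo_entry)];
	struct demo_block *rb = (struct demo_block *)buf;
	struct demo_entry *entry;
	unsigned int i;

	rb->len = sizeof(buf);
	for (i = 0; i < 3; i++)
		rb->entries[i].val = i + 1;

	for_each_demo_entry(entry, rb)
		printf("entry %u\n", entry->val);
	return 0;
}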
arch/sh/kernel/disassemble.c
303
int rb = 0;
arch/sh/kernel/disassemble.c
371
rb = nibs[n] & 0x07;
arch/sh/kernel/disassemble.c
423
pr_cont("r%d_bank", rb);
arch/sparc/kernel/btext.c
196
int rb = dispDeviceRowBytes;
arch/sparc/kernel/btext.c
201
draw_byte_32(font, (unsigned int *)base, rb);
arch/sparc/kernel/btext.c
205
draw_byte_16(font, (unsigned int *)base, rb);
arch/sparc/kernel/btext.c
208
draw_byte_8(font, (unsigned int *)base, rb);
arch/sparc/kernel/btext.c
24
static void draw_byte_32(const unsigned char *bits, unsigned int *base, int rb);
arch/sparc/kernel/btext.c
240
static void draw_byte_32(const unsigned char *font, unsigned int *base, int rb)
arch/sparc/kernel/btext.c
25
static void draw_byte_16(const unsigned char *bits, unsigned int *base, int rb);
arch/sparc/kernel/btext.c
257
base = (unsigned int *) ((char *)base + rb);
arch/sparc/kernel/btext.c
26
static void draw_byte_8(const unsigned char *bits, unsigned int *base, int rb);
arch/sparc/kernel/btext.c
261
static void draw_byte_16(const unsigned char *font, unsigned int *base, int rb)
arch/sparc/kernel/btext.c
275
base = (unsigned int *) ((char *)base + rb);
arch/sparc/kernel/btext.c
279
static void draw_byte_8(const unsigned char *font, unsigned int *base, int rb)
arch/sparc/kernel/btext.c
291
base = (unsigned int *) ((char *)base + rb);
arch/x86/mm/pat/memtype.h
15
struct rb_node rb;
arch/x86/mm/pat/memtype_interval.c
46
INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
drivers/block/drbd/drbd_interval.c
102
rb_erase_augmented(&this->rb, root, &augment_callbacks);
drivers/block/drbd/drbd_interval.c
12
struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
128
rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
154
node = rb_next(&i->rb);
drivers/block/drbd/drbd_interval.c
157
i = rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
19
struct drbd_interval, rb, sector_t, end, NODE_END);
drivers/block/drbd/drbd_interval.c
34
rb_entry(*new, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.c
52
rb_link_node(&this->rb, parent, new);
drivers/block/drbd/drbd_interval.c
53
rb_insert_augmented(&this->rb, root, &augment_callbacks);
drivers/block/drbd/drbd_interval.c
76
rb_entry(node, struct drbd_interval, rb);
drivers/block/drbd/drbd_interval.h
24
RB_CLEAR_NODE(&i->rb);
drivers/block/drbd/drbd_interval.h
29
return RB_EMPTY_NODE(&i->rb);
drivers/block/drbd/drbd_interval.h
9
struct rb_node rb;
drivers/clk/renesas/r9a06g032-clocks.c
709
struct regbit rb, unsigned int on)
drivers/clk/renesas/r9a06g032-clocks.c
711
u32 __iomem *reg = clocks->reg + (rb.reg * 4);
drivers/clk/renesas/r9a06g032-clocks.c
714
if (!rb.reg && !rb.bit)
drivers/clk/renesas/r9a06g032-clocks.c
718
val = (val & ~BIT(rb.bit)) | ((!!on) << rb.bit);
drivers/clk/renesas/r9a06g032-clocks.c
722
static int clk_rdesc_get(struct r9a06g032_priv *clocks, struct regbit rb)
drivers/clk/renesas/r9a06g032-clocks.c
724
u32 __iomem *reg = clocks->reg + (rb.reg * 4);
drivers/clk/renesas/r9a06g032-clocks.c
727
return !!(val & BIT(rb.bit));
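Aside: the r9a06g032 helpers above address a single clock-control flag as a {register, bit} pair: clk_rdesc_set() is a read-modify-write that clears the bit before OR-ing in the normalized value, and clk_rdesc_get() is mask-and-test collapsed to 0/1 with !!. A standalone model, with an array standing in for the ioremapped register window and the bitfield widths assumed:

#include <stdint.h>
#include <stdio.h>

struct regbit { unsigned int reg : 5, bit : 5; };	/* widths assumed */

static uint32_t regs[8];	/* stand-in for the MMIO window */

static void rdesc_set(struct regbit rb, unsigned int on)
{
	uint32_t val = regs[rb.reg];

	val = (val & ~(1u << rb.bit)) | ((!!on) << rb.bit);
	regs[rb.reg] = val;
}

static int rdesc_get(struct regbit rb)
{
	return !!(regs[rb.reg] & (1u << rb.bit));
}

int main(void)
{
	struct regbit rb = { .reg = 2, .bit = 7 };

	rdesc_set(rb, 1);
	printf("bit = %d\n", rdesc_get(rb));
	return 0;
}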
drivers/firmware/arm_scmi/raw_mode.c
1079
struct scmi_raw_buffer *rb;
drivers/firmware/arm_scmi/raw_mode.c
1087
rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
drivers/firmware/arm_scmi/raw_mode.c
1088
if (!rb)
drivers/firmware/arm_scmi/raw_mode.c
1093
for (i = 0; i < raw->tx_max_msg; i++, rb++) {
drivers/firmware/arm_scmi/raw_mode.c
1094
rb->max_len = raw->desc->max_msg_size + sizeof(u32);
drivers/firmware/arm_scmi/raw_mode.c
1095
rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
drivers/firmware/arm_scmi/raw_mode.c
1096
if (!rb->msg.buf)
drivers/firmware/arm_scmi/raw_mode.c
1098
scmi_raw_buffer_put(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
1376
struct scmi_raw_buffer *rb;
drivers/firmware/arm_scmi/raw_mode.c
1404
rb = scmi_raw_buffer_get(q);
drivers/firmware/arm_scmi/raw_mode.c
1405
if (!rb) {
drivers/firmware/arm_scmi/raw_mode.c
1431
rb = scmi_raw_buffer_dequeue_unlocked(q);
drivers/firmware/arm_scmi/raw_mode.c
1432
if (WARN_ON(!rb)) {
drivers/firmware/arm_scmi/raw_mode.c
1438
rb->msg.len = rb->max_len;
drivers/firmware/arm_scmi/raw_mode.c
1446
ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
drivers/firmware/arm_scmi/raw_mode.c
1449
scmi_raw_buffer_put(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
1453
scmi_raw_buffer_enqueue(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
261
struct scmi_raw_buffer *rb = NULL;
drivers/firmware/arm_scmi/raw_mode.c
266
rb = list_first_entry(head, struct scmi_raw_buffer, node);
drivers/firmware/arm_scmi/raw_mode.c
267
list_del_init(&rb->node);
drivers/firmware/arm_scmi/raw_mode.c
271
return rb;
drivers/firmware/arm_scmi/raw_mode.c
275
struct scmi_raw_buffer *rb)
drivers/firmware/arm_scmi/raw_mode.c
280
rb->msg.len = rb->max_len;
drivers/firmware/arm_scmi/raw_mode.c
283
list_add_tail(&rb->node, &q->free_bufs);
drivers/firmware/arm_scmi/raw_mode.c
288
struct scmi_raw_buffer *rb)
drivers/firmware/arm_scmi/raw_mode.c
293
list_add_tail(&rb->node, &q->msg_q);
drivers/firmware/arm_scmi/raw_mode.c
302
struct scmi_raw_buffer *rb = NULL;
drivers/firmware/arm_scmi/raw_mode.c
305
rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
drivers/firmware/arm_scmi/raw_mode.c
306
list_del_init(&rb->node);
drivers/firmware/arm_scmi/raw_mode.c
309
return rb;
drivers/firmware/arm_scmi/raw_mode.c
315
struct scmi_raw_buffer *rb;
drivers/firmware/arm_scmi/raw_mode.c
318
rb = scmi_raw_buffer_dequeue_unlocked(q);
drivers/firmware/arm_scmi/raw_mode.c
321
return rb;
drivers/firmware/arm_scmi/raw_mode.c
326
struct scmi_raw_buffer *rb;
drivers/firmware/arm_scmi/raw_mode.c
329
rb = scmi_raw_buffer_dequeue(q);
drivers/firmware/arm_scmi/raw_mode.c
330
if (rb)
drivers/firmware/arm_scmi/raw_mode.c
331
scmi_raw_buffer_put(q, rb);
drivers/firmware/arm_scmi/raw_mode.c
332
} while (rb);
drivers/firmware/arm_scmi/raw_mode.c
712
struct scmi_raw_buffer *rb;
drivers/firmware/arm_scmi/raw_mode.c
727
rb = scmi_raw_buffer_dequeue_unlocked(q);
drivers/firmware/arm_scmi/raw_mode.c
731
return rb;
drivers/firmware/arm_scmi/raw_mode.c
755
struct scmi_raw_buffer *rb;
drivers/firmware/arm_scmi/raw_mode.c
762
rb = scmi_raw_message_dequeue(q, o_nonblock);
drivers/firmware/arm_scmi/raw_mode.c
763
if (IS_ERR(rb)) {
drivers/firmware/arm_scmi/raw_mode.c
765
return PTR_ERR(rb);
drivers/firmware/arm_scmi/raw_mode.c
768
if (rb->msg.len <= len) {
drivers/firmware/arm_scmi/raw_mode.c
769
memcpy(buf, rb->msg.buf, rb->msg.len);
drivers/firmware/arm_scmi/raw_mode.c
770
*size = rb->msg.len;
drivers/firmware/arm_scmi/raw_mode.c
775
scmi_raw_buffer_put(q, rb);
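Aside: the raw_mode.c hits implement a fixed-size buffer pool: scmi_raw_buffer_get() pops the head of a free list, and scmi_raw_buffer_put() resets msg.len back to max_len before returning the buffer to the list. A standalone model of that get/put discipline, simplified to a singly linked free list in place of the kernel's spinlock-protected list_head queues:

#include <stddef.h>
#include <stdio.h>

struct raw_buffer {
	size_t len, max_len;
	struct raw_buffer *next;
};

static struct raw_buffer *free_bufs;

static struct raw_buffer *buffer_get(void)
{
	struct raw_buffer *rb = free_bufs;

	if (rb)
		free_bufs = rb->next;	/* pop the head */
	return rb;
}

static void buffer_put(struct raw_buffer *rb)
{
	rb->len = rb->max_len;		/* reset to full capacity  */
	rb->next = free_bufs;		/* push back onto the list */
	free_bufs = rb;
}

int main(void)
{
	struct raw_buffer pool[2] = { { .max_len = 128 }, { .max_len = 128 } };

	buffer_put(&pool[0]);
	buffer_put(&pool[1]);
	printf("got a %zu-byte buffer\n", buffer_get()->len);
	return 0;
}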
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
769
int rb = 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
846
rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
920
modifier |= AMD_FMT_MOD_SET(RB, rb) |
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
922
dcc_block_bits = max(20, 18 + rb);
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
67
struct rb_node rb;
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
414
struct amdgpu_fw_shared_rb_ptrs_struct rb;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2818
&vm->va.rb_root, rb) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
93
INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1175
fw_shared->rb.rptr = 0;
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1176
fw_shared->rb.wptr = lower_32_bits(ring->wptr);
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1353
fw_shared->rb.wptr = lower_32_bits(ring->wptr);
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1774
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1775
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
1842
fw_shared->rb.wptr = lower_32_bits(ring->wptr);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2826
rb_node = rb_prev(&node->rb);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2834
node = container_of(rb_node, struct interval_tree_node, rb);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
2878
struct amdgpu_bo_va_mapping, rb);
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
3426
struct amdgpu_bo_va_mapping, rb);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
459
int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
505
AMD_FMT_MOD_SET(RB, rb) |
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
519
AMD_FMT_MOD_SET(RB, rb) |
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
207
dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
415
struct dmub_rb rb;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7217
static inline bool dmub_rb_empty(struct dmub_rb *rb)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7219
return (rb->wrpt == rb->rptr);
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7228
static inline uint32_t dmub_rb_num_outstanding(struct dmub_rb *rb)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7232
if (rb->wrpt >= rb->rptr)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7233
data_count = rb->wrpt - rb->rptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7235
data_count = rb->capacity - (rb->rptr - rb->wrpt);
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7246
static inline uint32_t dmub_rb_num_free(struct dmub_rb *rb)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7250
if (rb->wrpt >= rb->rptr)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7251
data_count = rb->wrpt - rb->rptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7253
data_count = rb->capacity - (rb->rptr - rb->wrpt);
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7258
return (rb->capacity - data_count) / DMUB_RB_CMD_SIZE;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7268
static inline bool dmub_rb_full(struct dmub_rb *rb)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7272
if (rb->wrpt >= rb->rptr)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7273
data_count = rb->wrpt - rb->rptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7275
data_count = rb->capacity - (rb->rptr - rb->wrpt);
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7278
return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7289
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7292
uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7296
if (rb->capacity == 0)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7299
if (dmub_rb_full(rb))
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7306
rb->wrpt += DMUB_RB_CMD_SIZE;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7308
if (rb->wrpt >= rb->capacity)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7309
rb->wrpt %= rb->capacity;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7322
static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7325
uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7328
if (rb->capacity == 0)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7331
if (dmub_rb_full(rb))
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7336
rb->wrpt += DMUB_RB_CMD_SIZE;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7338
if (rb->wrpt >= rb->capacity)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7339
rb->wrpt %= rb->capacity;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7352
static inline bool dmub_rb_front(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7355
uint8_t *rb_cmd = (uint8_t *)(rb->base_address) + rb->rptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7357
if (dmub_rb_empty(rb))
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7372
static inline void dmub_rb_get_rptr_with_offset(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7376
if (rb->capacity == 0)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7379
*next_rptr = rb->rptr + DMUB_RB_CMD_SIZE * num_cmds;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7381
if (*next_rptr >= rb->capacity)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7382
*next_rptr %= rb->capacity;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7394
static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7398
uint8_t *rb_cmd = (uint8_t *)(rb->base_address) + rptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7400
if (dmub_rb_empty(rb))
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7416
static inline bool dmub_rb_out_front(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7419
const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr);
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7423
if (dmub_rb_empty(rb))
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7440
static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7442
if (rb->capacity == 0)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7445
if (dmub_rb_empty(rb))
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7448
rb->rptr += DMUB_RB_CMD_SIZE;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7450
if (rb->rptr >= rb->capacity)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7451
rb->rptr %= rb->capacity;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7464
static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7466
uint32_t rptr = rb->rptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7467
uint32_t wptr = rb->wrpt;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7469
if (rb->capacity == 0)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7473
uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7480
if (rptr >= rb->capacity)
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7481
rptr %= rb->capacity;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7491
static inline void dmub_rb_init(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7494
rb->base_address = init_params->base_address;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7495
rb->capacity = init_params->capacity;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7496
rb->rptr = init_params->read_ptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7497
rb->wrpt = init_params->write_ptr;
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7506
static inline void dmub_rb_get_return_data(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7510
uint8_t *rd_ptr = (rb->rptr == 0) ?
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7511
(uint8_t *)rb->base_address + rb->capacity - DMUB_RB_CMD_SIZE :
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
7512
(uint8_t *)rb->base_address + rb->rptr - DMUB_RB_CMD_SIZE;
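Aside: the dmub_rb helpers above derive everything from two byte offsets into a fixed-capacity ring: outstanding bytes are wrpt - rptr, or capacity - (rptr - wrpt) once the writer has wrapped past the reader, and the free-slot count divides the remainder by the fixed command size. A standalone model of that occupancy math:

#include <stdint.h>
#include <stdio.h>

#define CMD_SIZE 64u	/* stand-in for DMUB_RB_CMD_SIZE */

struct demo_rb { uint32_t rptr, wrpt, capacity; };

static uint32_t rb_outstanding(const struct demo_rb *rb)
{
	if (rb->wrpt >= rb->rptr)
		return rb->wrpt - rb->rptr;
	return rb->capacity - (rb->rptr - rb->wrpt);	/* writer wrapped */
}

static uint32_t rb_num_free(const struct demo_rb *rb)
{
	return (rb->capacity - rb_outstanding(rb)) / CMD_SIZE;
}

int main(void)
{
	struct demo_rb rb = { .rptr = 192, .wrpt = 64, .capacity = 256 };

	/* writer wrapped: 256 - (192 - 64) = 128 bytes outstanding */
	printf("outstanding=%u free slots=%u\n",
	       rb_outstanding(&rb), rb_num_free(&rb));
	return 0;
}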
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1008
scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1009
scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1015
if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1019
if ((dmub_rb_empty(&scratch_inbox1.rb) ||
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1048
if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1162
static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1165
const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1170
if (rb->rptr == rb->wrpt)
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1178
rb->rptr += sizeof(struct dmcub_trace_buf_entry);
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1180
rb->rptr %= rb->capacity;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1304
dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1329
if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1332
dmub->inbox1.rb.rptr = rptr;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1333
dmub->inbox1.rb.wrpt = wptr;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1334
dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1374
if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1396
if (rptr > dmub->inbox1.rb.capacity)
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1399
if (dmub->inbox1.rb.rptr > rptr) {
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1401
dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1403
dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
1405
dmub->inbox1.rb.rptr = rptr;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
815
dmub_rb_init(&dmub->inbox1.rb, &rb_params);
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
856
dmub->inbox1.rb.wrpt = 0;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
857
dmub->inbox1.rb.rptr = 0;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
882
if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
883
dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
887
if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
910
flush_rb = dmub->inbox1.rb;
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
914
dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
916
dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
drivers/gpu/drm/drm_buddy.c
112
rb_erase(&block->rb, root);
drivers/gpu/drm/drm_buddy.c
113
RB_CLEAR_NODE(&block->rb);
drivers/gpu/drm/drm_buddy.c
1294
rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
drivers/gpu/drm/drm_buddy.c
270
if (iter == &buddy->rb)
drivers/gpu/drm/drm_buddy.c
44
RB_CLEAR_NODE(&block->rb);
drivers/gpu/drm/drm_buddy.c
507
rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
drivers/gpu/drm/drm_buddy.c
66
return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
drivers/gpu/drm/drm_buddy.c
97
rb_add(&block->rb,
drivers/gpu/drm/drm_connector.c
189
mode->rb ? " reduced blanking" : "",
drivers/gpu/drm/drm_edid.c
3050
bool rb)
drivers/gpu/drm/drm_edid.c
3063
if (rb != mode_is_rb(ptr))
drivers/gpu/drm/drm_edid.c
3804
bool rb = drm_monitor_supports_rb(drm_edid);
drivers/gpu/drm/drm_edid.c
3809
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
drivers/gpu/drm/drm_edid.c
3900
est3_modes[m].rb);
drivers/gpu/drm/drm_edid.c
692
short rb;
drivers/gpu/drm/drm_gpusvm.c
436
return container_of(node, struct drm_gpusvm_notifier, itree.rb);
drivers/gpu/drm/drm_gpusvm.c
454
node = rb_prev(¬ifier->itree.rb);
drivers/gpu/drm/drm_gpusvm.c
566
return container_of(node, struct drm_gpusvm_range, itree.rb);
drivers/gpu/drm/drm_gpusvm.c
585
node = rb_prev(&range->itree.rb);
drivers/gpu/drm/drm_gpuvm.c
1101
gpuvm->rb.tree = RB_ROOT_CACHED;
drivers/gpu/drm/drm_gpuvm.c
1102
INIT_LIST_HEAD(&gpuvm->rb.list);
drivers/gpu/drm/drm_gpuvm.c
1146
drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
drivers/gpu/drm/drm_gpuvm.c
1988
if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
drivers/gpu/drm/drm_gpuvm.c
1995
drm_gpuva_it_insert(va, &gpuvm->rb.tree);
drivers/gpu/drm/drm_gpuvm.c
1997
node = rb_prev(&va->rb.node);
drivers/gpu/drm/drm_gpuvm.c
1999
head = &(to_drm_gpuva(node))->rb.entry;
drivers/gpu/drm/drm_gpuvm.c
2001
head = &gpuvm->rb.list;
drivers/gpu/drm/drm_gpuvm.c
2003
list_add(&va->rb.entry, head);
drivers/gpu/drm/drm_gpuvm.c
2049
drm_gpuva_it_remove(va, &va->vm->rb.tree);
drivers/gpu/drm/drm_gpuvm.c
2050
list_del_init(&va->rb.entry);
drivers/gpu/drm/drm_gpuvm.c
2193
return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
drivers/gpu/drm/drm_gpuvm.c
2244
return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
drivers/gpu/drm/drm_gpuvm.c
2266
return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
drivers/gpu/drm/drm_gpuvm.c
964
#define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node)
drivers/gpu/drm/drm_gpuvm.c
972
INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
drivers/gpu/drm/drm_mm.c
153
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
drivers/gpu/drm/drm_mm.c
169
struct rb_node **link, *rb;
drivers/gpu/drm/drm_mm.c
176
rb = &hole_node->rb;
drivers/gpu/drm/drm_mm.c
177
while (rb) {
drivers/gpu/drm/drm_mm.c
178
parent = rb_entry(rb, struct drm_mm_node, rb);
drivers/gpu/drm/drm_mm.c
183
rb = rb_parent(rb);
drivers/gpu/drm/drm_mm.c
186
rb = &hole_node->rb;
drivers/gpu/drm/drm_mm.c
187
link = &hole_node->rb.rb_right;
drivers/gpu/drm/drm_mm.c
190
rb = NULL;
drivers/gpu/drm/drm_mm.c
196
rb = *link;
drivers/gpu/drm/drm_mm.c
197
parent = rb_entry(rb, struct drm_mm_node, rb);
drivers/gpu/drm/drm_mm.c
201
link = &parent->rb.rb_left;
drivers/gpu/drm/drm_mm.c
203
link = &parent->rb.rb_right;
drivers/gpu/drm/drm_mm.c
208
rb_link_node(&node->rb, rb, link);
drivers/gpu/drm/drm_mm.c
209
rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
drivers/gpu/drm/drm_mm.c
216
static u64 rb_to_hole_size(struct rb_node *rb)
drivers/gpu/drm/drm_mm.c
218
return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
drivers/gpu/drm/drm_mm.c
224
struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
drivers/gpu/drm/drm_mm.c
229
rb = *link;
drivers/gpu/drm/drm_mm.c
230
if (x > rb_to_hole_size(rb)) {
drivers/gpu/drm/drm_mm.c
231
link = &rb->rb_left;
drivers/gpu/drm/drm_mm.c
233
link = &rb->rb_right;
drivers/gpu/drm/drm_mm.c
238
rb_link_node(&node->rb_hole_size, rb, link);
drivers/gpu/drm/drm_mm.c
296
static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
drivers/gpu/drm/drm_mm.c
298
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
drivers/gpu/drm/drm_mm.c
301
static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
drivers/gpu/drm/drm_mm.c
303
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
drivers/gpu/drm/drm_mm.c
308
struct rb_node *rb = mm->holes_size.rb_root.rb_node;
drivers/gpu/drm/drm_mm.c
313
rb_entry(rb, struct drm_mm_node, rb_hole_size);
drivers/gpu/drm/drm_mm.c
317
rb = rb->rb_right;
drivers/gpu/drm/drm_mm.c
319
rb = rb->rb_left;
drivers/gpu/drm/drm_mm.c
321
} while (rb);
drivers/gpu/drm/drm_mm.c
326
static bool usable_hole_addr(struct rb_node *rb, u64 size)
drivers/gpu/drm/drm_mm.c
328
return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
drivers/gpu/drm/drm_mm.c
333
struct rb_node *rb = mm->holes_addr.rb_node;
drivers/gpu/drm/drm_mm.c
336
while (rb) {
drivers/gpu/drm/drm_mm.c
339
if (!usable_hole_addr(rb, size))
drivers/gpu/drm/drm_mm.c
342
node = rb_hole_addr_to_node(rb);
drivers/gpu/drm/drm_mm.c
346
rb = node->rb_hole_addr.rb_left;
drivers/gpu/drm/drm_mm.c
348
rb = node->rb_hole_addr.rb_right;
drivers/gpu/drm/drm_mm.c
494
static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
drivers/gpu/drm/drm_mm.c
496
return rb ? rb_to_hole_size(rb) : 0;
drivers/gpu/drm/drm_modes.c
2033
bool rb = false, cvt = false;
drivers/gpu/drm/drm_modes.c
2061
rb = true;
drivers/gpu/drm/drm_modes.c
2086
mode->rb = rb;
drivers/gpu/drm/drm_modes.c
2554
cmd->rb, cmd->interlace,
drivers/gpu/drm/drm_prime.c
101
struct rb_node **p, *rb;
drivers/gpu/drm/drm_prime.c
111
rb = NULL;
drivers/gpu/drm/drm_prime.c
116
rb = *p;
drivers/gpu/drm/drm_prime.c
117
pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
drivers/gpu/drm/drm_prime.c
119
p = &rb->rb_right;
drivers/gpu/drm/drm_prime.c
121
p = &rb->rb_left;
drivers/gpu/drm/drm_prime.c
123
rb_link_node(&member->dmabuf_rb, rb, p);
drivers/gpu/drm/drm_prime.c
126
rb = NULL;
drivers/gpu/drm/drm_prime.c
131
rb = *p;
drivers/gpu/drm/drm_prime.c
132
pos = rb_entry(rb, struct drm_prime_member, handle_rb);
drivers/gpu/drm/drm_prime.c
134
p = &rb->rb_right;
drivers/gpu/drm/drm_prime.c
136
p = &rb->rb_left;
drivers/gpu/drm/drm_prime.c
138
rb_link_node(&member->handle_rb, rb, p);
drivers/gpu/drm/drm_prime.c
147
struct rb_node *rb;
drivers/gpu/drm/drm_prime.c
149
rb = prime_fpriv->handles.rb_node;
drivers/gpu/drm/drm_prime.c
150
while (rb) {
drivers/gpu/drm/drm_prime.c
153
member = rb_entry(rb, struct drm_prime_member, handle_rb);
drivers/gpu/drm/drm_prime.c
157
rb = rb->rb_right;
drivers/gpu/drm/drm_prime.c
159
rb = rb->rb_left;
drivers/gpu/drm/drm_prime.c
169
struct rb_node *rb;
drivers/gpu/drm/drm_prime.c
171
rb = prime_fpriv->dmabufs.rb_node;
drivers/gpu/drm/drm_prime.c
172
while (rb) {
drivers/gpu/drm/drm_prime.c
175
member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
drivers/gpu/drm/drm_prime.c
180
rb = rb->rb_right;
drivers/gpu/drm/drm_prime.c
182
rb = rb->rb_left;
drivers/gpu/drm/drm_prime.c
192
struct rb_node *rb;
drivers/gpu/drm/drm_prime.c
194
rb = prime_fpriv->handles.rb_node;
drivers/gpu/drm/drm_prime.c
195
while (rb) {
drivers/gpu/drm/drm_prime.c
198
member = rb_entry(rb, struct drm_prime_member, handle_rb);
drivers/gpu/drm/drm_prime.c
207
rb = rb->rb_right;
drivers/gpu/drm/drm_prime.c
209
rb = rb->rb_left;
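Aside: the drm_prime lookups and inserts above are the canonical kernel rbtree idiom — descend through rb_left/rb_right by key while remembering the parent link slot, then rb_link_node() splices the new node in and rb_insert_color()/rb_erase() keep the tree balanced. A minimal standalone analogue of the descend shape, using an unbalanced binary tree since rebalancing is exactly what the rbtree library supplies:

#include <stdio.h>

struct node { int key; struct node *left, *right; };

static void insert(struct node **root, struct node *n)
{
	struct node **p = root;

	while (*p)			/* descend to an empty link slot */
		p = n->key < (*p)->key ? &(*p)->left : &(*p)->right;
	*p = n;		/* the kernel idiom does rb_link_node() here, */
			/* then rb_insert_color() to restore balance  */
}

static struct node *lookup(struct node *root, int key)
{
	while (root && root->key != key)
		root = key < root->key ? root->left : root->right;
	return root;
}

int main(void)
{
	struct node a = { .key = 2 }, b = { .key = 1 }, c = { .key = 3 };
	struct node *root = NULL;

	insert(&root, &a);
	insert(&root, &b);
	insert(&root, &c);
	printf("found key %d\n", lookup(root, 3)->key);
	return 0;
}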
drivers/gpu/drm/drm_vma_manager.c
153
node = rb_entry(iter, struct drm_mm_node, rb);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
661
struct rb_node *rb;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
664
rb = obj->mmo.offsets.rb_node;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
665
while (rb) {
drivers/gpu/drm/i915/gem/i915_gem_mman.c
667
rb_entry(rb, typeof(*mmo), offset);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
675
rb = rb->rb_right;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
677
rb = rb->rb_left;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
687
struct rb_node *rb, **p;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
690
rb = NULL;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
695
rb = *p;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
696
pos = rb_entry(rb, typeof(*pos), offset);
drivers/gpu/drm/i915/gem/i915_gem_mman.c
707
p = &rb->rb_right;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
709
p = &rb->rb_left;
drivers/gpu/drm/i915/gem/i915_gem_mman.c
711
rb_link_node(&mmo->offset, rb, p);
drivers/gpu/drm/i915/gt/intel_engine.h
381
#define rb_to_uabi_engine(rb) \
drivers/gpu/drm/i915/gt/intel_engine.h
382
rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1025
struct rb_node *rb = rb_first_cached(&el->virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1027
while (rb) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1029
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1034
rb_erase_cached(rb, &el->virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1035
RB_CLEAR_NODE(rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1036
rb = rb_first_cached(&el->virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1278
struct rb_node *rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1445
rb = &ve->nodes[engine->id].rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1446
rb_erase_cached(rb, &execlists->virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1447
RB_CLEAR_NODE(rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1488
while ((rb = rb_first_cached(&sched_engine->queue))) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
1489
struct i915_priolist *p = to_priolist(rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
198
struct rb_node rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
257
static struct i915_priolist *to_priolist(struct rb_node *rb)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
259
return rb_entry(rb, struct i915_priolist, node);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
287
struct rb_node *rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
289
rb = rb_first_cached(&sched_engine->queue);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
290
if (!rb)
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
293
return to_priolist(rb)->priority;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
298
struct rb_node *rb = rb_first_cached(&el->virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
300
return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3149
struct rb_node *rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3179
while ((rb = rb_first_cached(&sched_engine->queue))) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3180
struct i915_priolist *p = to_priolist(rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3198
while ((rb = rb_first_cached(&execlists->virtual))) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3200
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3202
rb_erase_cached(rb, &execlists->virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3203
RB_CLEAR_NODE(rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3634
struct rb_node *node = &ve->nodes[sibling->id].rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3825
struct rb_node **parent, *rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3834
if (!RB_EMPTY_NODE(&node->rb)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3835
rb_erase_cached(&node->rb,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3837
RB_CLEAR_NODE(&node->rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3843
if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3849
&node->rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3853
rb_erase_cached(&node->rb, &sibling->execlists.virtual);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3856
rb = NULL;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3862
rb = *parent;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3863
other = rb_entry(rb, typeof(*other), rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3865
parent = &rb->rb_left;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3867
parent = &rb->rb_right;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3872
rb_link_node(&node->rb, rb, parent);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3873
rb_insert_color_cached(&node->rb,
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
3878
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4020
GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4021
RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4086
struct rb_node *rb;
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4113
for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4114
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4134
for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
4136
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
268
const struct guc_mmio_reg *rb = b;
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
270
return (int)ra->offset - (int)rb->offset;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
1978
struct rb_node *rb;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2002
while ((rb = rb_first_cached(&sched_engine->queue))) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
2003
struct i915_priolist *p = to_priolist(rb);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
404
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
406
return rb_entry(rb, struct i915_priolist, node);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5495
struct rb_node *rb;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5511
for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
5512
struct i915_priolist *pl = to_priolist(rb);
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
932
struct rb_node *rb;
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
953
while ((rb = rb_first_cached(&sched_engine->queue))) {
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
954
struct i915_priolist *p = to_priolist(rb);
drivers/gpu/drm/i915/i915_scheduler.c
113
rb_link_node(&p->node, rb, parent);
drivers/gpu/drm/i915/i915_scheduler.c
34
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
drivers/gpu/drm/i915/i915_scheduler.c
36
return rb_entry(rb, struct i915_priolist, node);
drivers/gpu/drm/i915/i915_scheduler.c
41
struct rb_node *rb;
drivers/gpu/drm/i915/i915_scheduler.c
51
for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
drivers/gpu/drm/i915/i915_scheduler.c
52
const struct i915_priolist *p = to_priolist(rb);
drivers/gpu/drm/i915/i915_scheduler.c
63
struct rb_node **parent, *rb;
drivers/gpu/drm/i915/i915_scheduler.c
74
rb = NULL;
drivers/gpu/drm/i915/i915_scheduler.c
77
rb = *parent;
drivers/gpu/drm/i915/i915_scheduler.c
78
p = to_priolist(rb);
drivers/gpu/drm/i915/i915_scheduler.c
80
parent = &rb->rb_left;
drivers/gpu/drm/i915/i915_scheduler.c
82
parent = &rb->rb_right;
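
The i915_scheduler.c hits above are the classic descend-and-link insertion on a cached rbtree: remember the last node visited and the link slot, then splice and recolor. A minimal sketch, again with a hypothetical prio_node type, sorting higher priority leftmost as the driver does:

#include <linux/rbtree.h>

struct prio_node {
	struct rb_node node;
	int priority;
};

static void prio_insert(struct rb_root_cached *root, struct prio_node *pn)
{
	struct rb_node **parent = &root->rb_root.rb_node;
	struct rb_node *rb = NULL;
	bool first = true;

	while (*parent) {
		struct prio_node *p;

		rb = *parent;
		p = rb_entry(rb, struct prio_node, node);
		if (pn->priority > p->priority) {
			parent = &rb->rb_left;	/* higher priority sorts left */
		} else {
			parent = &rb->rb_right;
			first = false;	/* no longer the leftmost path */
		}
	}

	rb_link_node(&pn->node, rb, parent);
	rb_insert_color_cached(&pn->node, root, first);
}
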
drivers/gpu/drm/i915/i915_vma.c
154
struct rb_node *rb, **p;
drivers/gpu/drm/i915/i915_vma.c
237
rb = NULL;
drivers/gpu/drm/i915/i915_vma.c
242
rb = *p;
drivers/gpu/drm/i915/i915_vma.c
243
pos = rb_entry(rb, struct i915_vma, obj_node);
drivers/gpu/drm/i915/i915_vma.c
252
p = &rb->rb_right;
drivers/gpu/drm/i915/i915_vma.c
254
p = &rb->rb_left;
drivers/gpu/drm/i915/i915_vma.c
258
rb_link_node(&vma->obj_node, rb, p);
drivers/gpu/drm/i915/i915_vma.c
291
struct rb_node *rb;
drivers/gpu/drm/i915/i915_vma.c
293
rb = obj->vma.tree.rb_node;
drivers/gpu/drm/i915/i915_vma.c
294
while (rb) {
drivers/gpu/drm/i915/i915_vma.c
295
struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
drivers/gpu/drm/i915/i915_vma.c
303
rb = rb->rb_right;
drivers/gpu/drm/i915/i915_vma.c
305
rb = rb->rb_left;
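
The i915_vma.c hits above are a hand-rolled binary search over obj->vma.tree (a plain, non-cached rb_root). A sketch with a hypothetical keyed item; the real driver compares against a composite (vm, view) key rather than a single integer:

#include <linux/rbtree.h>

struct item {
	struct rb_node node;
	u64 key;		/* hypothetical lookup key */
};

static struct item *item_lookup(struct rb_root *root, u64 key)
{
	struct rb_node *rb = root->rb_node;

	while (rb) {
		struct item *it = rb_entry(rb, struct item, node);

		if (key < it->key)
			rb = rb->rb_left;
		else if (key > it->key)
			rb = rb->rb_right;
		else
			return it;	/* exact match */
	}
	return NULL;
}
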
drivers/gpu/drm/i915/i915_vma_resource.c
117
if (!RB_EMPTY_NODE(&vma_res->rb)) {
drivers/gpu/drm/i915/i915_vma_resource.c
245
RB_CLEAR_NODE(&vma_res->rb);
drivers/gpu/drm/i915/i915_vma_resource.c
39
INTERVAL_TREE_DEFINE(struct i915_vma_resource, rb,
drivers/gpu/drm/i915/i915_vma_resource.h
107
struct rb_node rb;
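
The i915_vma_resource.c hit above (and the xe_range_fence.c and usnic hits further down) instantiate an augmented interval tree over the embedded rb field. A sketch of that instantiation pattern with hypothetical names; the macro and the generated helpers follow include/linux/interval_tree_generic.h:

#include <linux/interval_tree_generic.h>

struct my_range {
	struct rb_node rb;	/* node field named in the macro */
	u64 start, last;
	u64 __subtree_last;	/* augmented max-of-subtree value */
};

#define MY_START(n) ((n)->start)
#define MY_LAST(n)  ((n)->last)

/* Generates static my_it_insert(), my_it_remove(),
 * my_it_iter_first() and my_it_iter_next() for overlap walks. */
INTERVAL_TREE_DEFINE(struct my_range, rb, u64, __subtree_last,
		     MY_START, MY_LAST, static, my_it)
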
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
222
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
305
if (!adreno_idle(gpu, gpu->rb[0]))
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
58
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
289
gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
404
if (!adreno_idle(gpu, gpu->rb[0]))
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
87
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
158
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
325
gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
388
if (!adreno_idle(gpu, gpu->rb[0]))
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
480
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
523
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
934
gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
948
shadowptr(a5xx_gpu, gpu->rb[0]));
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
971
OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
972
OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
974
a5xx_flush(gpu, gpu->rb[0], true);
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
975
if (!a5xx_idle(gpu, gpu->rb[0]))
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
989
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
990
OUT_RING(gpu->rb[0], 0x00000000);
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
992
a5xx_flush(gpu, gpu->rb[0], true);
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
993
if (!a5xx_idle(gpu, gpu->rb[0]))
drivers/gpu/drm/msm/adreno/a5xx_power.c
224
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
224
a5xx_gpu->cur_ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
235
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
236
a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
315
if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
65
struct msm_ringbuffer *ring = gpu->rb[i];
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1002
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1512
gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1527
shadowptr(a6xx_gpu, gpu->rb[0]));
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1535
rbmemptr(gpu->rb[0], bv_rptr));
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1541
a6xx_gpu->cur_ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1544
gpu->rb[i]->cur_ctx_seqno = 0;
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1567
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1568
OUT_RING(gpu->rb[0], 0x00000000);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1570
a6xx_flush(gpu, gpu->rb[0]);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
1571
if (!a6xx_idle(gpu, gpu->rb[0]))
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
945
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
971
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
220
record_ptr->rptr_addr = shadowptr(a6xx_gpu, gpu->rb[i]);
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
223
record_ptr->rbase = gpu->rb[i]->iova;
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
238
a6xx_gpu->cur_ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
442
if (preempt_init_ring(a6xx_gpu, gpu->rb[i]))
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
73
struct msm_ringbuffer *ring = gpu->rb[i];
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
391
struct msm_ringbuffer *ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
644
gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
648
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0]));
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
649
gpu_write64(gpu, REG_A8XX_CP_RB_RPTR_ADDR_BV, rbmemptr(gpu->rb[0], bv_rptr));
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
655
a6xx_gpu->cur_ring = gpu->rb[0];
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
658
gpu->rb[i]->cur_ctx_seqno = 0;
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
676
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
677
OUT_RING(gpu->rb[0], 0x00000000);
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
679
a6xx_flush(gpu, gpu->rb[0]);
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
680
if (!a8xx_idle(gpu, gpu->rb[0]))
drivers/gpu/drm/msm/adreno/adreno_device.c
344
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
drivers/gpu/drm/msm/adreno/adreno_device.c
355
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
drivers/gpu/drm/msm/adreno/adreno_gpu.c
1038
struct msm_ringbuffer *ring = gpu->rb[i];
drivers/gpu/drm/msm/adreno/adreno_gpu.c
662
struct msm_ringbuffer *ring = gpu->rb[i];
drivers/gpu/drm/msm/adreno/adreno_gpu.c
695
return gpu->rb[0];
drivers/gpu/drm/msm/adreno/adreno_gpu.c
766
state->ring[i].fence = gpu->rb[i]->memptrs->fence;
drivers/gpu/drm/msm/adreno/adreno_gpu.c
767
state->ring[i].iova = gpu->rb[i]->iova;
drivers/gpu/drm/msm/adreno/adreno_gpu.c
768
state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
drivers/gpu/drm/msm/adreno/adreno_gpu.c
769
state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
drivers/gpu/drm/msm/adreno/adreno_gpu.c
770
state->ring[i].wptr = get_wptr(gpu->rb[i]);
drivers/gpu/drm/msm/adreno/adreno_gpu.c
777
if (gpu->rb[i]->start[j])
drivers/gpu/drm/msm/adreno/adreno_gpu.c
781
state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
drivers/gpu/drm/msm/msm_gem_submit.c
600
ring = gpu->rb[queue->ring_nr];
drivers/gpu/drm/msm/msm_gem_submit.c
74
submit->ring = gpu->rb[queue->ring_nr];
drivers/gpu/drm/msm/msm_gpu.c
1086
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
drivers/gpu/drm/msm/msm_gpu.c
1088
ARRAY_SIZE(gpu->rb));
drivers/gpu/drm/msm/msm_gpu.c
1089
nr_rings = ARRAY_SIZE(gpu->rb);
drivers/gpu/drm/msm/msm_gpu.c
1094
gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
drivers/gpu/drm/msm/msm_gpu.c
1096
if (IS_ERR(gpu->rb[i])) {
drivers/gpu/drm/msm/msm_gpu.c
1097
ret = PTR_ERR(gpu->rb[i]);
drivers/gpu/drm/msm/msm_gpu.c
1114
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
drivers/gpu/drm/msm/msm_gpu.c
1115
msm_ringbuffer_destroy(gpu->rb[i]);
drivers/gpu/drm/msm/msm_gpu.c
1116
gpu->rb[i] = NULL;
drivers/gpu/drm/msm/msm_gpu.c
1131
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
drivers/gpu/drm/msm/msm_gpu.c
1132
msm_ringbuffer_destroy(gpu->rb[i]);
drivers/gpu/drm/msm/msm_gpu.c
1133
gpu->rb[i] = NULL;
drivers/gpu/drm/msm/msm_gpu.c
535
struct msm_ringbuffer *ring = gpu->rb[i];
drivers/gpu/drm/msm/msm_gpu.c
560
struct msm_ringbuffer *ring = gpu->rb[i];
drivers/gpu/drm/msm/msm_gpu.c
832
struct msm_ringbuffer *ring = gpu->rb[i];
drivers/gpu/drm/msm/msm_gpu.c
872
msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
drivers/gpu/drm/msm/msm_gpu.h
183
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
drivers/gpu/drm/msm/msm_gpu.h
314
struct msm_ringbuffer *ring = gpu->rb[i];
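
Here gpu->rb is not an rbtree at all but the fixed array of GPU ringbuffers. The msm_gpu.c hits above clamp the requested ring count to the array, create each ring, and tear everything down on failure. A sketch as a hypothetical wrapper, assuming msm_ringbuffer_destroy() tolerates NULL as the driver's own cleanup loop over the full array suggests:

static int rings_init(struct msm_gpu *gpu, int nr_rings,
		      void *memptrs, u64 memptrs_iova)
{
	int i, ret;

	if (nr_rings > ARRAY_SIZE(gpu->rb))
		nr_rings = ARRAY_SIZE(gpu->rb);	/* clamp, don't fail */

	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			gpu->rb[i] = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);	/* assumed NULL-safe */
		gpu->rb[i] = NULL;
	}
	return ret;
}
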
drivers/gpu/drm/msm/msm_submitqueue.c
229
queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
drivers/gpu/drm/omapdrm/dss/dispc.c
1291
FLD_VAL(coefs->rb, 9, 0);
drivers/gpu/drm/omapdrm/dss/omapdss.h
144
s16 rr, rg, rb;
drivers/gpu/drm/omapdrm/omap_crtc.c
387
cpr->rb = omap_crtc_s31_32_to_s2_8(ctm->matrix[2]);
drivers/gpu/drm/radeon/radeon_vm.c
1239
&vm->va.rb_root, it.rb) {
drivers/gpu/drm/scheduler/sched_main.c
316
struct rb_node *rb;
drivers/gpu/drm/scheduler/sched_main.c
319
for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
drivers/gpu/drm/scheduler/sched_main.c
322
entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
drivers/gpu/drm/scheduler/sched_main.c
338
return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
106
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
128
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
150
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
172
KUNIT_EXPECT_TRUE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
194
KUNIT_EXPECT_TRUE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
217
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
240
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
26
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
264
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
288
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
312
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
336
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
360
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
384
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
411
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
435
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
44
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
457
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
479
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
555
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
578
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
601
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
624
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
647
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
66
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
670
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
697
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
720
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
744
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
766
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
789
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
812
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
832
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
88
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/tests/drm_cmdline_parser_test.c
980
KUNIT_EXPECT_FALSE(test, mode.rb);
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
1052
const struct vf_runtime_reg *rb = b;
drivers/gpu/drm/xe/xe_gt_sriov_vf.c
1054
return (int)ra->offset - (int)rb->offset;
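
In the intel_guc_ads.c and xe_gt_sriov_vf.c hits, rb is simply the right-hand element of a sort comparator. A sketch of the pattern, assuming the offsets are small enough that the subtraction cannot overflow an int; the struct name is hypothetical and sort() is the lib/sort.c API:

#include <linux/sort.h>

struct reg { u32 offset; };

static int cmp_reg(const void *a, const void *b)
{
	const struct reg *ra = a, *rb = b;

	return (int)ra->offset - (int)rb->offset;	/* ascending by offset */
}

/* usage: sort(regs, num, sizeof(*regs), cmp_reg, NULL); */
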
drivers/gpu/drm/xe/xe_range_fence.c
16
INTERVAL_TREE_DEFINE(struct xe_range_fence, rb, u64, __subtree_last,
drivers/gpu/drm/xe/xe_range_fence.h
25
struct rb_node rb;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
105
rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
107
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
108
kfree(rb->buffer.data);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
109
kfree(rb);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
115
rb = list_entry(cl->in_process_list.list.next,
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
117
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
118
kfree(rb->buffer.data);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
119
kfree(rb);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
165
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
167
if (rb == NULL)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
170
kfree(rb->buffer.data);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
171
kfree(rb);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
184
struct ishtp_cl_rb *rb;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
186
rb = kzalloc_obj(struct ishtp_cl_rb);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
187
if (!rb)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
190
INIT_LIST_HEAD(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
191
rb->cl = cl;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
192
rb->buf_idx = 0;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
193
return rb;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
205
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
207
if (!rb)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
213
rb->buffer.data = kmalloc(length, GFP_KERNEL);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
214
if (!rb->buffer.data)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
217
rb->buffer.size = length;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
229
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
23
struct ishtp_cl_rb *rb;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
235
if (!rb || !rb->cl)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
238
cl = rb->cl;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
240
list_add_tail(&rb->list, &cl->free_rb_list.list);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
265
struct ishtp_cl_rb *rb;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
268
rb = list_first_entry_or_null(&cl->in_process_list.list,
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
270
if (rb)
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
271
list_del_init(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
274
return rb;
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
28
rb = ishtp_io_rb_init(cl);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
29
if (!rb) {
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
33
ret = ishtp_io_rb_alloc_buf(rb, len);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
37
list_add_tail(&rb->list, &cl->free_rb_list.list);
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
99
struct ishtp_cl_rb *rb;
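
In the ISHTP hits, rb is a read buffer cycled between a client's free list and its in-process list. A sketch of the recycle step those hits trace, with the real driver's locking and reader wakeups elided; the field names follow the hits above and the list helpers are the kernel API:

#include <linux/list.h>

/* Return a completed read buffer to its client's free pool. */
static void rb_recycle(struct ishtp_cl_rb *rb, struct ishtp_cl *cl)
{
	rb->buf_idx = 0;			/* rewind the fill position */
	list_del_init(&rb->list);		/* leave the in-process list */
	list_add_tail(&rb->list, &cl->free_rb_list.list);
}
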
drivers/hid/intel-ish-hid/ishtp/client.c
1003
list_for_each_entry(rb, &dev->read_list.list, list) {
drivers/hid/intel-ish-hid/ishtp/client.c
1004
cl = rb->cl;
drivers/hid/intel-ish-hid/ishtp/client.c
1011
if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
drivers/hid/intel-ish-hid/ishtp/client.c
1015
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
1016
ishtp_io_rb_free(rb);
drivers/hid/intel-ish-hid/ishtp/client.c
1027
if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
drivers/hid/intel-ish-hid/ishtp/client.c
1031
rb->buffer.size, ishtp_hdr->length,
drivers/hid/intel-ish-hid/ishtp/client.c
1032
rb->buf_idx);
drivers/hid/intel-ish-hid/ishtp/client.c
1033
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
1034
ishtp_cl_io_rb_recycle(rb);
drivers/hid/intel-ish-hid/ishtp/client.c
1039
buffer = rb->buffer.data + rb->buf_idx;
drivers/hid/intel-ish-hid/ishtp/client.c
1042
rb->buf_idx += ishtp_hdr->length;
drivers/hid/intel-ish-hid/ishtp/client.c
1046
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
1047
complete_rb = rb;
drivers/hid/intel-ish-hid/ishtp/client.c
1115
struct ishtp_cl_rb *rb;
drivers/hid/intel-ish-hid/ishtp/client.c
1123
list_for_each_entry(rb, &dev->read_list.list, list) {
drivers/hid/intel-ish-hid/ishtp/client.c
1124
cl = rb->cl;
drivers/hid/intel-ish-hid/ishtp/client.c
1133
if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
drivers/hid/intel-ish-hid/ishtp/client.c
1137
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
1138
ishtp_io_rb_free(rb);
drivers/hid/intel-ish-hid/ishtp/client.c
1149
if (rb->buffer.size < hbm->msg_length) {
drivers/hid/intel-ish-hid/ishtp/client.c
1153
rb->buffer.size, hbm->msg_length, rb->buf_idx);
drivers/hid/intel-ish-hid/ishtp/client.c
1154
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
1155
ishtp_cl_io_rb_recycle(rb);
drivers/hid/intel-ish-hid/ishtp/client.c
1160
buffer = rb->buffer.data;
drivers/hid/intel-ish-hid/ishtp/client.c
1171
rb->buf_idx = hbm->msg_length;
drivers/hid/intel-ish-hid/ishtp/client.c
1175
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
1176
complete_rb = rb;
drivers/hid/intel-ish-hid/ishtp/client.c
25
struct ishtp_cl_rb *rb;
drivers/hid/intel-ish-hid/ishtp/client.c
30
list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
drivers/hid/intel-ish-hid/ishtp/client.c
31
if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
drivers/hid/intel-ish-hid/ishtp/client.c
32
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
34
list_add_tail(&rb->list, &cl->free_rb_list.list);
drivers/hid/intel-ish-hid/ishtp/client.c
593
struct ishtp_cl_rb *rb;
drivers/hid/intel-ish-hid/ishtp/client.c
623
rb = NULL;
drivers/hid/intel-ish-hid/ishtp/client.c
627
rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
drivers/hid/intel-ish-hid/ishtp/client.c
628
list_del_init(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
631
rb->cl = cl;
drivers/hid/intel-ish-hid/ishtp/client.c
632
rb->buf_idx = 0;
drivers/hid/intel-ish-hid/ishtp/client.c
634
INIT_LIST_HEAD(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
642
list_add_tail(&rb->list, &dev->read_list.list);
drivers/hid/intel-ish-hid/ishtp/client.c
650
if (rets && rb) {
drivers/hid/intel-ish-hid/ishtp/client.c
652
list_del(&rb->list);
drivers/hid/intel-ish-hid/ishtp/client.c
656
list_add_tail(&rb->list, &cl->free_rb_list.list);
drivers/hid/intel-ish-hid/ishtp/client.c
755
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
drivers/hid/intel-ish-hid/ishtp/client.c
759
struct ishtp_cl *cl = rb->cl;
drivers/hid/intel-ish-hid/ishtp/client.c
767
list_add_tail(&rb->list, &cl->in_process_list.list);
drivers/hid/intel-ish-hid/ishtp/client.c
985
struct ishtp_cl_rb *rb;
drivers/hid/intel-ish-hid/ishtp/client.h
138
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);
drivers/i2c/busses/i2c-cpm.c
188
u_char *rb;
drivers/i2c/busses/i2c-cpm.c
197
rb = cpm->rxbuf[rx];
drivers/i2c/busses/i2c-cpm.c
200
rb = (u_char *) (((ulong) rb + 1) & ~1);
drivers/i2c/busses/i2c-cpm.c
246
u_char *rb;
drivers/i2c/busses/i2c-cpm.c
253
rb = cpm->rxbuf[rx];
drivers/i2c/busses/i2c-cpm.c
256
rb = (u_char *) (((uint) rb + 1) & ~1);
drivers/i2c/busses/i2c-cpm.c
276
memcpy(pmsg->buf, rb, pmsg->len);
drivers/i2c/i2c-stub.c
101
if (rb == NULL && create) {
drivers/i2c/i2c-stub.c
102
rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL);
drivers/i2c/i2c-stub.c
103
if (rb == NULL)
drivers/i2c/i2c-stub.c
104
return rb;
drivers/i2c/i2c-stub.c
105
rb->command = command;
drivers/i2c/i2c-stub.c
106
list_add(&rb->node, &chip->smbus_blocks);
drivers/i2c/i2c-stub.c
108
return rb;
drivers/i2c/i2c-stub.c
93
struct smbus_block_data *b, *rb = NULL;
drivers/i2c/i2c-stub.c
97
rb = b;
drivers/iio/industrialio-buffer.c
144
struct iio_buffer *rb = ib->buffer;
drivers/iio/industrialio-buffer.c
154
if (!rb || !rb->access->read)
drivers/iio/industrialio-buffer.c
157
if (rb->direction != IIO_BUFFER_DIRECTION_IN)
drivers/iio/industrialio-buffer.c
160
datum_size = rb->bytes_per_datum;
drivers/iio/industrialio-buffer.c
172
to_wait = min_t(size_t, n / datum_size, rb->watermark);
drivers/iio/industrialio-buffer.c
174
add_wait_queue(&rb->pollq, &wait);
drivers/iio/industrialio-buffer.c
181
if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
drivers/iio/industrialio-buffer.c
192
ret = rb->access->read(rb, n, buf);
drivers/iio/industrialio-buffer.c
196
remove_wait_queue(&rb->pollq, &wait);
drivers/iio/industrialio-buffer.c
213
struct iio_buffer *rb = ib->buffer;
drivers/iio/industrialio-buffer.c
222
if (!rb || !rb->access->write)
drivers/iio/industrialio-buffer.c
225
if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
drivers/iio/industrialio-buffer.c
229
add_wait_queue(&rb->pollq, &wait);
drivers/iio/industrialio-buffer.c
236
if (!iio_buffer_space_available(rb)) {
drivers/iio/industrialio-buffer.c
253
ret = rb->access->write(rb, n - written, buf + written);
drivers/iio/industrialio-buffer.c
260
remove_wait_queue(&rb->pollq, &wait);
drivers/iio/industrialio-buffer.c
278
struct iio_buffer *rb = ib->buffer;
drivers/iio/industrialio-buffer.c
281
if (!indio_dev->info || !rb)
drivers/iio/industrialio-buffer.c
284
poll_wait(filp, &rb->pollq, wait);
drivers/iio/industrialio-buffer.c
286
switch (rb->direction) {
drivers/iio/industrialio-buffer.c
288
if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
drivers/iio/industrialio-buffer.c
292
if (iio_buffer_space_available(rb))
drivers/iio/industrialio-buffer.c
304
struct iio_buffer *rb = ib->buffer;
drivers/iio/industrialio-buffer.c
307
if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
drivers/iio/industrialio-buffer.c
317
struct iio_buffer *rb = ib->buffer;
drivers/iio/industrialio-buffer.c
320
if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
drivers/iio/industrialio-buffer.c
330
struct iio_buffer *rb = ib->buffer;
drivers/iio/industrialio-buffer.c
333
if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
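
The industrialio-buffer.c hits sketch a blocking read: park on the buffer's poll queue, retry the access op until data arrives. A simplified sketch under that assumption; the real driver also checks watermarks, datum sizes, and non-blocking mode, all elided here:

#include <linux/wait.h>
#include <linux/sched/signal.h>

static ssize_t buffered_read(struct iio_buffer *rb, size_t n,
			     char __user *buf)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	ssize_t ret = 0;

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		ret = rb->access->read(rb, n, buf);
		if (ret == 0)	/* nothing buffered yet: sleep until woken */
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
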
drivers/infiniband/hw/hfi1/pin_system.c
111
return container_of(rb_node, struct sdma_mmu_node, rb);
drivers/infiniband/hw/hfi1/pin_system.c
151
node->rb.addr = start_address;
drivers/infiniband/hw/hfi1/pin_system.c
152
node->rb.len = length;
drivers/infiniband/hw/hfi1/pin_system.c
17
struct mmu_rb_node rb;
drivers/infiniband/hw/hfi1/pin_system.c
181
kref_init(&node->rb.refcount);
drivers/infiniband/hw/hfi1/pin_system.c
184
kref_get(&node->rb.refcount);
drivers/infiniband/hw/hfi1/pin_system.c
189
ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
drivers/infiniband/hw/hfi1/pin_system.c
239
if (node->rb.addr <= start) {
drivers/infiniband/hw/hfi1/pin_system.c
249
node->rb.addr, kref_read(&node->rb.refcount));
drivers/infiniband/hw/hfi1/pin_system.c
250
prepend_len = node->rb.addr - start;
drivers/infiniband/hw/hfi1/pin_system.c
256
kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
drivers/infiniband/hw/hfi1/pin_system.c
279
kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
drivers/infiniband/hw/hfi1/pin_system.c
301
page_index = PFN_DOWN(start - cache_entry->rb.addr);
drivers/infiniband/hw/hfi1/pin_system.c
366
from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
drivers/infiniband/hw/hfi1/pin_system.c
378
kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
drivers/infiniband/hw/hfi1/pin_system.c
455
container_of(mnode, struct sdma_mmu_node, rb);
drivers/infiniband/hw/hfi1/pin_system.c
471
container_of(mnode, struct sdma_mmu_node, rb);
drivers/infiniband/hw/hfi1/pin_system.c
74
return node->rb.handler->mn.mm;
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
269
INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb,
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
40
struct rb_node rb;
drivers/iommu/iommufd/io_pagetable.c
251
RB_CLEAR_NODE(&area->node.rb);
drivers/iommu/iommufd/io_pagetable.c
252
RB_CLEAR_NODE(&area->pages_node.rb);
drivers/iommu/iommufd/pages.c
2015
WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb));
drivers/iommu/riscv/iommu.c
136
u64 qb, rb;
drivers/iommu/riscv/iommu.c
182
rb = riscv_iommu_readq(iommu, queue->qbr);
drivers/iommu/riscv/iommu.c
183
if (rb != qb) {
drivers/md/dm-cache-target.c
1376
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
drivers/md/dm-cache-target.c
1378
BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
drivers/md/dm-cache-target.c
1622
bool rb, background_queued;
drivers/md/dm-cache-target.c
1627
rb = bio_detain_shared(cache, block, bio);
drivers/md/dm-cache-target.c
1628
if (!rb) {
drivers/media/dvb-frontends/dib3000mb.c
54
u8 rb[2] = {};
drivers/media/dvb-frontends/dib3000mb.c
57
{ .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 },
drivers/media/dvb-frontends/dib3000mb.c
64
(rb[0] << 8) | rb[1],(rb[0] << 8) | rb[1]);
drivers/media/dvb-frontends/dib3000mb.c
66
return (rb[0] << 8) | rb[1];
drivers/media/pci/ngene/ngene-core.c
756
static void free_ringbuffer(struct ngene *dev, struct SRingBufferDescriptor *rb)
drivers/media/pci/ngene/ngene-core.c
758
struct SBufferHeader *Cur = rb->Head;
drivers/media/pci/ngene/ngene-core.c
764
for (j = 0; j < rb->NumBuffers; j++, Cur = Cur->Next) {
drivers/media/pci/ngene/ngene-core.c
767
rb->Buffer1Length, Cur->Buffer1,
drivers/media/pci/ngene/ngene-core.c
772
rb->Buffer2Length, Cur->Buffer2,
drivers/media/pci/ngene/ngene-core.c
776
if (rb->SCListMem)
drivers/media/pci/ngene/ngene-core.c
777
dma_free_coherent(&dev->pci_dev->dev, rb->SCListMemSize,
drivers/media/pci/ngene/ngene-core.c
778
rb->SCListMem, rb->PASCListMem);
drivers/media/pci/ngene/ngene-core.c
780
dma_free_coherent(&dev->pci_dev->dev, rb->MemSize, rb->Head,
drivers/media/pci/ngene/ngene-core.c
781
rb->PAHead);
drivers/media/pci/ngene/ngene-core.c
785
struct SRingBufferDescriptor *rb,
drivers/media/pci/ngene/ngene-core.c
791
if (!rb->Head)
drivers/media/pci/ngene/ngene-core.c
793
free_ringbuffer(dev, rb);
drivers/media/platform/chips-media/coda/coda-bit.c
1041
struct v4l2_requestbuffers *rb)
drivers/media/platform/chips-media/coda/coda-bit.c
1046
if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
drivers/media/platform/chips-media/coda/coda-bit.c
1049
if (rb->count) {
drivers/media/platform/chips-media/coda/coda-bit.c
1797
struct v4l2_requestbuffers *rb)
drivers/media/platform/chips-media/coda/coda-bit.c
1802
if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
drivers/media/platform/chips-media/coda/coda-bit.c
1805
if (rb->count) {
drivers/media/platform/chips-media/coda/coda-common.c
963
struct v4l2_requestbuffers *rb)
drivers/media/platform/chips-media/coda/coda-common.c
968
ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb);
drivers/media/platform/chips-media/coda/coda-common.c
976
if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs)
drivers/media/platform/chips-media/coda/coda-common.c
977
return ctx->ops->reqbufs(ctx, rb);
drivers/media/platform/chips-media/coda/coda.h
206
int (*reqbufs)(struct coda_ctx *ctx, struct v4l2_requestbuffers *rb);
drivers/media/platform/rockchip/rga/rga-hw.c
44
struct rga_addr_offset *lt, *lb, *rt, *rb;
drivers/media/platform/rockchip/rga/rga-hw.c
51
rb = &offsets.right_bottom;
drivers/media/platform/rockchip/rga/rga-hw.c
70
rb->y_off = lb->y_off + (w - 1) * pixel_width;
drivers/media/platform/rockchip/rga/rga-hw.c
71
rb->u_off = lb->u_off + w / x_div - 1;
drivers/media/platform/rockchip/rga/rga-hw.c
72
rb->v_off = lb->v_off + w / x_div - 1;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
533
struct v4l2_requestbuffers *rb)
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
538
ret = vb2_ioctl_reqbufs(file, priv, rb);
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
542
if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) {
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
543
rb->count = 0;
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
544
vb2_ioctl_reqbufs(file, priv, rb);
drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c
548
isp->video_capture.reqbufs_count = rb->count;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
885
struct v4l2_requestbuffers *rb)
drivers/media/platform/samsung/s3c-camif/camif-capture.c
891
vp->id, rb->count, vp->owner, file_to_v4l2_fh(file));
drivers/media/platform/samsung/s3c-camif/camif-capture.c
896
if (rb->count)
drivers/media/platform/samsung/s3c-camif/camif-capture.c
897
rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
901
ret = vb2_reqbufs(&vp->vb_queue, rb);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
905
if (rb->count && rb->count < CAMIF_REQ_BUFS_MIN) {
drivers/media/platform/samsung/s3c-camif/camif-capture.c
906
rb->count = 0;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
907
vb2_reqbufs(&vp->vb_queue, rb);
drivers/media/platform/samsung/s3c-camif/camif-capture.c
911
vp->reqbufs_count = rb->count;
drivers/media/platform/samsung/s3c-camif/camif-capture.c
912
if (vp->owner == NULL && rb->count > 0)
drivers/media/platform/ti/omap3isp/ispvideo.c
941
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
drivers/media/platform/ti/omap3isp/ispvideo.c
948
ret = vb2_reqbufs(&vfh->queue, rb);
drivers/media/usb/dvb-usb/dib0700_devices.c
2217
u8 rb[2];
drivers/media/usb/dvb-usb/dib0700_devices.c
2220
{.addr = 0x1e >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2},
drivers/media/usb/dvb-usb/dib0700_devices.c
2229
switch (rb[0] << 8 | rb[1]) {
drivers/media/usb/dvb-usb/dib0700_devices.c
2254
wb[2] |= rb[0];
drivers/media/usb/dvb-usb/dib0700_devices.c
2255
wb[3] |= rb[1] & ~(3 << 4);
drivers/media/v4l2-core/v4l2-mem2mem.c
1374
struct v4l2_requestbuffers *rb)
drivers/media/v4l2-core/v4l2-mem2mem.c
1378
return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
104
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
106
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
111
true, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
124
void __iomem *rb = priv->reg_base;
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
141
writel(EEPROM_CMD_EPC_BUSY_BIT | (off + byte), rb +
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
148
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
154
buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
165
void __iomem *rb = priv->reg_base;
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
182
writel(*(value + byte), rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
185
writel(regval, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
187
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
193
rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
219
void __iomem *rb = priv->reg_base;
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
238
data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
240
rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
241
data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
243
rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
249
rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
251
data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
257
buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
268
void __iomem *rb = priv->reg_base;
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
292
data = readl(rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
294
rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
295
writel(*(value + byte), rb + MMAP_OTP_OFFSET(OTP_PRGM_DATA_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
296
data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
298
rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
299
data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
301
rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
307
rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
309
data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET));
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
99
void __iomem *rb = priv->reg_base;
drivers/misc/sram.c
170
const struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
drivers/misc/sram.c
172
return ra->start - rb->start;
drivers/mtd/mtdswap.c
199
rb_erase(&eb->rb, eb->root);
drivers/mtd/mtdswap.c
211
cur = rb_entry(parent, struct swap_eb, rb);
drivers/mtd/mtdswap.c
218
rb_link_node(&eb->rb, parent, p);
drivers/mtd/mtdswap.c
219
rb_insert_color(&eb->rb, root);
drivers/mtd/mtdswap.c
438
median = rb_entry(medrb, struct swap_eb, rb)->erase_count;
drivers/mtd/mtdswap.c
451
rb_erase(&eb->rb, &hist_root);
drivers/mtd/mtdswap.c
576
eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
drivers/mtd/mtdswap.c
577
rb_erase(&eb->rb, clean_root);
drivers/mtd/mtdswap.c
70
struct rb_node rb;
drivers/mtd/mtdswap.c
80
rb)->erase_count)
drivers/mtd/mtdswap.c
82
rb)->erase_count)
drivers/mtd/mtdswap.c
860
eb = rb_entry(rb_first(rp), struct swap_eb, rb);
drivers/mtd/mtdswap.c
862
rb_erase(&eb->rb, rp);
drivers/mtd/nand/raw/arasan-nand-controller.c
1277
int rb, ret, i;
drivers/mtd/nand/raw/arasan-nand-controller.c
1305
ret = of_property_read_u32(np, "nand-rb", &rb);
drivers/mtd/nand/raw/arasan-nand-controller.c
1309
if (rb >= ANFC_MAX_CS) {
drivers/mtd/nand/raw/arasan-nand-controller.c
1310
dev_err(nfc->dev, "Wrong RB %d\n", rb);
drivers/mtd/nand/raw/arasan-nand-controller.c
1314
anand->rb = rb;
drivers/mtd/nand/raw/arasan-nand-controller.c
174
unsigned int rb;
drivers/mtd/nand/raw/arasan-nand-controller.c
261
val & BIT(anand->rb),
drivers/mtd/nand/raw/atmel/nand-controller.c
1060
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
drivers/mtd/nand/raw/atmel/nand-controller.c
150
struct atmel_nand_rb rb;
drivers/mtd/nand/raw/atmel/nand-controller.c
1530
if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
drivers/mtd/nand/raw/atmel/nand-controller.c
1531
cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
drivers/mtd/nand/raw/atmel/nand-controller.c
1700
nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
drivers/mtd/nand/raw/atmel/nand-controller.c
1701
nand->cs[i].rb.id = val;
drivers/mtd/nand/raw/atmel/nand-controller.c
1715
nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
drivers/mtd/nand/raw/atmel/nand-controller.c
1716
nand->cs[i].rb.gpio = gpio;
drivers/mtd/nand/raw/atmel/nand-controller.c
1834
nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
drivers/mtd/nand/raw/atmel/nand-controller.c
1835
nand->cs[0].rb.gpio = gpio;
drivers/mtd/nand/raw/atmel/nand-controller.c
521
if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
drivers/mtd/nand/raw/atmel/nand-controller.c
524
return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
drivers/mtd/nand/raw/atmel/nand-controller.c
534
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
drivers/mtd/nand/raw/atmel/nand-controller.c
538
mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
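
In the raw-NAND hits, rb is the ready/busy line description, either a native controller input or a GPIO. A sketch of the dispatch the atmel hits above imply; nand_gpio_waitrdy() is the generic raw-NAND helper, the native-R/B branch (polling the HSMC status register) is controller-specific and elided:

static int anand_waitrdy(struct atmel_nand *nand, unsigned long timeout_ms)
{
	if (nand->activecs->rb.type == ATMEL_NAND_GPIO_RB)
		return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
					 timeout_ms);

	/* ATMEL_NAND_NATIVE_RB: wait for an R/B edge in the controller's
	 * status register instead (elided in this sketch). */
	return -EOPNOTSUPP;
}
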
drivers/mtd/nand/raw/marvell_nand.c
174
#define NDSR_RDY(rb) BIT(11 + rb)
drivers/mtd/nand/raw/marvell_nand.c
2611
u32 cs, rb;
drivers/mtd/nand/raw/marvell_nand.c
2697
rb = 0;
drivers/mtd/nand/raw/marvell_nand.c
2700
&rb);
drivers/mtd/nand/raw/marvell_nand.c
2709
if (rb >= nfc->caps->max_rb_nb) {
drivers/mtd/nand/raw/marvell_nand.c
2711
rb, nfc->caps->max_rb_nb);
drivers/mtd/nand/raw/marvell_nand.c
2715
marvell_nand->sels[i].rb = rb;
drivers/mtd/nand/raw/marvell_nand.c
324
unsigned int rb;
drivers/mtd/nand/raw/sunxi_nand.c
2085
if (sunxi_nand->sels[op->cs].rb >= 0)
drivers/mtd/nand/raw/sunxi_nand.c
2168
sunxi_nand->sels[i].rb = tmp;
drivers/mtd/nand/raw/sunxi_nand.c
2170
sunxi_nand->sels[i].rb = -1;
drivers/mtd/nand/raw/sunxi_nand.c
229
s8 rb;
drivers/mtd/nand/raw/sunxi_nand.c
540
if (sel->rb >= 0)
drivers/mtd/nand/raw/sunxi_nand.c
541
ctl |= NFC_RB_SEL(sel->rb);
drivers/mtd/ubi/attach.c
113
av = rb_entry(parent, struct ubi_ainf_volume, rb);
drivers/mtd/ubi/attach.c
1289
aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
drivers/mtd/ubi/attach.c
1292
if (this->rb_left == &aeb->u.rb)
drivers/mtd/ubi/attach.c
1315
struct rb_node *rb;
drivers/mtd/ubi/attach.c
1339
rb = ai->volumes.rb_node;
drivers/mtd/ubi/attach.c
1340
while (rb) {
drivers/mtd/ubi/attach.c
1341
if (rb->rb_left)
drivers/mtd/ubi/attach.c
1342
rb = rb->rb_left;
drivers/mtd/ubi/attach.c
1343
else if (rb->rb_right)
drivers/mtd/ubi/attach.c
1344
rb = rb->rb_right;
drivers/mtd/ubi/attach.c
1346
av = rb_entry(rb, struct ubi_ainf_volume, rb);
drivers/mtd/ubi/attach.c
1348
rb = rb_parent(rb);
drivers/mtd/ubi/attach.c
1349
if (rb) {
drivers/mtd/ubi/attach.c
1350
if (rb->rb_left == &av->rb)
drivers/mtd/ubi/attach.c
1351
rb->rb_left = NULL;
drivers/mtd/ubi/attach.c
1353
rb->rb_right = NULL;
drivers/mtd/ubi/attach.c
1415
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
drivers/mtd/ubi/attach.c
1416
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
drivers/mtd/ubi/attach.c
143
rb_link_node(&av->rb, parent, p);
drivers/mtd/ubi/attach.c
144
rb_insert_color(&av->rb, &ai->volumes);
drivers/mtd/ubi/attach.c
1676
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
drivers/mtd/ubi/attach.c
1719
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
drivers/mtd/ubi/attach.c
1790
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
drivers/mtd/ubi/attach.c
1792
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
drivers/mtd/ubi/attach.c
1878
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
drivers/mtd/ubi/attach.c
1879
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
drivers/mtd/ubi/attach.c
595
aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
drivers/mtd/ubi/attach.c
704
rb_link_node(&aeb->u.rb, parent, p);
drivers/mtd/ubi/attach.c
705
rb_insert_color(&aeb->u.rb, &av->root);
drivers/mtd/ubi/attach.c
753
rb_erase(&av->rb, &ai->volumes);
drivers/mtd/ubi/eba.c
1534
struct rb_node *rb;
drivers/mtd/ubi/eba.c
1573
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
drivers/mtd/ubi/eba.c
1580
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
drivers/mtd/ubi/eba.c
1625
struct rb_node *rb;
drivers/mtd/ubi/eba.c
1657
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
drivers/mtd/ubi/eba.c
215
le = rb_entry(p, struct ubi_ltree_entry, rb);
drivers/mtd/ubi/eba.c
281
le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
drivers/mtd/ubi/eba.c
296
rb_link_node(&le->rb, parent, p);
drivers/mtd/ubi/eba.c
297
rb_insert_color(&le->rb, &ubi->ltree);
drivers/mtd/ubi/eba.c
342
rb_erase(&le->rb, &ubi->ltree);
drivers/mtd/ubi/eba.c
394
rb_erase(&le->rb, &ubi->ltree);
drivers/mtd/ubi/eba.c
418
rb_erase(&le->rb, &ubi->ltree);
drivers/mtd/ubi/fastmap-wl.c
244
rb_erase(&e->u.rb, &ubi->free);
drivers/mtd/ubi/fastmap-wl.c
32
ubi_rb_for_each_entry(p, e, root, u.rb) {
drivers/mtd/ubi/fastmap-wl.c
404
e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/fastmap-wl.c
558
struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/fastmap-wl.c
94
rb_erase(&e->u.rb, &ubi->free);
drivers/mtd/ubi/fastmap.c
213
tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
drivers/mtd/ubi/fastmap.c
228
rb_link_node(&aeb->u.rb, parent, p);
drivers/mtd/ubi/fastmap.c
229
rb_insert_color(&aeb->u.rb, &av->root);
drivers/mtd/ubi/fastmap.c
252
aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
drivers/mtd/ubi/fastmap.c
321
rb_link_node(&new_aeb->u.rb, parent, p);
drivers/mtd/ubi/fastmap.c
322
rb_insert_color(&new_aeb->u.rb, &av->root);
drivers/mtd/ubi/fastmap.c
376
ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
drivers/mtd/ubi/fastmap.c
377
ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
drivers/mtd/ubi/fastmap.c
379
rb_erase(&aeb->u.rb, &av->root);
drivers/mtd/ubi/fastmap.c
532
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
drivers/mtd/ubi/fastmap.c
533
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
drivers/mtd/ubi/ubi.h
1016
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
drivers/mtd/ubi/ubi.h
1025
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
drivers/mtd/ubi/ubi.h
1034
ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
drivers/mtd/ubi/ubi.h
1053
#define ubi_rb_for_each_entry(rb, pos, root, member) \
drivers/mtd/ubi/ubi.h
1054
for (rb = rb_first(root), \
drivers/mtd/ubi/ubi.h
1055
pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
drivers/mtd/ubi/ubi.h
1056
rb; \
drivers/mtd/ubi/ubi.h
1057
rb = rb_next(rb), \
drivers/mtd/ubi/ubi.h
1058
pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
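
The macro above walks an entire rbtree with a spare struct rb_node cursor plus a typed position. A hypothetical helper showing how the surrounding ubi code uses it; ubi->free and the u.rb member follow the other ubi.h hits:

static int count_free_pebs(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;
	struct rb_node *rb;
	int n = 0;

	ubi_rb_for_each_entry(rb, e, &ubi->free, u.rb)
		n++;	/* one free physical eraseblock per tree entry */

	return n;
}
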
drivers/mtd/ubi/ubi.h
1071
rb_erase(&aeb->u.rb, &av->root);
drivers/mtd/ubi/ubi.h
172
struct rb_node rb;
drivers/mtd/ubi/ubi.h
194
struct rb_node rb;
drivers/mtd/ubi/ubi.h
684
struct rb_node rb;
drivers/mtd/ubi/ubi.h
719
struct rb_node rb;
drivers/mtd/ubi/vtbl.c
366
struct rb_node *rb;
drivers/mtd/ubi/vtbl.c
399
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
drivers/mtd/ubi/wl.c
1061
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
1326
rb_erase(&e->u.rb, &ubi->used);
drivers/mtd/ubi/wl.c
1329
rb_erase(&e->u.rb, &ubi->scrub);
drivers/mtd/ubi/wl.c
1332
rb_erase(&e->u.rb, &ubi->erroneous);
drivers/mtd/ubi/wl.c
1401
rb_erase(&e->u.rb, &ubi->used);
drivers/mtd/ubi/wl.c
148
e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
1597
rb_erase(&e->u.rb, &ubi->used);
drivers/mtd/ubi/wl.c
1603
rb_erase(&e->u.rb, &ubi->free);
drivers/mtd/ubi/wl.c
163
rb_link_node(&e->u.rb, parent, p);
drivers/mtd/ubi/wl.c
1638
struct rb_node *rb;
drivers/mtd/ubi/wl.c
164
rb_insert_color(&e->u.rb, root);
drivers/mtd/ubi/wl.c
1641
rb = root->rb_node;
drivers/mtd/ubi/wl.c
1642
while (rb) {
drivers/mtd/ubi/wl.c
1643
if (rb->rb_left)
drivers/mtd/ubi/wl.c
1644
rb = rb->rb_left;
drivers/mtd/ubi/wl.c
1645
else if (rb->rb_right)
drivers/mtd/ubi/wl.c
1646
rb = rb->rb_right;
drivers/mtd/ubi/wl.c
1648
e = rb_entry(rb, struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
1650
rb = rb_parent(rb);
drivers/mtd/ubi/wl.c
1651
if (rb) {
drivers/mtd/ubi/wl.c
1652
if (rb->rb_left == &e->u.rb)
drivers/mtd/ubi/wl.c
1653
rb->rb_left = NULL;
drivers/mtd/ubi/wl.c
1655
rb->rb_right = NULL;
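
The walk above (also in attach.c) frees a whole rbtree without rebalancing: descend to a leaf, detach it from its parent by hand, free it, resume from the parent. Valid only when the entire tree is being discarded. A self-contained sketch with a hypothetical entry type:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct entry {
	struct rb_node node;
};

static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb = root->rb_node;

	while (rb) {
		if (rb->rb_left) {
			rb = rb->rb_left;
		} else if (rb->rb_right) {
			rb = rb->rb_right;
		} else {
			struct entry *e = rb_entry(rb, struct entry, node);

			rb = rb_parent(rb);
			if (rb) {
				/* unlink the leaf we are about to free */
				if (rb->rb_left == &e->node)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}
			kfree(e);
		}
	}
	root->rb_node = NULL;	/* tree is now empty */
}
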
drivers/mtd/ubi/wl.c
1857
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
drivers/mtd/ubi/wl.c
1858
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
drivers/mtd/ubi/wl.c
2097
rb_erase(&e->u.rb, &ubi->free);
drivers/mtd/ubi/wl.c
250
e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
333
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
340
e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
368
first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
369
last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
372
e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
411
rb_erase(&e->u.rb, &ubi->free);
drivers/mtd/ubi/wl.c
729
rb_erase(&e1->u.rb, &ubi->used);
drivers/mtd/ubi/wl.c
741
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
756
rb_erase(&e1->u.rb, &ubi->used);
drivers/mtd/ubi/wl.c
762
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
drivers/mtd/ubi/wl.c
768
rb_erase(&e1->u.rb, &ubi->scrub);
drivers/net/dsa/realtek/rtl8366rb-leds.c
105
led = &rb->leds[dp->index][led_group];
drivers/net/dsa/realtek/rtl8366rb-leds.c
88
struct rtl8366rb *rb = priv->chip_data;
drivers/net/dsa/realtek/rtl8366rb.c
1272
struct rtl8366rb *rb;
drivers/net/dsa/realtek/rtl8366rb.c
1275
rb = priv->chip_data;
drivers/net/dsa/realtek/rtl8366rb.c
1291
ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
drivers/net/dsa/realtek/rtl8366rb.c
1379
struct rtl8366rb *rb;
drivers/net/dsa/realtek/rtl8366rb.c
1385
rb = priv->chip_data;
drivers/net/dsa/realtek/rtl8366rb.c
1386
rb->max_mtu[port] = new_mtu;
drivers/net/dsa/realtek/rtl8366rb.c
1394
if (rb->max_mtu[i] > max_mtu)
drivers/net/dsa/realtek/rtl8366rb.c
1395
max_mtu = rb->max_mtu[i];
drivers/net/dsa/realtek/rtl8366rb.c
1595
struct rtl8366rb *rb;
drivers/net/dsa/realtek/rtl8366rb.c
1599
rb = priv->chip_data;
drivers/net/dsa/realtek/rtl8366rb.c
1613
rb->pvid_enabled[port] = pvid_enabled;
drivers/net/dsa/realtek/rtl8366rb.c
799
struct rtl8366rb *rb;
drivers/net/dsa/realtek/rtl8366rb.c
806
rb = priv->chip_data;
drivers/net/dsa/realtek/rtl8366rb.c
937
rb->max_mtu[i] = ETH_DATA_LEN + RTL8366RB_CPU_TAG_SIZE;
drivers/net/dsa/realtek/rtl8366rb.c
939
rb->max_mtu[i] = ETH_DATA_LEN;
drivers/net/ethernet/brocade/bna/bfa_ioc.h
199
enum bfa_status (*ioc_pll_init) (void __iomem *rb,
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
251
void __iomem *rb;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
254
rb = bfa_ioc_bar0(ioc);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
256
ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
257
ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
258
ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
261
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
262
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
263
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
264
ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
265
ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
266
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
267
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
269
ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
270
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
271
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
272
ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
273
ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
274
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
275
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
281
ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
282
ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
283
ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
284
ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
289
ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
290
ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
291
ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
292
ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
293
ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
298
ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
304
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
310
void __iomem *rb;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
313
rb = bfa_ioc_bar0(ioc);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
315
ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
316
ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
317
ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
318
ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
319
ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
320
ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
323
ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
324
ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
325
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
326
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
327
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
329
ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
330
ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
331
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
332
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
333
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
339
ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
340
ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
341
ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
342
ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
347
ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
348
ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
349
ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
350
ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
351
ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
356
ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
362
ioc->ioc_regs.err_set = rb + ERR_SET_REG;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
371
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
377
r32 = readl(rb + FNC_PERS_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
386
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
389
r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
397
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
400
r32 = readl(rb + FNC_PERS_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
419
writel(r32, rb + FNC_PERS_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
447
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
450
r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
453
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
459
rb + HOSTFN_MSIX_VT_OFST_NUMVT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
461
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
49
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
51
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
601
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
616
writel(0, (rb + OP_MODE));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
620
(rb + ETH_MAC_SER_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
622
writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
624
(rb + ETH_MAC_SER_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
626
writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
627
writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
628
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
629
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
630
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
631
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
632
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
633
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
636
rb + APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
639
rb + APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
642
rb + APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
645
rb + APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
646
readl(rb + HOSTFN0_INT_MSK);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
648
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
649
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
652
rb + APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
655
rb + APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
658
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
659
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
661
r32 = readl(rb + PSS_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
663
writel(r32, (rb + PSS_CTL_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
666
writel(0, (rb + PMM_1T_RESET_REG_P0));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
667
writel(0, (rb + PMM_1T_RESET_REG_P1));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
670
writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
672
r32 = readl(rb + MBIST_STAT_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
673
writel(0, (rb + MBIST_CTL_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
678
bfa_ioc_ct2_sclk_init(void __iomem *rb)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
685
r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
689
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
695
r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
697
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
702
r32 = readl(rb + CT2_CHIP_MISC_PRG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
704
rb + CT2_CHIP_MISC_PRG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
706
r32 = readl(rb + CT2_PCIE_MISC_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
708
rb + CT2_PCIE_MISC_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
713
r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
716
writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
730
bfa_ioc_ct2_lclk_init(void __iomem *rb)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
737
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
741
writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
746
r32 = readl(rb + CT2_CHIP_MISC_PRG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
747
writel(r32, (rb + CT2_CHIP_MISC_PRG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
752
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
753
writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
758
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
761
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
770
bfa_ioc_ct2_mem_init(void __iomem *rb)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
774
r32 = readl(rb + PSS_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
776
writel(r32, rb + PSS_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
779
writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
781
writel(0, rb + CT2_MBIST_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
785
bfa_ioc_ct2_mac_reset(void __iomem *rb)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
789
bfa_ioc_ct2_sclk_init(rb);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
790
bfa_ioc_ct2_lclk_init(rb);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
795
r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
797
rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
802
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
804
rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
808
rb + CT2_CSI_MAC_CONTROL_REG(0));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
810
rb + CT2_CSI_MAC_CONTROL_REG(1));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
818
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
822
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
830
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
835
writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
837
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
846
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
851
wgn = readl(rb + CT2_WGN_STATUS);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
853
nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
857
if (bfa_ioc_ct2_nfc_halted(rb))
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
858
bfa_ioc_ct2_nfc_resume(rb);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
860
rb + CT2_CSI_FW_CTL_SET_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
863
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
870
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
877
r32 = readl(rb + CT2_CSI_FW_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
880
writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
882
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
888
bfa_ioc_ct2_mac_reset(rb);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
889
bfa_ioc_ct2_sclk_init(rb);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
890
bfa_ioc_ct2_lclk_init(rb);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
893
r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
895
rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
896
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
898
rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
903
r32 = readl(rb + PSS_GPIO_OUT_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
904
writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
905
r32 = readl(rb + PSS_GPIO_OE_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
906
writel(r32 | 1, rb + PSS_GPIO_OE_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
913
writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
914
writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
917
r32 = readl(rb + HOST_SEM5_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
919
r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
921
writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
922
readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
924
r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
926
writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
927
readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
931
bfa_ioc_ct2_mem_init(rb);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
933
writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
934
writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
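
Note on the bfa_ioc_ct.c entries above: rb is the ioremapped register base
(bfa_ioc_bar0() in the related hits), and every register is addressed as
rb + OFFSET through readl()/writel(). A minimal sketch of that
read-modify-write idiom follows; the offset, bit, and function name are
hypothetical, not the driver's.

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_CTL_REG   0x18c        /* hypothetical register offset */
#define EXAMPLE_RESET_BIT 0x00000008u  /* hypothetical control bit */

static void example_assert_reset(void __iomem *rb)
{
        u32 r32;

        r32 = readl(rb + EXAMPLE_CTL_REG);      /* read current value */
        r32 |= EXAMPLE_RESET_BIT;               /* set the control bit */
        writel(r32, rb + EXAMPLE_CTL_REG);      /* write it back */
        readl(rb + EXAMPLE_CTL_REG);            /* read back to flush the posted write */
}
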
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
310
void __iomem *rb, *reg_addr;
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
335
rb = bfa_ioc_bar0(ioc);
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
348
reg_addr = rb + addr;
drivers/net/ethernet/davicom/dm9051.c
147
unsigned int rb;
drivers/net/ethernet/davicom/dm9051.c
155
ret = regmap_read(db->regmap_dm, reg, &rb);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
103
if (cache_get(q, rb))
drivers/net/ethernet/fungible/funeth/funeth_rx.c
110
rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
112
if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
120
rb->page = p;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
121
rb->pg_refs = 1;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
122
refresh_refs(rb);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
123
rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
127
static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
129
if (rb->page) {
drivers/net/ethernet/fungible/funeth/funeth_rx.c
130
dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
drivers/net/ethernet/fungible/funeth/funeth_rx.c
132
__page_frag_cache_drain(rb->page, rb->pg_refs);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
133
rb->page = NULL;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
67
static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
drivers/net/ethernet/fungible/funeth/funeth_rx.c
79
*rb = *buf;
drivers/net/ethernet/fungible/funeth/funeth_rx.c
81
refresh_refs(rb);
drivers/net/ethernet/fungible/funeth/funeth_rx.c
98
static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
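
The funeth_rx.c entries show rb as a struct funeth_rxbuf describing one
DMA-mapped page. A sketch of the free half of that pairing, with the struct
layout inferred from the hits (names with sketch_ are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct sketch_rxbuf {
        struct page *page;      /* backing page, NULL when empty */
        dma_addr_t dma_addr;    /* mapping created by dma_map_page() */
        int pg_refs;            /* page-fragment refs to drain on free */
        int node;               /* NUMA node, or -1 for pfmemalloc pages */
};

static void sketch_free_page(struct device *dma_dev, struct sketch_rxbuf *rb)
{
        if (rb->page) {
                dma_unmap_page(dma_dev, rb->dma_addr, PAGE_SIZE,
                               DMA_FROM_DEVICE);
                __page_frag_cache_drain(rb->page, rb->pg_refs);
                rb->page = NULL;
        }
}
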
drivers/net/ethernet/microchip/lan743x_ethtool.c
1194
u32 *rb = p;
drivers/net/ethernet/microchip/lan743x_ethtool.c
1198
rb[ETH_PRIV_FLAGS] = adapter->flags;
drivers/net/ethernet/microchip/lan743x_ethtool.c
1199
rb[ETH_ID_REV] = lan743x_csr_read(adapter, ID_REV);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1200
rb[ETH_FPGA_REV] = lan743x_csr_read(adapter, FPGA_REV);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1201
rb[ETH_STRAP_READ] = lan743x_csr_read(adapter, STRAP_READ);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1202
rb[ETH_INT_STS] = lan743x_csr_read(adapter, INT_STS);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1203
rb[ETH_HW_CFG] = lan743x_csr_read(adapter, HW_CFG);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1204
rb[ETH_PMT_CTL] = lan743x_csr_read(adapter, PMT_CTL);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1205
rb[ETH_E2P_CMD] = lan743x_csr_read(adapter, E2P_CMD);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1206
rb[ETH_E2P_DATA] = lan743x_csr_read(adapter, E2P_DATA);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1207
rb[ETH_MAC_CR] = lan743x_csr_read(adapter, MAC_CR);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1208
rb[ETH_MAC_RX] = lan743x_csr_read(adapter, MAC_RX);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1209
rb[ETH_MAC_TX] = lan743x_csr_read(adapter, MAC_TX);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1210
rb[ETH_FLOW] = lan743x_csr_read(adapter, MAC_FLOW);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1211
rb[ETH_MII_ACC] = lan743x_csr_read(adapter, MAC_MII_ACC);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1212
rb[ETH_MII_DATA] = lan743x_csr_read(adapter, MAC_MII_DATA);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1213
rb[ETH_EEE_TX_LPI_REQ_DLY] = lan743x_csr_read(adapter,
drivers/net/ethernet/microchip/lan743x_ethtool.c
1215
rb[ETH_WUCSR] = lan743x_csr_read(adapter, MAC_WUCSR);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1216
rb[ETH_WK_SRC] = lan743x_csr_read(adapter, MAC_WK_SRC);
drivers/net/ethernet/microchip/lan743x_ethtool.c
1222
u32 *rb = p;
drivers/net/ethernet/microchip/lan743x_ethtool.c
1297
rb[regs[idx].id] = 0xFFFF;
drivers/net/ethernet/microchip/lan743x_ethtool.c
1299
rb[regs[idx].id] = val;
drivers/net/ethernet/ti/netcp_ethss.c
172
#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
drivers/net/ethernet/ti/netcp_ethss.c
173
offsetof(struct gbe##_##rb, rn)
drivers/net/ethernet/ti/netcp_ethss.c
174
#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
drivers/net/ethernet/ti/netcp_ethss.c
175
offsetof(struct gbenu##_##rb, rn)
drivers/net/ethernet/ti/netcp_ethss.c
176
#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
drivers/net/ethernet/ti/netcp_ethss.c
177
offsetof(struct xgbe##_##rb, rn)
drivers/net/ethernet/ti/netcp_ethss.c
178
#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
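
In the netcp_ethss.c macros above, rb is not a variable but a register-block
name fragment consumed by token pasting: the SET macros cache offsetof()
values and GBE_REG_ADDR() adds them to the block base. An illustrative
expansion (member and struct names hypothetical):

/* GBE_SET_REG_OFS(p, switch_regs, flow_control) expands to:
 *   p->switch_regs_ofs.flow_control =
 *           offsetof(struct gbe_switch_regs, flow_control);
 * GBE_REG_ADDR(p, switch_regs, flow_control) expands to:
 *   (p->switch_regs + p->switch_regs_ofs.flow_control)
 */
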
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3293
struct iwl_fw_error_dump_rb *rb;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3298
rb_len += sizeof(**data) + sizeof(*rb) + max_len;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3301
(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3302
rb = (void *)(*data)->data;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3303
rb->index = cpu_to_le32(i);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
3304
memcpy(rb->data, page_address(rxb->page), max_len);
drivers/net/wireless/microchip/wilc1000/spi.c
361
static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
drivers/net/wireless/microchip/wilc1000/spi.c
369
.rx_buf = rb,
drivers/net/wireless/microchip/wilc1000/spi.c
402
static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
drivers/net/wireless/microchip/wilc1000/spi.c
410
.rx_buf = rb,
drivers/net/wireless/microchip/wilc1000/spi.c
524
u8 wb[32], rb[32];
drivers/net/wireless/microchip/wilc1000/spi.c
532
memset(rb, 0x0, sizeof(rb));
drivers/net/wireless/microchip/wilc1000/spi.c
566
if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
drivers/net/wireless/microchip/wilc1000/spi.c
571
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
drivers/net/wireless/microchip/wilc1000/spi.c
619
u8 wb[32], rb[32];
drivers/net/wireless/microchip/wilc1000/spi.c
625
memset(rb, 0x0, sizeof(rb));
drivers/net/wireless/microchip/wilc1000/spi.c
663
if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
drivers/net/wireless/microchip/wilc1000/spi.c
668
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
drivers/net/wireless/microchip/wilc1000/spi.c
694
u8 wb[32], rb[32];
drivers/net/wireless/microchip/wilc1000/spi.c
702
memset(rb, 0x0, sizeof(rb));
drivers/net/wireless/microchip/wilc1000/spi.c
740
if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
drivers/net/wireless/microchip/wilc1000/spi.c
745
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
drivers/net/wireless/microchip/wilc1000/spi.c
820
u8 wb[32], rb[32];
drivers/net/wireless/microchip/wilc1000/spi.c
829
memset(rb, 0x0, sizeof(rb));
drivers/net/wireless/microchip/wilc1000/spi.c
849
if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
drivers/net/wireless/microchip/wilc1000/spi.c
854
r = (struct wilc_spi_special_cmd_rsp *)&rb[cmd_len];
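
The wilc1000 entries use rb as the receive half of a full-duplex SPI
exchange: the reply is clocked in while wb is clocked out, so the response
starts at rb[cmd_len]. A sketch under that assumption (sketch_ names are
hypothetical):

#include <linux/spi/spi.h>
#include <linux/string.h>

static int sketch_cmd_rsp(struct spi_device *spi, u8 *wb, u8 *rb,
                          u32 cmd_len, u32 resp_len)
{
        struct spi_transfer tr = {
                .tx_buf = wb,
                .rx_buf = rb,
                .len    = cmd_len + resp_len,
        };

        memset(rb, 0x0, cmd_len + resp_len);
        return spi_sync_transfer(spi, &tr, 1); /* reply then sits at &rb[cmd_len] */
}
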
drivers/of/of_reserved_mem.c
511
const struct reserved_mem *ra = a, *rb = b;
drivers/of/of_reserved_mem.c
513
if (ra->base < rb->base)
drivers/of/of_reserved_mem.c
516
if (ra->base > rb->base)
drivers/of/of_reserved_mem.c
524
if (ra->size < rb->size)
drivers/of/of_reserved_mem.c
526
if (ra->size > rb->size)
drivers/of/of_reserved_mem.c
529
if (ra->fdt_node < rb->fdt_node)
drivers/of/of_reserved_mem.c
531
if (ra->fdt_node > rb->fdt_node)
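
The of_reserved_mem.c entries are fragments of a three-key comparator for
sort(): order by base, then size, then FDT node. Reconstructed shape (the
function name is hypothetical):

#include <linux/of_reserved_mem.h>

static int sketch_rmem_cmp(const void *a, const void *b)
{
        const struct reserved_mem *ra = a, *rb = b;

        if (ra->base < rb->base)
                return -1;
        if (ra->base > rb->base)
                return 1;
        if (ra->size < rb->size)
                return -1;
        if (ra->size > rb->size)
                return 1;
        if (ra->fdt_node < rb->fdt_node)
                return -1;
        if (ra->fdt_node > rb->fdt_node)
                return 1;
        return 0;
}
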
drivers/platform/x86/amd/pmf/acpi.c
340
struct pmf_cbi_ring_buffer *rb = &pmf_dev->cbi_buf;
drivers/platform/x86/amd/pmf/acpi.c
348
rb->data[rb->head].val[i] = pmf_dev->req1.custom_policy[i];
drivers/platform/x86/amd/pmf/acpi.c
349
rb->data[rb->head].preq = pmf_dev->req1.pending_req;
drivers/platform/x86/amd/pmf/acpi.c
354
rb->data[rb->head].val[i] = pmf_dev->req.custom_policy[i];
drivers/platform/x86/amd/pmf/acpi.c
355
rb->data[rb->head].preq = pmf_dev->req.pending_req;
drivers/platform/x86/amd/pmf/acpi.c
361
if (CIRC_SPACE(rb->head, rb->tail, CUSTOM_BIOS_INPUT_RING_ENTRIES) == 0) {
drivers/platform/x86/amd/pmf/acpi.c
364
rb->tail = (rb->tail + 1) & (CUSTOM_BIOS_INPUT_RING_ENTRIES - 1);
drivers/platform/x86/amd/pmf/acpi.c
367
rb->head = (rb->head + 1) & (CUSTOM_BIOS_INPUT_RING_ENTRIES - 1);
drivers/platform/x86/amd/pmf/spc.c
154
struct pmf_cbi_ring_buffer *rb = &pdev->cbi_buf;
drivers/platform/x86/amd/pmf/spc.c
162
if (CIRC_CNT(rb->head, rb->tail, CUSTOM_BIOS_INPUT_RING_ENTRIES) == 0)
drivers/platform/x86/amd/pmf/spc.c
166
if (!rb->data[rb->tail].preq)
drivers/platform/x86/amd/pmf/spc.c
176
amd_pmf_update_bios_inputs(pdev, &rb->data[rb->tail], custom_bios_inputs_v1, in);
drivers/platform/x86/amd/pmf/spc.c
179
amd_pmf_update_bios_inputs(pdev, &rb->data[rb->tail], custom_bios_inputs, in);
drivers/platform/x86/amd/pmf/spc.c
186
rb->tail = (rb->tail + 1) & (CUSTOM_BIOS_INPUT_RING_ENTRIES - 1);
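
The amd/pmf entries index a power-of-two ring with the CIRC_SPACE() and
CIRC_CNT() helpers from linux/circ_buf.h; masking with (ENTRIES - 1) wraps
head and tail, and a full ring drops its oldest entry. Sketch (sizes and
names hypothetical):

#include <linux/circ_buf.h>

#define SKETCH_ENTRIES 32 /* must be a power of two for the mask to wrap */

struct sketch_ring {
        unsigned int head, tail;
        int data[SKETCH_ENTRIES];
};

static void sketch_push(struct sketch_ring *rb, int val)
{
        if (CIRC_SPACE(rb->head, rb->tail, SKETCH_ENTRIES) == 0)
                rb->tail = (rb->tail + 1) & (SKETCH_ENTRIES - 1); /* drop oldest */
        rb->data[rb->head] = val;
        rb->head = (rb->head + 1) & (SKETCH_ENTRIES - 1);
}
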
drivers/platform/x86/intel/ishtp_eclite.c
374
(struct ecl_message *)opr_dev->rb->buffer.data;
drivers/platform/x86/intel/ishtp_eclite.c
396
(struct ecl_message_header *)opr_dev->rb->buffer.data;
drivers/platform/x86/intel/ishtp_eclite.c
428
struct ishtp_cl_rb *rb;
drivers/platform/x86/intel/ishtp_eclite.c
431
while ((rb = ishtp_cl_rx_get_rb(opr_dev->ecl_ishtp_cl)) != NULL) {
drivers/platform/x86/intel/ishtp_eclite.c
432
opr_dev->rb = rb;
drivers/platform/x86/intel/ishtp_eclite.c
433
header = (struct ecl_message_header *)rb->buffer.data;
drivers/platform/x86/intel/ishtp_eclite.c
444
ishtp_cl_io_rb_recycle(rb);
drivers/platform/x86/intel/ishtp_eclite.c
81
struct ishtp_cl_rb *rb;
drivers/pwm/pwm-atmel-tcb.c
45
u32 rb;
drivers/pwm/pwm-atmel-tcb.c
494
regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(channel, RB), &chan->rb);
drivers/pwm/pwm-atmel-tcb.c
509
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(channel, RB), chan->rb);
drivers/scsi/bfa/bfa_ioc.c
5483
void __iomem *rb;
drivers/scsi/bfa/bfa_ioc.c
5485
rb = bfa_ioc_bar0(ioc);
drivers/scsi/bfa/bfa_ioc.c
5486
return readl(rb + BFA_PHY_LOCK_STATUS);
drivers/scsi/bfa/bfa_ioc.h
370
bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
drivers/scsi/bfa/bfa_ioc.h
899
bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
drivers/scsi/bfa/bfa_ioc.h
900
bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
drivers/scsi/bfa/bfa_ioc.h
901
bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
drivers/scsi/bfa/bfa_ioc_cb.c
138
void __iomem *rb;
drivers/scsi/bfa/bfa_ioc_cb.c
141
rb = bfa_ioc_bar0(ioc);
drivers/scsi/bfa/bfa_ioc_cb.c
143
ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
drivers/scsi/bfa/bfa_ioc_cb.c
144
ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
drivers/scsi/bfa/bfa_ioc_cb.c
145
ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
drivers/scsi/bfa/bfa_ioc_cb.c
148
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
drivers/scsi/bfa/bfa_ioc_cb.c
149
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
drivers/scsi/bfa/bfa_ioc_cb.c
150
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
drivers/scsi/bfa/bfa_ioc_cb.c
152
ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
153
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
154
ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
160
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
drivers/scsi/bfa/bfa_ioc_cb.c
161
ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
drivers/scsi/bfa/bfa_ioc_cb.c
166
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
167
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
168
ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
169
ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
174
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
175
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
180
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
drivers/scsi/bfa/bfa_ioc_cb.c
186
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
357
bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
drivers/scsi/bfa/bfa_ioc_cb.c
369
join_bits = readl(rb + BFA_IOC0_STATE_REG) &
drivers/scsi/bfa/bfa_ioc_cb.c
371
writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG));
drivers/scsi/bfa/bfa_ioc_cb.c
372
join_bits = readl(rb + BFA_IOC1_STATE_REG) &
drivers/scsi/bfa/bfa_ioc_cb.c
374
writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG));
drivers/scsi/bfa/bfa_ioc_cb.c
375
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
drivers/scsi/bfa/bfa_ioc_cb.c
376
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
drivers/scsi/bfa/bfa_ioc_cb.c
377
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_cb.c
378
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_cb.c
379
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
drivers/scsi/bfa/bfa_ioc_cb.c
380
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
drivers/scsi/bfa/bfa_ioc_cb.c
381
writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
383
rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
384
writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
386
rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
388
writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
389
writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
391
rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
393
rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_cb.c
395
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_cb.c
396
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_cb.c
397
writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_cb.c
398
writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
185
void __iomem *rb;
drivers/scsi/bfa/bfa_ioc_ct.c
188
rb = bfa_ioc_bar0(ioc);
drivers/scsi/bfa/bfa_ioc_ct.c
190
ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
drivers/scsi/bfa/bfa_ioc_ct.c
191
ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
drivers/scsi/bfa/bfa_ioc_ct.c
192
ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
drivers/scsi/bfa/bfa_ioc_ct.c
195
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
196
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
197
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
198
ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
drivers/scsi/bfa/bfa_ioc_ct.c
199
ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
drivers/scsi/bfa/bfa_ioc_ct.c
200
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
drivers/scsi/bfa/bfa_ioc_ct.c
201
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
drivers/scsi/bfa/bfa_ioc_ct.c
203
ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
204
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
205
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
206
ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
drivers/scsi/bfa/bfa_ioc_ct.c
207
ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
drivers/scsi/bfa/bfa_ioc_ct.c
208
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
drivers/scsi/bfa/bfa_ioc_ct.c
209
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
drivers/scsi/bfa/bfa_ioc_ct.c
215
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
216
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
217
ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
218
ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
223
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
224
ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
225
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
226
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
drivers/scsi/bfa/bfa_ioc_ct.c
227
ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
drivers/scsi/bfa/bfa_ioc_ct.c
232
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
drivers/scsi/bfa/bfa_ioc_ct.c
238
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
244
void __iomem *rb;
drivers/scsi/bfa/bfa_ioc_ct.c
247
rb = bfa_ioc_bar0(ioc);
drivers/scsi/bfa/bfa_ioc_ct.c
249
ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
drivers/scsi/bfa/bfa_ioc_ct.c
250
ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
drivers/scsi/bfa/bfa_ioc_ct.c
251
ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
drivers/scsi/bfa/bfa_ioc_ct.c
252
ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
drivers/scsi/bfa/bfa_ioc_ct.c
253
ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
drivers/scsi/bfa/bfa_ioc_ct.c
254
ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
drivers/scsi/bfa/bfa_ioc_ct.c
257
ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
258
ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
259
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
260
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
drivers/scsi/bfa/bfa_ioc_ct.c
261
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
drivers/scsi/bfa/bfa_ioc_ct.c
263
ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
264
ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
265
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
drivers/scsi/bfa/bfa_ioc_ct.c
266
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
drivers/scsi/bfa/bfa_ioc_ct.c
267
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
drivers/scsi/bfa/bfa_ioc_ct.c
273
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
274
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
275
ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
276
ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
281
ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
282
ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
283
ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
284
ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
drivers/scsi/bfa/bfa_ioc_ct.c
285
ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
drivers/scsi/bfa/bfa_ioc_ct.c
290
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
drivers/scsi/bfa/bfa_ioc_ct.c
296
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
307
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/scsi/bfa/bfa_ioc_ct.c
313
r32 = readl(rb + FNC_PERS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
324
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/scsi/bfa/bfa_ioc_ct.c
327
r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
drivers/scsi/bfa/bfa_ioc_ct.c
340
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/scsi/bfa/bfa_ioc_ct.c
343
r32 = readl(rb + FNC_PERS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
364
writel(r32, rb + FNC_PERS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
564
void __iomem *rb = ioc->pcidev.pci_bar_kva;
drivers/scsi/bfa/bfa_ioc_ct.c
567
r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
drivers/scsi/bfa/bfa_ioc_ct.c
570
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
drivers/scsi/bfa/bfa_ioc_ct.c
576
rb + HOSTFN_MSIX_VT_OFST_NUMVT);
drivers/scsi/bfa/bfa_ioc_ct.c
578
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
drivers/scsi/bfa/bfa_ioc_ct.c
582
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
drivers/scsi/bfa/bfa_ioc_ct.c
597
writel(0, (rb + OP_MODE));
drivers/scsi/bfa/bfa_ioc_ct.c
599
__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
601
writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
drivers/scsi/bfa/bfa_ioc_ct.c
602
writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
604
writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
605
writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
606
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
drivers/scsi/bfa/bfa_ioc_ct.c
607
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
drivers/scsi/bfa/bfa_ioc_ct.c
608
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_ct.c
609
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_ct.c
610
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
drivers/scsi/bfa/bfa_ioc_ct.c
611
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
drivers/scsi/bfa/bfa_ioc_ct.c
613
rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
615
rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
617
__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
619
__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
620
readl(rb + HOSTFN0_INT_MSK);
drivers/scsi/bfa/bfa_ioc_ct.c
622
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_ct.c
623
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
drivers/scsi/bfa/bfa_ioc_ct.c
624
writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
625
writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
628
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
drivers/scsi/bfa/bfa_ioc_ct.c
629
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
drivers/scsi/bfa/bfa_ioc_ct.c
631
r32 = readl((rb + PSS_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
633
writel(r32, (rb + PSS_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
636
writel(0, (rb + PMM_1T_RESET_REG_P0));
drivers/scsi/bfa/bfa_ioc_ct.c
637
writel(0, (rb + PMM_1T_RESET_REG_P1));
drivers/scsi/bfa/bfa_ioc_ct.c
640
writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
642
r32 = readl((rb + MBIST_STAT_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
643
writel(0, (rb + MBIST_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
648
bfa_ioc_ct2_sclk_init(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
655
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
659
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
665
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
667
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
672
r32 = readl((rb + CT2_CHIP_MISC_PRG));
drivers/scsi/bfa/bfa_ioc_ct.c
673
writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
drivers/scsi/bfa/bfa_ioc_ct.c
675
r32 = readl((rb + CT2_PCIE_MISC_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
676
writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
681
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
684
writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
693
bfa_ioc_ct2_lclk_init(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
700
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
704
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
709
r32 = readl((rb + CT2_CHIP_MISC_PRG));
drivers/scsi/bfa/bfa_ioc_ct.c
710
writel(r32, (rb + CT2_CHIP_MISC_PRG));
drivers/scsi/bfa/bfa_ioc_ct.c
715
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
716
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
721
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
724
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
733
bfa_ioc_ct2_mem_init(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
737
r32 = readl((rb + PSS_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
739
writel(r32, (rb + PSS_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
742
writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
744
writel(0, (rb + CT2_MBIST_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
748
bfa_ioc_ct2_mac_reset(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
752
rb + CT2_CSI_MAC_CONTROL_REG(0));
drivers/scsi/bfa/bfa_ioc_ct.c
754
rb + CT2_CSI_MAC_CONTROL_REG(1));
drivers/scsi/bfa/bfa_ioc_ct.c
758
bfa_ioc_ct2_enable_flash(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
762
r32 = readl((rb + PSS_GPIO_OUT_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
763
writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
764
r32 = readl((rb + PSS_GPIO_OE_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
765
writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
775
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
779
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
787
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
791
writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
793
if (bfa_ioc_ct2_nfc_halted(rb))
drivers/scsi/bfa/bfa_ioc_ct.c
797
WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
drivers/scsi/bfa/bfa_ioc_ct.c
801
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
806
writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
808
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
817
bfa_ioc_ct2_clk_reset(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
821
bfa_ioc_ct2_sclk_init(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
822
bfa_ioc_ct2_lclk_init(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
827
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
829
(rb + CT2_APP_PLL_SCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
831
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
833
(rb + CT2_APP_PLL_LCLK_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
838
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
842
r32 = readl((rb + PSS_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
844
writel(r32, (rb + PSS_CTL_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
846
writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
849
r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
857
r32 = readl(rb + CT2_NFC_FLASH_STS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
864
r32 = readl(rb + CT2_CSI_FW_CTL_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
869
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
drivers/scsi/bfa/bfa_ioc_ct.c
874
if (bfa_ioc_ct2_nfc_halted(rb))
drivers/scsi/bfa/bfa_ioc_ct.c
875
bfa_ioc_ct2_nfc_resume(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
877
r32 = readl(rb + CT2_NFC_STS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
883
r32 = readl(rb + CT2_NFC_STS_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
888
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
drivers/scsi/bfa/bfa_ioc_ct.c
892
wgn = readl(rb + CT2_WGN_STATUS);
drivers/scsi/bfa/bfa_ioc_ct.c
898
bfa_ioc_ct2_clk_reset(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
899
bfa_ioc_ct2_enable_flash(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
901
bfa_ioc_ct2_mac_reset(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
903
bfa_ioc_ct2_clk_reset(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
904
bfa_ioc_ct2_enable_flash(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
907
nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
912
bfa_ioc_ct2_wait_till_nfc_running(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
914
bfa_ioc_ct2_nfc_clk_reset(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
916
bfa_ioc_ct2_nfc_halt(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
918
bfa_ioc_ct2_clk_reset(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
919
bfa_ioc_ct2_mac_reset(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
920
bfa_ioc_ct2_clk_reset(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
932
r32 = readl(rb + CT2_CHIP_MISC_PRG);
drivers/scsi/bfa/bfa_ioc_ct.c
933
writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG));
drivers/scsi/bfa/bfa_ioc_ct.c
940
writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
drivers/scsi/bfa/bfa_ioc_ct.c
941
writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
drivers/scsi/bfa/bfa_ioc_ct.c
944
r32 = readl(rb + HOST_SEM5_REG);
drivers/scsi/bfa/bfa_ioc_ct.c
946
r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
drivers/scsi/bfa/bfa_ioc_ct.c
948
writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
drivers/scsi/bfa/bfa_ioc_ct.c
949
readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
drivers/scsi/bfa/bfa_ioc_ct.c
951
r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
drivers/scsi/bfa/bfa_ioc_ct.c
953
writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
drivers/scsi/bfa/bfa_ioc_ct.c
954
readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
drivers/scsi/bfa/bfa_ioc_ct.c
958
bfa_ioc_ct2_mem_init(rb);
drivers/scsi/bfa/bfa_ioc_ct.c
960
writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
drivers/scsi/bfa/bfa_ioc_ct.c
961
writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
drivers/scsi/bfa/bfad_debugfs.c
249
void __iomem *rb, *reg_addr;
drivers/scsi/bfa/bfad_debugfs.c
279
rb = bfa_ioc_bar0(ioc);
drivers/scsi/bfa/bfad_debugfs.c
293
reg_addr = rb + addr;
drivers/scsi/initio.c
339
u8 instr, rb;
drivers/scsi/initio.c
352
rb = inb(base + TUL_NVRAM);
drivers/scsi/initio.c
353
rb &= SE2DI;
drivers/scsi/initio.c
354
val += (rb << i);
drivers/spi/spi-mpc52xx-psc.c
110
unsigned rb = 0; /* number of bytes received */
drivers/spi/spi-mpc52xx-psc.c
124
while (rb < t->len) {
drivers/spi/spi-mpc52xx-psc.c
125
if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
drivers/spi/spi-mpc52xx-psc.c
130
rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
drivers/spi/spi-mpc52xx-psc.c
152
if (t->len - rb == 1) {
drivers/spi/spi-mpc52xx-psc.c
165
for (; recv_at_once; rb++, recv_at_once--)
drivers/spi/spi-mpc52xx-psc.c
166
rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8);
drivers/spi/spi-mpc52xx-psc.c
168
for (; recv_at_once; rb++, recv_at_once--)
drivers/staging/media/av7110/av7110_av.c
432
#define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096)
drivers/staging/media/av7110/av7110_av.c
437
struct dvb_ringbuffer *rb;
drivers/staging/media/av7110/av7110_av.c
443
rb = (type) ? &av7110->avout : &av7110->aout;
drivers/staging/media/av7110/av7110_av.c
456
if (wait_event_interruptible(rb->queue, FREE_COND_TS))
drivers/target/iscsi/iscsi_target_configfs.c
44
ssize_t rb;
drivers/target/iscsi/iscsi_target_configfs.c
474
ssize_t rb; \
drivers/target/iscsi/iscsi_target_configfs.c
479
rb = snprintf(page, PAGE_SIZE, \
drivers/target/iscsi/iscsi_target_configfs.c
48
rb = sysfs_emit(page, "1\n");
drivers/target/iscsi/iscsi_target_configfs.c
483
rb = snprintf(page, PAGE_SIZE, "%u\n", \
drivers/target/iscsi/iscsi_target_configfs.c
488
return rb; \
drivers/target/iscsi/iscsi_target_configfs.c
50
rb = sysfs_emit(page, "0\n");
drivers/target/iscsi/iscsi_target_configfs.c
52
return rb;
drivers/target/iscsi/iscsi_target_configfs.c
530
ssize_t rb = 0;
drivers/target/iscsi/iscsi_target_configfs.c
536
rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
drivers/target/iscsi/iscsi_target_configfs.c
541
rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
drivers/target/iscsi/iscsi_target_configfs.c
543
rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
drivers/target/iscsi/iscsi_target_configfs.c
546
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
549
rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
drivers/target/iscsi/iscsi_target_configfs.c
552
rb += sysfs_emit_at(page, rb, "Session State: ");
drivers/target/iscsi/iscsi_target_configfs.c
555
rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
drivers/target/iscsi/iscsi_target_configfs.c
558
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
drivers/target/iscsi/iscsi_target_configfs.c
561
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
drivers/target/iscsi/iscsi_target_configfs.c
564
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
drivers/target/iscsi/iscsi_target_configfs.c
567
rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
drivers/target/iscsi/iscsi_target_configfs.c
570
rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
drivers/target/iscsi/iscsi_target_configfs.c
575
rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
drivers/target/iscsi/iscsi_target_configfs.c
577
rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
drivers/target/iscsi/iscsi_target_configfs.c
580
rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
drivers/target/iscsi/iscsi_target_configfs.c
586
rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
drivers/target/iscsi/iscsi_target_configfs.c
591
rb += sysfs_emit_at(page, rb, "CID: %hu Connection"
drivers/target/iscsi/iscsi_target_configfs.c
595
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
599
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
603
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
607
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
611
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
615
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
619
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
623
rb += sysfs_emit_at(page, rb,
drivers/target/iscsi/iscsi_target_configfs.c
628
rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr,
drivers/target/iscsi/iscsi_target_configfs.c
631
rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n",
drivers/target/iscsi/iscsi_target_configfs.c
638
return rb;
drivers/target/iscsi/iscsi_target_configfs.c
748
ssize_t rb; \
drivers/target/iscsi/iscsi_target_configfs.c
753
rb = sysfs_emit(page, "%u\n", tpg->tpg_attrib.name); \
drivers/target/iscsi/iscsi_target_configfs.c
755
return rb; \
drivers/target/iscsi/iscsi_target_configfs.c
922
ssize_t rb; \
drivers/target/iscsi/iscsi_target_configfs.c
933
rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
drivers/target/iscsi/iscsi_target_configfs.c
936
return rb; \
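
In the iscsi_target_configfs.c entries, rb doubles as the running offset
into the sysfs page and the show() return value: sysfs_emit_at() bounds each
write to PAGE_SIZE and returns the bytes added. A minimal sketch (strings
are placeholders):

#include <linux/sysfs.h>

static ssize_t sketch_show(char *page)
{
        ssize_t rb = 0;

        rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n", "example");
        rb += sysfs_emit_at(page, rb, "Session State: %s\n", "LOGGED_IN");
        return rb; /* total bytes emitted */
}
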
drivers/tty/hvc/hvc_iucv.c
221
struct iucv_tty_buffer *rb;
drivers/tty/hvc/hvc_iucv.c
239
rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
drivers/tty/hvc/hvc_iucv.c
242
if (!rb->mbuf) { /* message not yet received ... */
drivers/tty/hvc/hvc_iucv.c
245
rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
drivers/tty/hvc/hvc_iucv.c
246
if (!rb->mbuf)
drivers/tty/hvc/hvc_iucv.c
249
rc = __iucv_message_receive(priv->path, &rb->msg, 0,
drivers/tty/hvc/hvc_iucv.c
250
rb->mbuf, rb->msg.length, NULL);
drivers/tty/hvc/hvc_iucv.c
262
if (rc || (rb->mbuf->version != MSG_VERSION) ||
drivers/tty/hvc/hvc_iucv.c
263
(rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
drivers/tty/hvc/hvc_iucv.c
267
switch (rb->mbuf->type) {
drivers/tty/hvc/hvc_iucv.c
269
written = min_t(int, rb->mbuf->datalen - rb->offset, count);
drivers/tty/hvc/hvc_iucv.c
270
memcpy(buf, rb->mbuf->data + rb->offset, written);
drivers/tty/hvc/hvc_iucv.c
271
if (written < (rb->mbuf->datalen - rb->offset)) {
drivers/tty/hvc/hvc_iucv.c
272
rb->offset += written;
drivers/tty/hvc/hvc_iucv.c
279
if (rb->mbuf->datalen != sizeof(struct winsize))
drivers/tty/hvc/hvc_iucv.c
283
__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
drivers/tty/hvc/hvc_iucv.c
293
list_del(&rb->list);
drivers/tty/hvc/hvc_iucv.c
294
destroy_tty_buffer(rb);
drivers/tty/hvc/hvc_iucv.c
904
struct iucv_tty_buffer *rb;
drivers/tty/hvc/hvc_iucv.c
921
rb = alloc_tty_buffer(0, GFP_ATOMIC);
drivers/tty/hvc/hvc_iucv.c
922
if (!rb) {
drivers/tty/hvc/hvc_iucv.c
926
rb->msg = *msg;
drivers/tty/hvc/hvc_iucv.c
928
list_add_tail(&rb->list, &priv->tty_inqueue);
drivers/usb/class/cdc-acm.c
1424
struct acm_rb *rb = &(acm->read_buffers[i]);
drivers/usb/class/cdc-acm.c
1427
rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
drivers/usb/class/cdc-acm.c
1428
&rb->dma);
drivers/usb/class/cdc-acm.c
1429
if (!rb->base)
drivers/usb/class/cdc-acm.c
1431
rb->index = i;
drivers/usb/class/cdc-acm.c
1432
rb->instance = acm;
drivers/usb/class/cdc-acm.c
1439
urb->transfer_dma = rb->dma;
drivers/usb/class/cdc-acm.c
1441
usb_fill_int_urb(urb, acm->dev, acm->in, rb->base,
drivers/usb/class/cdc-acm.c
1443
acm_read_bulk_callback, rb,
drivers/usb/class/cdc-acm.c
1446
usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base,
drivers/usb/class/cdc-acm.c
1448
acm_read_bulk_callback, rb);
drivers/usb/class/cdc-acm.c
518
struct acm_rb *rb = urb->context;
drivers/usb/class/cdc-acm.c
519
struct acm *acm = rb->instance;
drivers/usb/class/cdc-acm.c
526
rb->index, urb->actual_length, status);
drivers/usb/class/cdc-acm.c
550
set_bit(rb->index, &acm->urbs_in_error_delay);
drivers/usb/class/cdc-acm.c
567
set_bit(rb->index, &acm->read_urbs_free);
drivers/usb/class/cdc-acm.c
586
acm_submit_read_urb(acm, rb->index, GFP_ATOMIC);
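
The cdc-acm entries wire each struct acm_rb to a URB: a coherent buffer and
its DMA handle are allocated together, the buffer struct rides along as the
URB context, and the completion resubmits by rb->index. A sketch of that
setup, assuming the shown fields (sketch_ names are hypothetical):

#include <linux/usb.h>

static int sketch_setup_read_urb(struct usb_device *udev, struct urb *urb,
                                 unsigned int pipe, size_t readsize,
                                 void *context, usb_complete_t complete_fn)
{
        dma_addr_t dma;
        void *base = usb_alloc_coherent(udev, readsize, GFP_KERNEL, &dma);

        if (!base)
                return -ENOMEM;
        usb_fill_bulk_urb(urb, udev, pipe, base, readsize,
                          complete_fn, context);
        urb->transfer_dma = dma;
        urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* core skips mapping */
        return 0;
}
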
drivers/usb/gadget/function/uvc_queue.c
181
struct v4l2_requestbuffers *rb)
drivers/usb/gadget/function/uvc_queue.c
186
ret = vb2_reqbufs(&queue->queue, rb);
drivers/usb/gadget/function/uvc_queue.c
195
return ret ? ret : rb->count;
drivers/usb/gadget/function/uvc_queue.h
74
struct v4l2_requestbuffers *rb);
drivers/vhost/iotlb.c
20
rb, __u64, __subtree_last,
drivers/video/fbdev/core/fbcvt.c
294
int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
drivers/video/fbdev/core/fbcvt.c
303
if (rb)
drivers/video/fbdev/core/modedb.c
654
int yres_specified = 0, cvt = 0, rb = 0;
drivers/video/fbdev/core/modedb.c
668
if (cvt || rb)
drivers/video/fbdev/core/modedb.c
679
if (cvt || rb)
drivers/video/fbdev/core/modedb.c
700
rb = 1;
drivers/video/fbdev/core/modedb.c
734
(rb) ? " reduced blanking" : "",
drivers/video/fbdev/core/modedb.c
748
ret = fb_find_mode_cvt(&cvt_mode, margins, rb);
drivers/video/fbdev/omap2/omapfb/dss/dispc.c
1063
FLD_VAL(coefs->rb, 9, 0);
drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
366
info.cpr_coefs.rb,
drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
387
&coefs.rr, &coefs.rg, &coefs.rb,
drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
392
arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
drivers/xen/xen-scsiback.c
1471
ssize_t rb;
drivers/xen/xen-scsiback.c
1474
rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
drivers/xen/xen-scsiback.c
1477
return rb;
drivers/xen/xenbus/xenbus_dev_frontend.c
130
struct read_buffer *rb;
drivers/xen/xenbus/xenbus_dev_frontend.c
148
rb = list_entry(u->read_buffers.next, struct read_buffer, list);
drivers/xen/xenbus/xenbus_dev_frontend.c
151
size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
drivers/xen/xenbus/xenbus_dev_frontend.c
153
ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);
drivers/xen/xenbus/xenbus_dev_frontend.c
156
rb->cons += sz - ret;
drivers/xen/xenbus/xenbus_dev_frontend.c
165
if (rb->cons == rb->len) {
drivers/xen/xenbus/xenbus_dev_frontend.c
166
list_del(&rb->list);
drivers/xen/xenbus/xenbus_dev_frontend.c
167
kfree(rb);
drivers/xen/xenbus/xenbus_dev_frontend.c
170
rb = list_entry(u->read_buffers.next,
drivers/xen/xenbus/xenbus_dev_frontend.c
191
struct read_buffer *rb;
drivers/xen/xenbus/xenbus_dev_frontend.c
198
rb = kmalloc_flex(*rb, msg, len);
drivers/xen/xenbus/xenbus_dev_frontend.c
199
if (rb == NULL)
drivers/xen/xenbus/xenbus_dev_frontend.c
202
rb->cons = 0;
drivers/xen/xenbus/xenbus_dev_frontend.c
203
rb->len = len;
drivers/xen/xenbus/xenbus_dev_frontend.c
205
memcpy(rb->msg, data, len);
drivers/xen/xenbus/xenbus_dev_frontend.c
207
list_add_tail(&rb->list, queue);
drivers/xen/xenbus/xenbus_dev_frontend.c
217
struct read_buffer *rb;
drivers/xen/xenbus/xenbus_dev_frontend.c
220
rb = list_entry(list->next, struct read_buffer, list);
drivers/xen/xenbus/xenbus_dev_frontend.c
222
kfree(rb);
drivers/xen/xenbus/xenbus_dev_frontend.c
311
struct read_buffer *rb, *tmp_rb;
drivers/xen/xenbus/xenbus_dev_frontend.c
332
list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
drivers/xen/xenbus/xenbus_dev_frontend.c
333
list_del(&rb->list);
drivers/xen/xenbus/xenbus_dev_frontend.c
334
kfree(rb);
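
The xenbus_dev_frontend.c entries drain a list of read_buffer records, each
carrying a consumed offset (cons) against its length (len) and freed once
fully copied out. A sketch of that loop, with the struct inferred from the
hits (sketch_ names are hypothetical):

#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct sketch_read_buffer {
        struct list_head list;
        unsigned int cons, len;
        char msg[];             /* flexible payload */
};

static ssize_t sketch_drain(struct list_head *queue, char __user *ubuf,
                            size_t len)
{
        size_t i = 0;

        while (i < len && !list_empty(queue)) {
                struct sketch_read_buffer *rb =
                        list_first_entry(queue, struct sketch_read_buffer, list);
                size_t sz = min_t(size_t, len - i, rb->len - rb->cons);
                unsigned long ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

                rb->cons += sz - ret;
                i += sz - ret;
                if (ret)
                        return i ? (ssize_t)i : -EFAULT;
                if (rb->cons == rb->len) { /* fully consumed: unlink and free */
                        list_del(&rb->list);
                        kfree(rb);
                }
        }
        return i;
}
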
fs/afs/server.c
515
struct rb_node *rb;
fs/afs/server.c
518
for (rb = rb_first(&cell->fs_servers); rb; rb = rb_next(rb)) {
fs/afs/server.c
519
server = rb_entry(rb, struct afs_server, uuid_rb);
fs/btrfs/extent_map.c
358
struct rb_node *rb;
fs/btrfs/extent_map.c
375
rb = rb_prev(&em->rb_node);
fs/btrfs/extent_map.c
376
merge = rb_entry_safe(rb, struct extent_map, rb_node);
fs/btrfs/extent_map.c
378
if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
fs/btrfs/extent_map.c
393
rb = rb_next(&em->rb_node);
fs/btrfs/extent_map.c
394
merge = rb_entry_safe(rb, struct extent_map, rb_node);
fs/btrfs/extent_map.c
396
if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
fs/btrfs/raid56.c
1788
const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
fs/btrfs/raid56.c
1791
u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
fs/erofs/erofs_fs.h
63
} __packed rb;
fs/erofs/super.c
325
((u64)le16_to_cpu(dsb->rb.blocks_hi) << 32);
fs/erofs/super.c
327
sbi->root_nid = le16_to_cpu(dsb->rb.rootnid_2b);
fs/jffs2/nodelist.c
124
struct rb_node *parent = &base->rb;
fs/jffs2/nodelist.c
131
base = rb_entry(parent, struct jffs2_node_frag, rb);
fs/jffs2/nodelist.c
134
link = &base->rb.rb_right;
fs/jffs2/nodelist.c
136
link = &base->rb.rb_left;
fs/jffs2/nodelist.c
143
rb_link_node(&newfrag->rb, &base->rb, link);
fs/jffs2/nodelist.c
189
rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
fs/jffs2/nodelist.c
193
rb_link_node(&holefrag->rb, NULL, &root->rb_node);
fs/jffs2/nodelist.c
195
rb_insert_color(&holefrag->rb, root);
fs/jffs2/nodelist.c
204
rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
fs/jffs2/nodelist.c
207
rb_link_node(&newfrag->rb, NULL, &root->rb_node);
fs/jffs2/nodelist.c
209
rb_insert_color(&newfrag->rb, root);
fs/jffs2/nodelist.c
298
rb_insert_color(&newfrag->rb, root);
fs/jffs2/nodelist.c
301
rb_insert_color(&newfrag2->rb, root);
fs/jffs2/nodelist.c
310
rb_insert_color(&newfrag->rb, root);
fs/jffs2/nodelist.c
317
rb_replace_node(&this->rb, &newfrag->rb, root);
fs/jffs2/nodelist.c
327
rb_insert_color(&this->rb, root);
fs/jffs2/nodelist.c
338
rb_erase(&this->rb, root);
fs/jffs2/nodelist.c
537
frag = rb_entry(next, struct jffs2_node_frag, rb);
fs/jffs2/nodelist.c
543
next = frag->rb.rb_right;
fs/jffs2/nodelist.c
545
next = frag->rb.rb_left;
fs/jffs2/nodelist.c
570
rbtree_postorder_for_each_entry_safe(frag, next, root, rb) {
fs/jffs2/nodelist.h
230
struct rb_node rb;
fs/jffs2/nodelist.h
271
struct rb_node rb;
fs/jffs2/nodelist.h
334
return rb_entry(node, struct jffs2_node_frag, rb);
fs/jffs2/nodelist.h
344
return rb_entry(node, struct jffs2_node_frag, rb);
fs/jffs2/nodelist.h
347
#define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
fs/jffs2/nodelist.h
348
#define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
fs/jffs2/nodelist.h
349
#define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
fs/jffs2/nodelist.h
350
#define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
fs/jffs2/nodelist.h
351
#define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
fs/jffs2/nodelist.h
352
#define frag_erase(frag, list) rb_erase(&frag->rb, list)
fs/jffs2/nodelist.h
354
#define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
fs/jffs2/nodelist.h
355
#define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
fs/jffs2/nodelist.h
356
#define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
fs/jffs2/nodelist.h
357
#define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
fs/jffs2/nodelist.h
358
#define tn_right(tn) rb_entry((tn)->rb.rb_right, struct jffs2_tmp_dnode_info, rb)
fs/jffs2/nodelist.h
359
#define tn_erase(tn, list) rb_erase(&tn->rb, list)
fs/jffs2/nodelist.h
360
#define tn_last(list) rb_entry(rb_last(list), struct jffs2_tmp_dnode_info, rb)
fs/jffs2/nodelist.h
361
#define tn_first(list) rb_entry(rb_first(list), struct jffs2_tmp_dnode_info, rb)
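
The jffs2 frag_next()/tn_next() macros above all follow one pattern: the
node type embeds a struct rb_node named rb, and rb_entry() (container_of())
maps a tree pointer back to its host. Generic sketch (sketch_ names are
hypothetical; unlike the macros, it NULL-checks the end of the tree):

#include <linux/rbtree.h>
#include <linux/types.h>

struct sketch_frag {
        struct rb_node rb;      /* embedded tree linkage, as in jffs2 */
        u32 ofs;
};

static struct sketch_frag *sketch_next(struct sketch_frag *frag)
{
        struct rb_node *n = rb_next(&frag->rb);

        return n ? rb_entry(n, struct sketch_frag, rb) : NULL;
}
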
fs/jffs2/readinode.c
185
tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);
fs/jffs2/readinode.c
188
next = tn->rb.rb_right;
fs/jffs2/readinode.c
190
next = tn->rb.rb_left;
fs/jffs2/readinode.c
286
rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
fs/jffs2/readinode.c
344
insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
fs/jffs2/readinode.c
346
link = &insert_point->rb.rb_right;
fs/jffs2/readinode.c
349
link = &insert_point->rb.rb_left;
fs/jffs2/readinode.c
351
link = &insert_point->rb.rb_right;
fs/jffs2/readinode.c
353
rb_link_node(&tn->rb, &insert_point->rb, link);
fs/jffs2/readinode.c
354
rb_insert_color(&tn->rb, &rii->tn_root);
fs/jffs2/readinode.c
432
this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
fs/jffs2/readinode.c
440
rb_link_node(&tn->rb, parent, link);
fs/jffs2/readinode.c
441
rb_insert_color(&tn->rb, ver_root);
fs/jffs2/readinode.c
476
eat_last(&rii->tn_root, &last->rb);
fs/jffs2/readinode.c
499
eat_last(&ver_root, &this->rb);
fs/jffs2/readinode.c
532
eat_last(&ver_root, &vers_next->rb);
fs/jffs2/readinode.c
548
rbtree_postorder_for_each_entry_safe(tn, next, list, rb) {
fs/kernfs/dir.c
1389
rbn = rb_next(&pos->rb);
fs/kernfs/dir.c
1406
WARN_ON_ONCE(rcu_access_pointer(kn->__parent) && RB_EMPTY_NODE(&kn->rb));
fs/kernfs/dir.c
1488
if (kernfs_parent(kn) && RB_EMPTY_NODE(&kn->rb))
fs/kernfs/dir.c
1680
WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
fs/kernfs/dir.c
1861
struct rb_node *node = rb_next(&pos->rb);
fs/kernfs/dir.c
1876
struct rb_node *node = rb_next(&pos->rb);
fs/kernfs/dir.c
30
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
fs/kernfs/dir.c
382
node = &pos->rb.rb_left;
fs/kernfs/dir.c
384
node = &pos->rb.rb_right;
fs/kernfs/dir.c
390
rb_link_node(&kn->rb, parent, node);
fs/kernfs/dir.c
391
rb_insert_color(&kn->rb, &kn_parent->dir.children);
fs/kernfs/dir.c
420
if (RB_EMPTY_NODE(&kn->rb))
fs/kernfs/dir.c
430
rb_erase(&kn->rb, &kn_parent->dir.children);
fs/kernfs/dir.c
431
RB_CLEAR_NODE(&kn->rb);
fs/kernfs/dir.c
657
RB_CLEAR_NODE(&kn->rb);
fs/kernfs/dir.c
68
struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
fs/kernfs/dir.c
70
if (ra != rb)
fs/kernfs/dir.c
74
db = kernfs_depth(rb->kn, b);
fs/nilfs2/recovery.c
365
struct nilfs_recovery_block *rb;
fs/nilfs2/recovery.c
373
rb = kmalloc_obj(*rb, GFP_NOFS);
fs/nilfs2/recovery.c
374
if (unlikely(!rb)) {
fs/nilfs2/recovery.c
378
rb->ino = ino;
fs/nilfs2/recovery.c
379
rb->blocknr = blocknr++;
fs/nilfs2/recovery.c
380
rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
fs/nilfs2/recovery.c
381
rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
fs/nilfs2/recovery.c
383
list_add_tail(&rb->list, head);
fs/nilfs2/recovery.c
402
struct nilfs_recovery_block *rb;
fs/nilfs2/recovery.c
404
rb = list_first_entry(head, struct nilfs_recovery_block, list);
fs/nilfs2/recovery.c
405
list_del(&rb->list);
fs/nilfs2/recovery.c
406
kfree(rb);
fs/nilfs2/recovery.c
507
struct nilfs_recovery_block *rb,
fs/nilfs2/recovery.c
513
bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
fs/nilfs2/recovery.c
529
struct nilfs_recovery_block *rb, *n;
fs/nilfs2/recovery.c
535
list_for_each_entry_safe(rb, n, head, list) {
fs/nilfs2/recovery.c
536
inode = nilfs_iget(sb, root, rb->ino);
fs/nilfs2/recovery.c
543
pos = rb->blkoff << inode->i_blkbits;
fs/nilfs2/recovery.c
555
err = nilfs_recovery_copy_block(nilfs, rb, pos, folio);
fs/nilfs2/recovery.c
578
err, (unsigned long)rb->ino,
fs/nilfs2/recovery.c
579
(unsigned long long)rb->blkoff);
fs/nilfs2/recovery.c
584
list_del_init(&rb->list);
fs/nilfs2/recovery.c
585
kfree(rb);
fs/ocfs2/alloc.c
385
struct ocfs2_refcount_block *rb = et->et_object;
fs/ocfs2/alloc.c
387
et->et_root_el = &rb->rf_list;
fs/ocfs2/alloc.c
393
struct ocfs2_refcount_block *rb = et->et_object;
fs/ocfs2/alloc.c
395
rb->rf_last_eb_blk = cpu_to_le64(blkno);
fs/ocfs2/alloc.c
400
struct ocfs2_refcount_block *rb = et->et_object;
fs/ocfs2/alloc.c
402
return le64_to_cpu(rb->rf_last_eb_blk);
fs/ocfs2/alloc.c
408
struct ocfs2_refcount_block *rb = et->et_object;
fs/ocfs2/alloc.c
410
le32_add_cpu(&rb->rf_clusters, clusters);
fs/ocfs2/refcounttree.c
101
rb->rf_signature);
fs/ocfs2/refcounttree.c
105
if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
fs/ocfs2/refcounttree.c
1072
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
1075
if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
fs/ocfs2/refcounttree.c
1083
el = &rb->rf_list;
fs/ocfs2/refcounttree.c
109
(unsigned long long)le64_to_cpu(rb->rf_blkno));
fs/ocfs2/refcounttree.c
113
if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
fs/ocfs2/refcounttree.c
1150
ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
fs/ocfs2/refcounttree.c
1153
if ((rb->rf_records.rl_recs[index].r_refcount ==
fs/ocfs2/refcounttree.c
1154
rb->rf_records.rl_recs[index + 1].r_refcount) &&
fs/ocfs2/refcounttree.c
1155
(le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
fs/ocfs2/refcounttree.c
1156
le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
fs/ocfs2/refcounttree.c
1157
le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
fs/ocfs2/refcounttree.c
1164
ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
fs/ocfs2/refcounttree.c
1169
if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
fs/ocfs2/refcounttree.c
117
le32_to_cpu(rb->rf_fs_generation));
fs/ocfs2/refcounttree.c
1170
ret = ocfs2_refcount_rec_adjacent(rb, index);
fs/ocfs2/refcounttree.c
1175
tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
fs/ocfs2/refcounttree.c
1188
static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
fs/ocfs2/refcounttree.c
1191
BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
fs/ocfs2/refcounttree.c
1192
rb->rf_records.rl_recs[index+1].r_refcount);
fs/ocfs2/refcounttree.c
1194
le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
fs/ocfs2/refcounttree.c
1195
le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
fs/ocfs2/refcounttree.c
1197
if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
fs/ocfs2/refcounttree.c
1198
memmove(&rb->rf_records.rl_recs[index + 1],
fs/ocfs2/refcounttree.c
1199
&rb->rf_records.rl_recs[index + 2],
fs/ocfs2/refcounttree.c
1201
(le16_to_cpu(rb->rf_records.rl_used) - index - 2));
fs/ocfs2/refcounttree.c
1203
memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
fs/ocfs2/refcounttree.c
1205
le16_add_cpu(&rb->rf_records.rl_used, -1);
fs/ocfs2/refcounttree.c
1211
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
fs/ocfs2/refcounttree.c
1215
ocfs2_refcount_rec_contig(rb, index);
fs/ocfs2/refcounttree.c
1225
ocfs2_rotate_refcount_rec_left(rb, index);
fs/ocfs2/refcounttree.c
1228
ocfs2_rotate_refcount_rec_left(rb, index);
fs/ocfs2/refcounttree.c
1241
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
1243
struct ocfs2_refcount_list *rl = &rb->rf_records;
fs/ocfs2/refcounttree.c
1269
ocfs2_refcount_rec_merge(rb, index);
fs/ocfs2/refcounttree.c
1447
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
1449
struct ocfs2_refcount_list *rl = &rb->rf_records;
fs/ocfs2/refcounttree.c
1655
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
1659
if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
fs/ocfs2/refcounttree.c
1662
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
fs/ocfs2/refcounttree.c
1663
old_cpos = le32_to_cpu(rb->rf_cpos);
fs/ocfs2/refcounttree.c
1719
rb->rf_cpos = cpu_to_le32(new_cpos);
fs/ocfs2/refcounttree.c
1738
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
1740
struct ocfs2_refcount_list *rf_list = &rb->rf_records;
fs/ocfs2/refcounttree.c
1743
BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
fs/ocfs2/refcounttree.c
1765
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
fs/ocfs2/refcounttree.c
1766
rf_list = &rb->rf_records;
fs/ocfs2/refcounttree.c
1792
ocfs2_refcount_rec_merge(rb, index);
fs/ocfs2/refcounttree.c
1828
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
1830
struct ocfs2_refcount_list *rf_list = &rb->rf_records;
fs/ocfs2/refcounttree.c
1835
BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
fs/ocfs2/refcounttree.c
1895
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
fs/ocfs2/refcounttree.c
1896
rf_list = &rb->rf_records;
fs/ocfs2/refcounttree.c
1963
ocfs2_refcount_rec_merge(rb, index);
fs/ocfs2/refcounttree.c
2076
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
2080
BUG_ON(rb->rf_records.rl_used);
fs/ocfs2/refcounttree.c
2085
le32_to_cpu(rb->rf_cpos));
fs/ocfs2/refcounttree.c
2088
ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
fs/ocfs2/refcounttree.c
2102
le16_to_cpu(rb->rf_suballoc_slot),
fs/ocfs2/refcounttree.c
2103
le64_to_cpu(rb->rf_suballoc_loc),
fs/ocfs2/refcounttree.c
2104
le64_to_cpu(rb->rf_blkno),
fs/ocfs2/refcounttree.c
2105
le16_to_cpu(rb->rf_suballoc_bit));
fs/ocfs2/refcounttree.c
2118
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
fs/ocfs2/refcounttree.c
2120
le32_add_cpu(&rb->rf_clusters, -1);
fs/ocfs2/refcounttree.c
2126
if (!rb->rf_list.l_next_free_rec) {
fs/ocfs2/refcounttree.c
2127
BUG_ON(rb->rf_clusters);
fs/ocfs2/refcounttree.c
2132
rb->rf_flags = 0;
fs/ocfs2/refcounttree.c
2133
rb->rf_parent = 0;
fs/ocfs2/refcounttree.c
2134
rb->rf_cpos = 0;
fs/ocfs2/refcounttree.c
2135
memset(&rb->rf_records, 0, sb->s_blocksize -
fs/ocfs2/refcounttree.c
2137
rb->rf_records.rl_count =
fs/ocfs2/refcounttree.c
2168
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
2170
struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
fs/ocfs2/refcounttree.c
2203
if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
fs/ocfs2/refcounttree.c
2373
struct ocfs2_refcount_block *rb;
fs/ocfs2/refcounttree.c
2393
rb = (struct ocfs2_refcount_block *)
fs/ocfs2/refcounttree.c
2396
if (le16_to_cpu(rb->rf_records.rl_used) +
fs/ocfs2/refcounttree.c
2398
le16_to_cpu(rb->rf_records.rl_count))
fs/ocfs2/refcounttree.c
2457
rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
fs/ocfs2/refcounttree.c
2459
if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
fs/ocfs2/refcounttree.c
2460
le16_to_cpu(rb->rf_records.rl_count))
fs/ocfs2/refcounttree.c
2479
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
fs/ocfs2/refcounttree.c
2480
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
fs/ocfs2/refcounttree.c
3509
struct ocfs2_refcount_block *rb;
fs/ocfs2/refcounttree.c
3536
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
fs/ocfs2/refcounttree.c
3545
if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
fs/ocfs2/refcounttree.c
3546
le16_to_cpu(rb->rf_records.rl_count))
fs/ocfs2/refcounttree.c
3565
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
fs/ocfs2/refcounttree.c
3566
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
fs/ocfs2/refcounttree.c
453
struct ocfs2_refcount_block *rb;
fs/ocfs2/refcounttree.c
479
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
fs/ocfs2/refcounttree.c
489
if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
fs/ocfs2/refcounttree.c
561
struct ocfs2_refcount_block *rb;
fs/ocfs2/refcounttree.c
623
rb = (struct ocfs2_refcount_block *)new_bh->b_data;
fs/ocfs2/refcounttree.c
624
memset(rb, 0, inode->i_sb->s_blocksize);
fs/ocfs2/refcounttree.c
625
strscpy(rb->rf_signature, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
fs/ocfs2/refcounttree.c
626
rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
fs/ocfs2/refcounttree.c
627
rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
fs/ocfs2/refcounttree.c
628
rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
fs/ocfs2/refcounttree.c
629
rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
fs/ocfs2/refcounttree.c
630
rb->rf_blkno = cpu_to_le64(first_blkno);
fs/ocfs2/refcounttree.c
631
rb->rf_count = cpu_to_le32(1);
fs/ocfs2/refcounttree.c
632
rb->rf_records.rl_count =
fs/ocfs2/refcounttree.c
635
rb->rf_generation = cpu_to_le32(osb->s_next_generation++);
fs/ocfs2/refcounttree.c
654
new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
fs/ocfs2/refcounttree.c
702
struct ocfs2_refcount_block *rb;
fs/ocfs2/refcounttree.c
735
rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
fs/ocfs2/refcounttree.c
736
le32_add_cpu(&rb->rf_count, 1);
fs/ocfs2/refcounttree.c
763
struct ocfs2_refcount_block *rb;
fs/ocfs2/refcounttree.c
77
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
782
rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
fs/ocfs2/refcounttree.c
788
if (le32_to_cpu(rb->rf_count) == 1) {
fs/ocfs2/refcounttree.c
789
blk = le64_to_cpu(rb->rf_blkno);
fs/ocfs2/refcounttree.c
790
bit = le16_to_cpu(rb->rf_suballoc_bit);
fs/ocfs2/refcounttree.c
791
if (rb->rf_suballoc_loc)
fs/ocfs2/refcounttree.c
792
bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
fs/ocfs2/refcounttree.c
798
le16_to_cpu(rb->rf_suballoc_slot));
fs/ocfs2/refcounttree.c
843
le32_add_cpu(&rb->rf_count, -1);
fs/ocfs2/refcounttree.c
846
if (!rb->rf_count) {
fs/ocfs2/refcounttree.c
883
struct ocfs2_refcount_block *rb =
fs/ocfs2/refcounttree.c
887
for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
fs/ocfs2/refcounttree.c
888
rec = &rb->rf_records.rl_recs[i];
fs/ocfs2/refcounttree.c
89
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
fs/ocfs2/refcounttree.c
906
if (i < le16_to_cpu(rb->rf_records.rl_used) &&
fs/ocfs2/refcounttree.c
97
if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
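
Throughout the refcounttree.c hits, every on-disk field is touched only through the endian accessors (le*_to_cpu(), cpu_to_le*(), le*_add_cpu()). A hedged sketch of that discipline; struct disk_hdr and hdr_check are illustrative and not the real ocfs2 layout:

#include <linux/types.h>
#include <linux/buffer_head.h>

struct disk_hdr {
        __le32 h_flags;
        __le32 h_clusters;
        __le64 h_blkno;
};

static int hdr_check(struct buffer_head *bh, struct disk_hdr *h)
{
        /* Compare in CPU byte order; the field stays little-endian on disk. */
        if (le64_to_cpu(h->h_blkno) != bh->b_blocknr)
                return -EINVAL;
        /* Read-modify-write on a __le32 field without an explicit swap pair. */
        le32_add_cpu(&h->h_clusters, 1);
        return 0;
}
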
fs/ocfs2/xattr.c
6313
struct ocfs2_refcount_block *rb =
fs/ocfs2/xattr.c
6337
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
fs/ocfs2/xattr.c
6338
*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
fs/ocfs2/xattr.c
6339
le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
fs/ocfs2/xattr.c
6776
struct ocfs2_refcount_block *rb;
fs/ocfs2/xattr.c
6800
rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
fs/ocfs2/xattr.c
6807
if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
fs/ocfs2/xattr.c
6808
*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
fs/ocfs2/xattr.c
6809
le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
fs/ubifs/debug.c
1787
struct rb_node rb;
fs/ubifs/debug.c
1834
fscki = rb_entry(parent, struct fsck_inode, rb);
fs/ubifs/debug.c
1890
rb_link_node(&fscki->rb, parent, p);
fs/ubifs/debug.c
1891
rb_insert_color(&fscki->rb, &fsckd->inodes);
fs/ubifs/debug.c
1912
fscki = rb_entry(p, struct fsck_inode, rb);
fs/ubifs/debug.c
2156
rbtree_postorder_for_each_entry_safe(fscki, n, &fsckd->inodes, rb)
fs/ubifs/debug.c
2181
fscki = rb_entry(this, struct fsck_inode, rb);
fs/ubifs/debug.c
615
struct rb_node *rb;
fs/ubifs/debug.c
657
for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
fs/ubifs/debug.c
658
bud = rb_entry(rb, struct ubifs_bud, rb);
fs/ubifs/debug.c
683
struct rb_node *rb;
fs/ubifs/debug.c
745
for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
fs/ubifs/debug.c
746
bud = rb_entry(rb, struct ubifs_bud, rb);
fs/ubifs/log.c
122
b = rb_entry(parent, struct ubifs_bud, rb);
fs/ubifs/log.c
130
rb_link_node(&bud->rb, parent, p);
fs/ubifs/log.c
131
rb_insert_color(&bud->rb, &c->buds);
fs/ubifs/log.c
311
bud = rb_entry(p1, struct ubifs_bud, rb);
fs/ubifs/log.c
38
bud = rb_entry(p, struct ubifs_bud, rb);
fs/ubifs/log.c
549
struct rb_node rb;
fs/ubifs/log.c
568
dr = rb_entry(parent, struct done_ref, rb);
fs/ubifs/log.c
583
rb_link_node(&dr->rb, parent, p);
fs/ubifs/log.c
584
rb_insert_color(&dr->rb, done_tree);
fs/ubifs/log.c
597
rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
fs/ubifs/log.c
71
bud = rb_entry(p, struct ubifs_bud, rb);
fs/ubifs/orphan.c
104
o = rb_entry(p, struct ubifs_orphan, rb);
fs/ubifs/orphan.c
118
rb_erase(&o->rb, &c->orph_tree);
fs/ubifs/orphan.c
139
rb_erase(&orph->rb, &c->orph_tree);
fs/ubifs/orphan.c
73
o = rb_entry(parent, struct ubifs_orphan, rb);
fs/ubifs/orphan.c
732
struct rb_node rb;
fs/ubifs/orphan.c
769
o = rb_entry(parent, struct check_orphan, rb);
fs/ubifs/orphan.c
779
rb_link_node(&orphan->rb, parent, p);
fs/ubifs/orphan.c
780
rb_insert_color(&orphan->rb, root);
fs/ubifs/orphan.c
791
o = rb_entry(p, struct check_orphan, rb);
fs/ubifs/orphan.c
806
rbtree_postorder_for_each_entry_safe(o, n, root, rb)
fs/ubifs/orphan.c
87
rb_link_node(&orphan->rb, parent, p);
fs/ubifs/orphan.c
88
rb_insert_color(&orphan->rb, &c->orph_tree);
fs/ubifs/recovery.c
1230
struct rb_node rb;
fs/ubifs/recovery.c
1254
e = rb_entry(parent, struct size_entry, rb);
fs/ubifs/recovery.c
1270
rb_link_node(&e->rb, parent, p);
fs/ubifs/recovery.c
1271
rb_insert_color(&e->rb, &c->size_tree);
fs/ubifs/recovery.c
1287
e = rb_entry(p, struct size_entry, rb);
fs/ubifs/recovery.c
1309
rb_erase(&e->rb, &c->size_tree);
fs/ubifs/recovery.c
1321
rbtree_postorder_for_each_entry_safe(e, n, &c->size_tree, rb) {
fs/ubifs/recovery.c
1510
rb_erase(&e->rb, &c->size_tree);
fs/ubifs/recovery.c
1534
e = rb_entry(this, struct size_entry, rb);
fs/ubifs/recovery.c
1581
rb_erase(&e->rb, &c->size_tree);
fs/ubifs/replay.c
306
struct replay_entry *ra, *rb;
fs/ubifs/replay.c
313
rb = list_entry(b, struct replay_entry, list);
fs/ubifs/replay.c
314
ubifs_assert(c, ra->sqnum != rb->sqnum);
fs/ubifs/replay.c
315
if (ra->sqnum > rb->sqnum)
fs/ubifs/super.c
921
rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb) {
fs/ubifs/tnc.c
184
rbtree_postorder_for_each_entry_safe(old_idx, n, &c->old_idx, rb)
fs/ubifs/tnc.c
56
o = rb_entry(parent, struct ubifs_old_idx, rb);
fs/ubifs/tnc.c
71
rb_link_node(&old_idx->rb, parent, p);
fs/ubifs/tnc.c
72
rb_insert_color(&old_idx->rb, &c->old_idx);
fs/ubifs/tnc_commit.c
170
o = rb_entry(p, struct ubifs_old_idx, rb);
fs/ubifs/ubifs.h
275
struct rb_node rb;
fs/ubifs/ubifs.h
717
struct rb_node rb;
fs/ubifs/ubifs.h
922
struct rb_node rb;
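
The ubifs hits show the canonical embedded-rb_node pattern: a struct rb_node member, rb_entry() to recover the container, and the descend/link/recolor insertion walk. A self-contained sketch with illustrative names (struct keyed, keyed_insert):

#include <linux/rbtree.h>

struct keyed {
        struct rb_node rb;
        unsigned long key;
};

static void keyed_insert(struct rb_root *root, struct keyed *new)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct keyed *e = rb_entry(*p, struct keyed, rb);

                parent = *p;
                if (new->key < e->key)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        rb_link_node(&new->rb, parent, p);      /* attach as a red leaf */
        rb_insert_color(&new->rb, root);        /* rebalance */
}
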
fs/xfs/scrub/bmap_repair.c
103
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
123
rb->reflink_scan = RLS_SET_IFLAG;
fs/xfs/scrub/bmap_repair.c
131
struct xrep_bmap *rb,
fs/xfs/scrub/bmap_repair.c
143
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
151
if (rb->reflink_scan == RLS_UNKNOWN && !unwritten) {
fs/xfs/scrub/bmap_repair.c
152
error = xrep_bmap_discover_shared(rb, startblock, blockcount);
fs/xfs/scrub/bmap_repair.c
163
fa = xfs_bmap_validate_extent(sc->ip, rb->whichfork, &irec);
fs/xfs/scrub/bmap_repair.c
169
trace_xrep_bmap_found(sc->ip, rb->whichfork, &irec);
fs/xfs/scrub/bmap_repair.c
174
error = xfarray_append(rb->bmap_records, &rbe);
fs/xfs/scrub/bmap_repair.c
178
rb->real_mappings++;
fs/xfs/scrub/bmap_repair.c
191
struct xrep_bmap *rb,
fs/xfs/scrub/bmap_repair.c
195
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
248
struct xrep_bmap *rb = priv;
fs/xfs/scrub/bmap_repair.c
252
if (xchk_should_terminate(rb->sc, &error))
fs/xfs/scrub/bmap_repair.c
255
if (rec->rm_owner != rb->sc->ip->i_ino)
fs/xfs/scrub/bmap_repair.c
258
error = xrep_bmap_check_fork_rmap(rb, cur, rec);
fs/xfs/scrub/bmap_repair.c
266
rb->nblocks += rec->rm_blockcount;
fs/xfs/scrub/bmap_repair.c
269
if (rb->whichfork == XFS_DATA_FORK &&
fs/xfs/scrub/bmap_repair.c
272
if (rb->whichfork == XFS_ATTR_FORK &&
fs/xfs/scrub/bmap_repair.c
277
if ((rec->rm_flags & XFS_RMAP_UNWRITTEN) && !rb->allow_unwritten)
fs/xfs/scrub/bmap_repair.c
283
rb->old_bmbt_block_count += rec->rm_blockcount;
fs/xfs/scrub/bmap_repair.c
284
return xfsb_bitmap_set(&rb->old_bmbt_blocks, fsbno,
fs/xfs/scrub/bmap_repair.c
288
return xrep_bmap_from_rmap(rb, rec->rm_offset, fsbno,
fs/xfs/scrub/bmap_repair.c
320
struct xrep_bmap *rb)
fs/xfs/scrub/bmap_repair.c
327
error = xfarray_sort(rb->bmap_records, xrep_bmap_extent_cmp,
fs/xfs/scrub/bmap_repair.c
332
foreach_xfarray_idx(rb->bmap_records, array_cur) {
fs/xfs/scrub/bmap_repair.c
335
if (xchk_should_terminate(rb->sc, &error))
fs/xfs/scrub/bmap_repair.c
338
error = xfarray_load(rb->bmap_records, array_cur, &rec);
fs/xfs/scrub/bmap_repair.c
356
struct xrep_bmap *rb,
fs/xfs/scrub/bmap_repair.c
359
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
366
error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_bmap_walk_rmap, rb);
fs/xfs/scrub/bmap_repair.c
412
struct xrep_bmap *rb = priv;
fs/xfs/scrub/bmap_repair.c
415
if (xchk_should_terminate(rb->sc, &error))
fs/xfs/scrub/bmap_repair.c
419
if (rec->rm_owner != rb->sc->ip->i_ino)
fs/xfs/scrub/bmap_repair.c
422
error = xrep_bmap_check_rtfork_rmap(rb->sc, cur, rec);
fs/xfs/scrub/bmap_repair.c
430
rb->nblocks += rec->rm_blockcount;
fs/xfs/scrub/bmap_repair.c
433
if (rb->whichfork == XFS_DATA_FORK &&
fs/xfs/scrub/bmap_repair.c
436
if (rb->whichfork == XFS_ATTR_FORK &&
fs/xfs/scrub/bmap_repair.c
440
return xrep_bmap_from_rmap(rb, rec->rm_offset,
fs/xfs/scrub/bmap_repair.c
450
struct xrep_bmap *rb,
fs/xfs/scrub/bmap_repair.c
453
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
466
error = xfs_rmap_query_all(sc->sr.rmap_cur, xrep_bmap_walk_rtrmap, rb);
fs/xfs/scrub/bmap_repair.c
473
xrep_bmap_scan_rtgroup(struct xrep_bmap *rb, struct xfs_rtgroup *rtg)
fs/xfs/scrub/bmap_repair.c
482
struct xrep_bmap *rb)
fs/xfs/scrub/bmap_repair.c
487
struct xfs_inode *ip = rb->sc->ip;
fs/xfs/scrub/bmap_repair.c
488
struct xfs_ifork *ifp = xfs_ifork_ptr(ip, rb->whichfork);
fs/xfs/scrub/bmap_repair.c
495
if (rb->whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0)
fs/xfs/scrub/bmap_repair.c
504
trace_xrep_bmap_found(ip, rb->whichfork, &irec);
fs/xfs/scrub/bmap_repair.c
506
if (xchk_should_terminate(rb->sc, &error))
fs/xfs/scrub/bmap_repair.c
509
error = xfarray_append(rb->bmap_records, &rbe);
fs/xfs/scrub/bmap_repair.c
524
struct xrep_bmap *rb)
fs/xfs/scrub/bmap_repair.c
526
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
538
error = xrep_bmap_scan_rtgroup(rb, rtg);
fs/xfs/scrub/bmap_repair.c
548
error = xrep_bmap_scan_ag(rb, pag);
fs/xfs/scrub/bmap_repair.c
555
return xrep_bmap_find_delalloc(rb);
fs/xfs/scrub/bmap_repair.c
569
struct xrep_bmap *rb = priv;
fs/xfs/scrub/bmap_repair.c
576
error = xfarray_load(rb->bmap_records, rb->array_cur++,
fs/xfs/scrub/bmap_repair.c
598
struct xrep_bmap *rb = priv;
fs/xfs/scrub/bmap_repair.c
600
return xrep_newbt_claim_block(cur, &rb->new_bmapbt, ptr);
fs/xfs/scrub/bmap_repair.c
619
struct xrep_bmap *rb)
fs/xfs/scrub/bmap_repair.c
621
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
622
struct xbtree_ifakeroot *ifake = &rb->new_bmapbt.ifake;
fs/xfs/scrub/bmap_repair.c
625
if (rb->reflink_scan == RLS_SET_IFLAG)
fs/xfs/scrub/bmap_repair.c
632
delta = ifake->if_blocks - rb->old_bmbt_block_count;
fs/xfs/scrub/bmap_repair.c
633
sc->ip->i_nblocks = rb->nblocks + delta;
fs/xfs/scrub/bmap_repair.c
651
struct xrep_bmap *rb)
fs/xfs/scrub/bmap_repair.c
655
struct xfs_ifork *ifp = rb->new_bmapbt.ifake.if_fork;
fs/xfs/scrub/bmap_repair.c
663
foreach_xfarray_idx(rb->bmap_records, array_cur) {
fs/xfs/scrub/bmap_repair.c
666
error = xfarray_load(rb->bmap_records, array_cur, &rec);
fs/xfs/scrub/bmap_repair.c
679
return xrep_ino_ensure_extent_count(rb->sc, rb->whichfork,
fs/xfs/scrub/bmap_repair.c
689
struct xrep_bmap *rb,
fs/xfs/scrub/bmap_repair.c
692
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
697
&rb->new_bmapbt.bload, rb->real_mappings);
fs/xfs/scrub/bmap_repair.c
712
rb->new_bmapbt.bload.nr_blocks, 0, true);
fs/xfs/scrub/bmap_repair.c
717
error = xrep_newbt_alloc_blocks(&rb->new_bmapbt,
fs/xfs/scrub/bmap_repair.c
718
rb->new_bmapbt.bload.nr_blocks);
fs/xfs/scrub/bmap_repair.c
723
rb->array_cur = XFARRAY_CURSOR_INIT;
fs/xfs/scrub/bmap_repair.c
724
error = xfs_btree_bload(bmap_cur, &rb->new_bmapbt.bload, rb);
fs/xfs/scrub/bmap_repair.c
735
return xrep_bmap_extents_load(rb);
fs/xfs/scrub/bmap_repair.c
747
struct xrep_bmap *rb)
fs/xfs/scrub/bmap_repair.c
750
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
752
struct xbtree_ifakeroot *ifake = &rb->new_bmapbt.ifake;
fs/xfs/scrub/bmap_repair.c
755
error = xrep_bmap_sort_records(rb);
fs/xfs/scrub/bmap_repair.c
763
xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, rb->whichfork);
fs/xfs/scrub/bmap_repair.c
764
error = xrep_newbt_init_inode(&rb->new_bmapbt, sc, rb->whichfork,
fs/xfs/scrub/bmap_repair.c
769
rb->new_bmapbt.bload.get_records = xrep_bmap_get_records;
fs/xfs/scrub/bmap_repair.c
770
rb->new_bmapbt.bload.claim_block = xrep_bmap_claim_block;
fs/xfs/scrub/bmap_repair.c
771
rb->new_bmapbt.bload.iroot_size = xrep_bmap_iroot_size;
fs/xfs/scrub/bmap_repair.c
785
if (rb->real_mappings <= XFS_IFORK_MAXEXT(sc->ip, rb->whichfork)) {
fs/xfs/scrub/bmap_repair.c
787
error = xrep_bmap_extents_load(rb);
fs/xfs/scrub/bmap_repair.c
790
error = xrep_bmap_btree_load(rb, bmap_cur);
fs/xfs/scrub/bmap_repair.c
801
xfs_bmbt_commit_staged_btree(bmap_cur, sc->tp, rb->whichfork);
fs/xfs/scrub/bmap_repair.c
805
error = xrep_bmap_reset_counters(rb);
fs/xfs/scrub/bmap_repair.c
810
error = xrep_newbt_commit(&rb->new_bmapbt);
fs/xfs/scrub/bmap_repair.c
820
xrep_newbt_cancel(&rb->new_bmapbt);
fs/xfs/scrub/bmap_repair.c
830
struct xrep_bmap *rb)
fs/xfs/scrub/bmap_repair.c
832
struct xfs_scrub *sc = rb->sc;
fs/xfs/scrub/bmap_repair.c
836
xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, rb->whichfork);
fs/xfs/scrub/bmap_repair.c
837
return xrep_reap_fsblocks(sc, &rb->old_bmbt_blocks, &oinfo);
fs/xfs/scrub/bmap_repair.c
925
struct xrep_bmap *rb;
fs/xfs/scrub/bmap_repair.c
936
rb = kzalloc_obj(struct xrep_bmap, XCHK_GFP_FLAGS);
fs/xfs/scrub/bmap_repair.c
937
if (!rb)
fs/xfs/scrub/bmap_repair.c
939
rb->sc = sc;
fs/xfs/scrub/bmap_repair.c
940
rb->whichfork = whichfork;
fs/xfs/scrub/bmap_repair.c
941
rb->reflink_scan = xrep_bmap_init_reflink_scan(sc, whichfork);
fs/xfs/scrub/bmap_repair.c
942
rb->allow_unwritten = allow_unwritten;
fs/xfs/scrub/bmap_repair.c
948
sizeof(struct xfs_bmbt_rec), &rb->bmap_records);
fs/xfs/scrub/bmap_repair.c
953
xfsb_bitmap_init(&rb->old_bmbt_blocks);
fs/xfs/scrub/bmap_repair.c
954
error = xrep_bmap_find_mappings(rb);
fs/xfs/scrub/bmap_repair.c
961
error = xrep_bmap_build_new_fork(rb);
fs/xfs/scrub/bmap_repair.c
966
error = xrep_bmap_remove_old_tree(rb);
fs/xfs/scrub/bmap_repair.c
971
xfsb_bitmap_destroy(&rb->old_bmbt_blocks);
fs/xfs/scrub/bmap_repair.c
972
xfarray_destroy(rb->bmap_records);
fs/xfs/scrub/bmap_repair.c
974
kfree(rb);
fs/xfs/scrub/bmap_repair.c
99
struct xrep_bmap *rb,
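
The bmap_repair.c hits revolve around staging records in an xfarray during the rmap scan and replaying them into the new fork afterwards. A hedged sketch of that scan-then-replay shape, assuming the xfarray API from fs/xfs/scrub/xfarray.h (xfarray_load(), foreach_xfarray_idx()); struct rec and replay_records are illustrative:

struct rec {
        u64 startoff;
        u64 blockcount;
};

static int replay_records(struct xfarray *records)
{
        struct rec r;
        xfarray_idx_t cur;
        int error;

        foreach_xfarray_idx(records, cur) {
                error = xfarray_load(records, cur, &r);
                if (error)
                        return error;
                /* ...feed r into the structure being rebuilt... */
        }
        return 0;
}
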
fs/xfs/xfs_extfree_item.c
388
struct xfs_extent_free_item *rb = xefi_entry(b);
fs/xfs/xfs_extfree_item.c
390
return ra->xefi_group->xg_gno - rb->xefi_group->xg_gno;
fs/xfs/xfs_refcount_item.c
267
struct xfs_refcount_intent *rb = ci_entry(b);
fs/xfs/xfs_refcount_item.c
269
return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
fs/xfs/xfs_rmap_item.c
268
struct xfs_rmap_intent *rb = ri_entry(b);
fs/xfs/xfs_rmap_item.c
270
return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
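
The three xfs *_item.c hits are list_sort() comparators ordering intents by group number. A sketch of that comparator shape; struct intent and its fields are illustrative:

#include <linux/list_sort.h>

struct intent {
        struct list_head li;
        unsigned int gno;
};

static int intent_cmp(void *priv, const struct list_head *a,
                      const struct list_head *b)
{
        const struct intent *ra = container_of(a, struct intent, li);
        const struct intent *rb = container_of(b, struct intent, li);

        /* Negative, zero, or positive, as list_sort() expects. */
        return ra->gno - rb->gno;
}
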
include/drm/drm_buddy.h
49
struct rb_node rb;
include/drm/drm_connector.h
1734
bool rb;
include/drm/drm_edid.h
461
bool rb);
include/drm/drm_gpuvm.h
148
} rb;
include/drm/drm_gpuvm.h
268
} rb;
include/drm/drm_gpuvm.h
451
if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
include/drm/drm_gpuvm.h
452
return list_next_entry(va, rb.entry);
include/drm/drm_gpuvm.h
511
list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
include/drm/drm_gpuvm.h
524
list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
include/drm/drm_mm.h
168
struct rb_node rb;
include/linux/fb.h
801
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
include/linux/intel-ish-client-if.h
104
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
include/linux/interval_tree.h
8
struct rb_node rb;
include/linux/interval_tree_generic.h
150
struct rb_node *rb = node->ITRB.rb_right, *prev; \
include/linux/interval_tree_generic.h
160
if (rb) { \
include/linux/interval_tree_generic.h
161
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
include/linux/interval_tree_generic.h
169
rb = rb_parent(&node->ITRB); \
include/linux/interval_tree_generic.h
170
if (!rb) \
include/linux/interval_tree_generic.h
173
node = rb_entry(rb, ITSTRUCT, ITRB); \
include/linux/interval_tree_generic.h
174
rb = node->ITRB.rb_right; \
include/linux/interval_tree_generic.h
175
} while (prev == rb); \
include/linux/kernfs.h
210
struct rb_node rb;
include/linux/mm_types.h
1041
struct rb_node rb;
include/linux/perf_event.h
1153
struct perf_buffer *rb;
include/linux/perf_event.h
865
struct perf_buffer *rb;
include/linux/rbtree_augmented.h
103
RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
include/linux/rbtree_augmented.h
105
while (rb != stop) { \
include/linux/rbtree_augmented.h
106
RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
include/linux/rbtree_augmented.h
109
rb = rb_parent(&node->RBFIELD); \
include/linux/rbtree_augmented.h
179
#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
include/linux/rbtree_augmented.h
180
#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
include/linux/rbtree_augmented.h
181
#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
include/linux/rbtree_augmented.h
183
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
include/linux/rbtree_augmented.h
185
rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
include/linux/rbtree_augmented.h
188
static inline void rb_set_parent_color(struct rb_node *rb,
include/linux/rbtree_augmented.h
191
rb->__rb_parent_color = (unsigned long)p + color;
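
The rbtree_augmented.h hits depend on one invariant: struct rb_node is at least 4-byte aligned, so the low bits of the parent pointer are free and bit 0 of __rb_parent_color stores the colour (RB_RED == 0, RB_BLACK == 1). A sketch of the unpacking side; the sketch_ helpers mirror the real macros rather than replace them:

static inline struct rb_node *sketch_rb_parent(const struct rb_node *rb)
{
        /* Mask off the colour bits to recover the aligned parent pointer. */
        return (struct rb_node *)(rb->__rb_parent_color & ~3UL);
}

static inline bool sketch_rb_is_black(const struct rb_node *rb)
{
        return rb->__rb_parent_color & 1UL;     /* colour lives in bit 0 */
}
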
include/linux/rmap.h
87
struct rb_node rb; /* locked by anon_vma->rwsem */
include/linux/skbuff.h
4127
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
include/linux/vhost_iotlb.h
8
struct rb_node rb;
include/media/v4l2-mem2mem.h
884
struct v4l2_requestbuffers *rb);
include/sound/hdaudio.h
332
struct snd_dma_buffer rb;
include/video/omapfb_dss.h
289
s16 rr, rg, rb;
kernel/bpf/range_tree.c
102
rb_link_node(&rn->rb_range_size, rb, link);
kernel/bpf/range_tree.c
44
static struct range_node *rb_to_range_node(struct rb_node *rb)
kernel/bpf/range_tree.c
46
return rb_entry(rb, struct range_node, rb_range_size);
kernel/bpf/range_tree.c
57
struct rb_node *rb = rt->range_size_root.rb_root.rb_node;
kernel/bpf/range_tree.c
60
while (rb) {
kernel/bpf/range_tree.c
61
struct range_node *rn = rb_to_range_node(rb);
kernel/bpf/range_tree.c
65
rb = rb->rb_right;
kernel/bpf/range_tree.c
67
rb = rb->rb_left;
kernel/bpf/range_tree.c
88
struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
kernel/bpf/range_tree.c
93
rb = *link;
kernel/bpf/range_tree.c
94
if (size > rn_size(rb_to_range_node(rb))) {
kernel/bpf/range_tree.c
95
link = &rb->rb_left;
kernel/bpf/range_tree.c
97
link = &rb->rb_right;
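
The range_tree.c hits implement a descend-and-remember search: go right when the current node is too small, left (recording a candidate) when it is large enough. A sketch of that shape with illustrative types:

struct range_ex {
        struct rb_node rb;
        u32 size;
};

static struct range_ex *range_find_ge(struct rb_root *root, u32 size)
{
        struct rb_node *n = root->rb_node;
        struct range_ex *best = NULL;

        while (n) {
                struct range_ex *r = rb_entry(n, struct range_ex, rb);

                if (r->size >= size) {
                        best = r;       /* candidate; look left for a tighter fit */
                        n = n->rb_left;
                } else {
                        n = n->rb_right;
                }
        }
        return best;
}
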
kernel/bpf/ringbuf.c
101
struct bpf_ringbuf *rb;
kernel/bpf/ringbuf.c
138
rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
kernel/bpf/ringbuf.c
140
if (rb) {
kernel/bpf/ringbuf.c
142
rb->pages = pages;
kernel/bpf/ringbuf.c
143
rb->nr_pages = nr_pages;
kernel/bpf/ringbuf.c
144
return rb;
kernel/bpf/ringbuf.c
156
struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);
kernel/bpf/ringbuf.c
158
wake_up_all(&rb->waitq);
kernel/bpf/ringbuf.c
174
struct bpf_ringbuf *rb;
kernel/bpf/ringbuf.c
176
rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
kernel/bpf/ringbuf.c
177
if (!rb)
kernel/bpf/ringbuf.c
180
raw_res_spin_lock_init(&rb->spinlock);
kernel/bpf/ringbuf.c
181
atomic_set(&rb->busy, 0);
kernel/bpf/ringbuf.c
182
init_waitqueue_head(&rb->waitq);
kernel/bpf/ringbuf.c
183
init_irq_work(&rb->work, bpf_ringbuf_notify);
kernel/bpf/ringbuf.c
185
rb->mask = data_sz - 1;
kernel/bpf/ringbuf.c
186
rb->consumer_pos = 0;
kernel/bpf/ringbuf.c
187
rb->producer_pos = 0;
kernel/bpf/ringbuf.c
188
rb->pending_pos = 0;
kernel/bpf/ringbuf.c
189
rb->overwrite_mode = overwrite_mode;
kernel/bpf/ringbuf.c
191
return rb;
kernel/bpf/ringbuf.c
219
rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node, overwrite_mode);
kernel/bpf/ringbuf.c
220
if (!rb_map->rb) {
kernel/bpf/ringbuf.c
228
static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
kernel/bpf/ringbuf.c
230
irq_work_sync(&rb->work);
kernel/bpf/ringbuf.c
235
struct page **pages = rb->pages;
kernel/bpf/ringbuf.c
236
int i, nr_pages = rb->nr_pages;
kernel/bpf/ringbuf.c
238
vunmap(rb);
kernel/bpf/ringbuf.c
249
bpf_ringbuf_free(rb_map->rb);
kernel/bpf/ringbuf.c
287
return remap_vmalloc_range(vma, rb_map->rb,
kernel/bpf/ringbuf.c
306
return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF);
kernel/bpf/ringbuf.c
315
static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb)
kernel/bpf/ringbuf.c
319
cons_pos = smp_load_acquire(&rb->consumer_pos);
kernel/bpf/ringbuf.c
321
if (unlikely(rb->overwrite_mode)) {
kernel/bpf/ringbuf.c
322
over_pos = smp_load_acquire(&rb->overwrite_pos);
kernel/bpf/ringbuf.c
323
prod_pos = smp_load_acquire(&rb->producer_pos);
kernel/bpf/ringbuf.c
326
prod_pos = smp_load_acquire(&rb->producer_pos);
kernel/bpf/ringbuf.c
331
static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb)
kernel/bpf/ringbuf.c
333
return rb->mask + 1;
kernel/bpf/ringbuf.c
342
poll_wait(filp, &rb_map->rb->waitq, pts);
kernel/bpf/ringbuf.c
344
if (ringbuf_avail_data_sz(rb_map->rb))
kernel/bpf/ringbuf.c
355
poll_wait(filp, &rb_map->rb->waitq, pts);
kernel/bpf/ringbuf.c
357
if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb))
kernel/bpf/ringbuf.c
364
struct bpf_ringbuf *rb;
kernel/bpf/ringbuf.c
369
rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
kernel/bpf/ringbuf.c
370
usage += (u64)rb->nr_pages << PAGE_SHIFT;
kernel/bpf/ringbuf.c
413
static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb,
kernel/bpf/ringbuf.c
416
return ((void *)hdr - (void *)rb) >> PAGE_SHIFT;
kernel/bpf/ringbuf.c
431
static bool bpf_ringbuf_has_space(const struct bpf_ringbuf *rb,
kernel/bpf/ringbuf.c
440
if (new_prod_pos - pend_pos > rb->mask)
kernel/bpf/ringbuf.c
444
if (unlikely(rb->overwrite_mode))
kernel/bpf/ringbuf.c
451
if (new_prod_pos - cons_pos > rb->mask)
kernel/bpf/ringbuf.c
463
static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
kernel/bpf/ringbuf.c
473
if (len > ringbuf_total_data_sz(rb))
kernel/bpf/ringbuf.c
476
cons_pos = smp_load_acquire(&rb->consumer_pos);
kernel/bpf/ringbuf.c
478
if (raw_res_spin_lock_irqsave(&rb->spinlock, flags))
kernel/bpf/ringbuf.c
481
pend_pos = rb->pending_pos;
kernel/bpf/ringbuf.c
482
prod_pos = rb->producer_pos;
kernel/bpf/ringbuf.c
486
hdr = (void *)rb->data + (pend_pos & rb->mask);
kernel/bpf/ringbuf.c
492
rb->pending_pos = pend_pos;
kernel/bpf/ringbuf.c
494
if (!bpf_ringbuf_has_space(rb, new_prod_pos, cons_pos, pend_pos)) {
kernel/bpf/ringbuf.c
495
raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
kernel/bpf/ringbuf.c
504
if (unlikely(rb->overwrite_mode)) {
kernel/bpf/ringbuf.c
505
over_pos = rb->overwrite_pos;
kernel/bpf/ringbuf.c
506
while (new_prod_pos - over_pos > rb->mask) {
kernel/bpf/ringbuf.c
507
hdr = (void *)rb->data + (over_pos & rb->mask);
kernel/bpf/ringbuf.c
524
WRITE_ONCE(rb->overwrite_pos, over_pos);
kernel/bpf/ringbuf.c
527
hdr = (void *)rb->data + (prod_pos & rb->mask);
kernel/bpf/ringbuf.c
528
pg_off = bpf_ringbuf_rec_pg_off(rb, hdr);
kernel/bpf/ringbuf.c
533
smp_store_release(&rb->producer_pos, new_prod_pos);
kernel/bpf/ringbuf.c
535
raw_res_spin_unlock_irqrestore(&rb->spinlock, flags);
kernel/bpf/ringbuf.c
548
return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size);
kernel/bpf/ringbuf.c
563
struct bpf_ringbuf *rb;
kernel/bpf/ringbuf.c
567
rb = bpf_ringbuf_restore_from_rec(hdr);
kernel/bpf/ringbuf.c
578
rec_pos = (void *)hdr - (void *)rb->data;
kernel/bpf/ringbuf.c
579
cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;
kernel/bpf/ringbuf.c
582
irq_work_queue(&rb->work);
kernel/bpf/ringbuf.c
584
irq_work_queue(&rb->work);
kernel/bpf/ringbuf.c
623
rec = __bpf_ringbuf_reserve(rb_map->rb, size);
kernel/bpf/ringbuf.c
643
struct bpf_ringbuf *rb;
kernel/bpf/ringbuf.c
645
rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
kernel/bpf/ringbuf.c
649
return ringbuf_avail_data_sz(rb);
kernel/bpf/ringbuf.c
651
return ringbuf_total_data_sz(rb);
kernel/bpf/ringbuf.c
653
return smp_load_acquire(&rb->consumer_pos);
kernel/bpf/ringbuf.c
655
return smp_load_acquire(&rb->producer_pos);
kernel/bpf/ringbuf.c
657
return smp_load_acquire(&rb->overwrite_pos);
kernel/bpf/ringbuf.c
690
sample = __bpf_ringbuf_reserve(rb_map->rb, size);
kernel/bpf/ringbuf.c
748
static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size)
kernel/bpf/ringbuf.c
755
prod_pos = smp_load_acquire(&rb->producer_pos);
kernel/bpf/ringbuf.c
760
cons_pos = smp_load_acquire(&rb->consumer_pos);
kernel/bpf/ringbuf.c
764
hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));
kernel/bpf/ringbuf.c
776
if (total_len > ringbuf_total_data_sz(rb))
kernel/bpf/ringbuf.c
790
smp_store_release(&rb->consumer_pos, cons_pos + total_len);
kernel/bpf/ringbuf.c
797
*sample = (void *)((uintptr_t)rb->data +
kernel/bpf/ringbuf.c
798
(uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));
kernel/bpf/ringbuf.c
803
static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags)
kernel/bpf/ringbuf.c
812
consumer_pos = rb->consumer_pos;
kernel/bpf/ringbuf.c
814
smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size);
kernel/bpf/ringbuf.c
820
struct bpf_ringbuf *rb;
kernel/bpf/ringbuf.c
829
rb = container_of(map, struct bpf_ringbuf_map, map)->rb;
kernel/bpf/ringbuf.c
832
if (!atomic_try_cmpxchg(&rb->busy, &busy, 1))
kernel/bpf/ringbuf.c
84
struct bpf_ringbuf *rb;
kernel/bpf/ringbuf.c
841
err = __bpf_user_ringbuf_peek(rb, &sample, &size);
kernel/bpf/ringbuf.c
856
__bpf_user_ringbuf_sample_release(rb, size, flags);
kernel/bpf/ringbuf.c
864
atomic_set_release(&rb->busy, 0);
kernel/bpf/ringbuf.c
867
irq_work_queue(&rb->work);
kernel/bpf/ringbuf.c
869
irq_work_queue(&rb->work);
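
The BPF ringbuf hits all build on unbounded 64-bit positions: consumer_pos, producer_pos, and pending_pos only ever grow, and rb->mask (data size minus one, a power of two) turns a position into a byte offset. A sketch of the two core calculations, with illustrative helper names:

/* Full when the span from consumer to the new producer exceeds the size. */
static bool ringbuf_would_overflow(u64 new_prod_pos, u64 cons_pos, u64 mask)
{
        return new_prod_pos - cons_pos > mask;
}

/* Wrap-around is pure masking; the positions themselves never wrap. */
static void *ringbuf_pos_to_ptr(void *data, u64 pos, u64 mask)
{
        return data + (pos & mask);
}
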
kernel/events/core.c
13643
struct perf_buffer *rb = NULL;
kernel/events/core.c
13705
rb = ring_buffer_get(output_event);
kernel/events/core.c
13706
if (!rb)
kernel/events/core.c
13710
if (!refcount_read(&rb->mmap_count)) {
kernel/events/core.c
13711
ring_buffer_put(rb);
kernel/events/core.c
13716
ring_buffer_attach(event, rb);
kernel/events/core.c
5310
struct perf_buffer *rb);
kernel/events/core.c
5817
if (event->rb) {
kernel/events/core.c
6283
struct perf_buffer *rb;
kernel/events/core.c
6306
rb = event->rb;
kernel/events/core.c
6307
if (rb)
kernel/events/core.c
6308
events = atomic_xchg(&rb->poll, 0);
kernel/events/core.c
6673
struct perf_buffer *rb;
kernel/events/core.c
6676
rb = rcu_dereference(event->rb);
kernel/events/core.c
6677
if (!rb || !rb->nr_pages) {
kernel/events/core.c
6681
rb_toggle_paused(rb, !!arg);
kernel/events/core.c
6797
struct perf_buffer *rb;
kernel/events/core.c
6800
rb = rcu_dereference(event->rb);
kernel/events/core.c
6801
if (!rb)
kernel/events/core.c
6804
userpg = rb->user_page;
kernel/events/core.c
6810
userpg->data_size = perf_data_size(rb);
kernel/events/core.c
6829
struct perf_buffer *rb;
kernel/events/core.c
6833
rb = rcu_dereference(event->rb);
kernel/events/core.c
6834
if (!rb)
kernel/events/core.c
6852
userpg = rb->user_page;
kernel/events/core.c
6878
struct perf_buffer *rb)
kernel/events/core.c
6885
if (event->rb) {
kernel/events/core.c
6892
old_rb = event->rb;
kernel/events/core.c
6901
if (rb) {
kernel/events/core.c
6907
spin_lock_irqsave(&rb->event_lock, flags);
kernel/events/core.c
6908
list_add_rcu(&event->rb_entry, &rb->event_list);
kernel/events/core.c
6909
spin_unlock_irqrestore(&rb->event_lock, flags);
kernel/events/core.c
6925
rcu_assign_pointer(event->rb, rb);
kernel/events/core.c
6940
struct perf_buffer *rb;
kernel/events/core.c
6946
rb = rcu_dereference(event->rb);
kernel/events/core.c
6947
if (rb) {
kernel/events/core.c
6948
list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
kernel/events/core.c
6956
struct perf_buffer *rb;
kernel/events/core.c
6962
rb = rcu_dereference(event->rb);
kernel/events/core.c
6963
if (rb) {
kernel/events/core.c
6964
if (!refcount_inc_not_zero(&rb->refcount))
kernel/events/core.c
6965
rb = NULL;
kernel/events/core.c
6969
return rb;
kernel/events/core.c
6972
void ring_buffer_put(struct perf_buffer *rb)
kernel/events/core.c
6974
if (!refcount_dec_and_test(&rb->refcount))
kernel/events/core.c
6977
WARN_ON_ONCE(!list_empty(&rb->event_list));
kernel/events/core.c
6979
call_rcu(&rb->rcu_head, rb_free_rcu);
kernel/events/core.c
7000
refcount_inc(&event->rb->mmap_count);
kernel/events/core.c
7003
refcount_inc(&event->rb->aux_mmap_count);
kernel/events/core.c
7023
struct perf_buffer *rb = ring_buffer_get(event);
kernel/events/core.c
7024
struct user_struct *mmap_user = rb->mmap_user;
kernel/events/core.c
7025
int mmap_locked = rb->mmap_locked;
kernel/events/core.c
7026
unsigned long size = perf_data_size(rb);
kernel/events/core.c
7037
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
kernel/events/core.c
7038
refcount_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
kernel/events/core.c
7048
atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
kernel/events/core.c
7049
atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
kernel/events/core.c
7052
rb_free_aux(rb);
kernel/events/core.c
7053
WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
kernel/events/core.c
7055
mutex_unlock(&rb->aux_mutex);
kernel/events/core.c
7058
if (refcount_dec_and_test(&rb->mmap_count))
kernel/events/core.c
7078
list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
kernel/events/core.c
7099
if (event->rb == rb)
kernel/events/core.c
7128
ring_buffer_put(rb); /* could be last */
kernel/events/core.c
7153
static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
kernel/events/core.c
7199
struct page *page = perf_mmap_to_page(rb, vma->vm_pgoff + pagenum);
kernel/events/core.c
7269
struct perf_buffer *rb;
kernel/events/core.c
7283
if (event->rb) {
kernel/events/core.c
7284
if (data_page_nr(event->rb) != nr_pages)
kernel/events/core.c
7296
if (refcount_inc_not_zero(&event->rb->mmap_count)) {
kernel/events/core.c
7320
rb = rb_alloc(nr_pages,
kernel/events/core.c
7324
if (!rb)
kernel/events/core.c
7327
refcount_set(&rb->mmap_count, 1);
kernel/events/core.c
7328
rb->mmap_user = get_current_user();
kernel/events/core.c
7329
rb->mmap_locked = extra;
kernel/events/core.c
7331
ring_buffer_attach(event, rb);
kernel/events/core.c
7348
struct perf_buffer *rb;
kernel/events/core.c
7351
rb = event->rb;
kernel/events/core.c
7352
if (!rb)
kernel/events/core.c
7355
guard(mutex)(&rb->aux_mutex);
kernel/events/core.c
7362
aux_offset = READ_ONCE(rb->user_page->aux_offset);
kernel/events/core.c
7363
aux_size = READ_ONCE(rb->user_page->aux_size);
kernel/events/core.c
7365
if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
kernel/events/core.c
7372
if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
kernel/events/core.c
7379
if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
kernel/events/core.c
7385
if (!refcount_inc_not_zero(&rb->mmap_count))
kernel/events/core.c
7388
if (rb_has_aux(rb)) {
kernel/events/core.c
7389
refcount_inc(&rb->aux_mmap_count);
kernel/events/core.c
7393
refcount_dec(&rb->mmap_count);
kernel/events/core.c
7397
WARN_ON(!rb && event->rb);
kernel/events/core.c
7402
ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
kernel/events/core.c
7405
refcount_dec(&rb->mmap_count);
kernel/events/core.c
7409
refcount_set(&rb->aux_mmap_count, 1);
kernel/events/core.c
7410
rb->aux_mmap_locked = extra;
kernel/events/core.c
7483
ret = map_range(event->rb, vma);
kernel/events/core.c
7875
struct perf_buffer *rb;
kernel/events/core.c
7888
rb = ring_buffer_get(sampler);
kernel/events/core.c
7889
if (!rb)
kernel/events/core.c
7896
if (READ_ONCE(rb->aux_in_sampling)) {
kernel/events/core.c
7899
size = min_t(size_t, size, perf_aux_size(rb));
kernel/events/core.c
7902
ring_buffer_put(rb);
kernel/events/core.c
7908
static long perf_pmu_snapshot_aux(struct perf_buffer *rb,
kernel/events/core.c
7930
WRITE_ONCE(rb->aux_in_sampling, 1);
kernel/events/core.c
7936
WRITE_ONCE(rb->aux_in_sampling, 0);
kernel/events/core.c
7947
struct perf_buffer *rb;
kernel/events/core.c
7954
rb = ring_buffer_get(sampler);
kernel/events/core.c
7955
if (!rb)
kernel/events/core.c
7958
size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
kernel/events/core.c
7983
ring_buffer_put(rb);
kernel/events/core.c
8362
struct perf_buffer *rb = handle->rb;
kernel/events/core.c
8363
int events = local_inc_return(&rb->events);
kernel/events/core.c
8366
local_sub(wakeup_events, &rb->events);
kernel/events/core.c
8367
local_inc(&rb->wakeup);
kernel/events/core.c
8761
struct perf_buffer *rb;
kernel/events/core.c
8766
rb = ring_buffer_get(event);
kernel/events/core.c
8767
if (!rb)
kernel/events/core.c
8775
if (READ_ONCE(rb->aux_in_pause_resume))
kernel/events/core.c
8778
WRITE_ONCE(rb->aux_in_pause_resume, 1);
kernel/events/core.c
8782
WRITE_ONCE(rb->aux_in_pause_resume, 0);
kernel/events/core.c
8784
ring_buffer_put(rb);
kernel/events/core.c
9012
struct perf_buffer *rb;
kernel/events/core.c
9020
struct perf_buffer *rb = ro->rb;
kernel/events/core.c
9041
if (rcu_dereference(parent->rb) == rb)
kernel/events/core.c
9050
.rb = event->rb,
kernel/events/core.c
9070
list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
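
A recurring shape in the events/core.c hits is ring_buffer_get(): look the buffer up under RCU and pin it only if its refcount can be raised from non-zero, so an object already on its way to being freed is never resurrected. A minimal sketch of that lookup-then-pin pattern with illustrative names:

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct buf {
        refcount_t refcount;
};

static struct buf *buf_get(struct buf __rcu **slot)
{
        struct buf *b;

        rcu_read_lock();
        b = rcu_dereference(*slot);
        /* inc-not-zero refuses objects whose refcount already hit zero */
        if (b && !refcount_inc_not_zero(&b->refcount))
                b = NULL;
        rcu_read_unlock();
        return b;
}
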
kernel/events/internal.h
108
static inline int page_order(struct perf_buffer *rb)
kernel/events/internal.h
110
return rb->page_order;
kernel/events/internal.h
115
static inline int page_order(struct perf_buffer *rb)
kernel/events/internal.h
121
static inline int data_page_nr(struct perf_buffer *rb)
kernel/events/internal.h
123
return rb->nr_pages << page_order(rb);
kernel/events/internal.h
126
static inline unsigned long perf_data_size(struct perf_buffer *rb)
kernel/events/internal.h
128
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
kernel/events/internal.h
131
static inline unsigned long perf_aux_size(struct perf_buffer *rb)
kernel/events/internal.h
133
return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
kernel/events/internal.h
151
struct perf_buffer *rb = handle->rb; \
kernel/events/internal.h
154
handle->page &= rb->nr_pages - 1; \
kernel/events/internal.h
155
handle->addr = rb->data_pages[handle->page]; \
kernel/events/internal.h
156
handle->size = PAGE_SIZE << page_order(rb); \
kernel/events/internal.h
63
extern void rb_free(struct perf_buffer *rb);
kernel/events/internal.h
67
struct perf_buffer *rb;
kernel/events/internal.h
69
rb = container_of(rcu_head, struct perf_buffer, rcu_head);
kernel/events/internal.h
70
rb_free(rb);
kernel/events/internal.h
73
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
kernel/events/internal.h
75
if (!pause && rb->nr_pages)
kernel/events/internal.h
76
rb->paused = 0;
kernel/events/internal.h
78
rb->paused = 1;
kernel/events/internal.h
84
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
kernel/events/internal.h
86
extern void rb_free_aux(struct perf_buffer *rb);
kernel/events/internal.h
88
extern void ring_buffer_put(struct perf_buffer *rb);
kernel/events/internal.h
90
static inline bool rb_has_aux(struct perf_buffer *rb)
kernel/events/internal.h
92
return !!rb->aux_nr_pages;
kernel/events/internal.h
99
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);
kernel/events/ring_buffer.c
114
WRITE_ONCE(rb->user_page->data_head, head);
kernel/events/ring_buffer.c
122
WRITE_ONCE(rb->nest, 0);
kernel/events/ring_buffer.c
129
if (unlikely(head != local_read(&rb->head))) {
kernel/events/ring_buffer.c
130
WRITE_ONCE(rb->nest, 1);
kernel/events/ring_buffer.c
134
if (handle->wakeup != local_read(&rb->wakeup))
kernel/events/ring_buffer.c
158
struct perf_buffer *rb;
kernel/events/ring_buffer.c
174
rb = rcu_dereference(event->rb);
kernel/events/ring_buffer.c
175
if (unlikely(!rb))
kernel/events/ring_buffer.c
178
if (unlikely(rb->paused)) {
kernel/events/ring_buffer.c
179
if (rb->nr_pages) {
kernel/events/ring_buffer.c
180
local_inc(&rb->lost);
kernel/events/ring_buffer.c
186
handle->rb = rb;
kernel/events/ring_buffer.c
190
have_lost = local_read(&rb->lost);
kernel/events/ring_buffer.c
199
offset = local_read(&rb->head);
kernel/events/ring_buffer.c
202
tail = READ_ONCE(rb->user_page->data_tail);
kernel/events/ring_buffer.c
203
if (!rb->overwrite) {
kernel/events/ring_buffer.c
205
perf_data_size(rb),
kernel/events/ring_buffer.c
22
atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM);
kernel/events/ring_buffer.c
226
} while (!local_try_cmpxchg(&rb->head, &offset, head));
kernel/events/ring_buffer.c
238
if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
kernel/events/ring_buffer.c
239
local_add(rb->watermark, &rb->wakeup);
kernel/events/ring_buffer.c
241
page_shift = PAGE_SHIFT + page_order(rb);
kernel/events/ring_buffer.c
243
handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
kernel/events/ring_buffer.c
245
handle->addr = rb->data_pages[handle->page] + offset;
kernel/events/ring_buffer.c
253
lost_event.lost = local_xchg(&rb->lost, 0);
kernel/events/ring_buffer.c
264
local_inc(&rb->lost);
kernel/events/ring_buffer.c
315
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
kernel/events/ring_buffer.c
317
long max_size = perf_data_size(rb);
kernel/events/ring_buffer.c
320
rb->watermark = min(max_size, watermark);
kernel/events/ring_buffer.c
322
if (!rb->watermark)
kernel/events/ring_buffer.c
323
rb->watermark = max_size / 2;
kernel/events/ring_buffer.c
326
rb->overwrite = 0;
kernel/events/ring_buffer.c
328
rb->overwrite = 1;
kernel/events/ring_buffer.c
330
refcount_set(&rb->refcount, 1);
kernel/events/ring_buffer.c
332
INIT_LIST_HEAD(&rb->event_list);
kernel/events/ring_buffer.c
333
spin_lock_init(&rb->event_lock);
kernel/events/ring_buffer.c
339
if (!rb->nr_pages)
kernel/events/ring_buffer.c
340
rb->paused = 1;
kernel/events/ring_buffer.c
342
mutex_init(&rb->aux_mutex);
kernel/events/ring_buffer.c
377
struct perf_buffer *rb;
kernel/events/ring_buffer.c
388
rb = ring_buffer_get(output_event);
kernel/events/ring_buffer.c
389
if (!rb)
kernel/events/ring_buffer.c
392
if (!rb_has_aux(rb))
kernel/events/ring_buffer.c
403
if (!refcount_read(&rb->aux_mmap_count))
kernel/events/ring_buffer.c
406
if (!refcount_inc_not_zero(&rb->aux_refcount))
kernel/events/ring_buffer.c
409
nest = READ_ONCE(rb->aux_nest);
kernel/events/ring_buffer.c
417
WRITE_ONCE(rb->aux_nest, nest + 1);
kernel/events/ring_buffer.c
419
aux_head = rb->aux_head;
kernel/events/ring_buffer.c
42
struct perf_buffer *rb = handle->rb;
kernel/events/ring_buffer.c
421
handle->rb = rb;
kernel/events/ring_buffer.c
432
if (!rb->aux_overwrite) {
kernel/events/ring_buffer.c
433
aux_tail = READ_ONCE(rb->user_page->aux_tail);
kernel/events/ring_buffer.c
434
handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
kernel/events/ring_buffer.c
435
if (aux_head - aux_tail < perf_aux_size(rb))
kernel/events/ring_buffer.c
436
handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
kernel/events/ring_buffer.c
446
WRITE_ONCE(rb->aux_nest, 0);
kernel/events/ring_buffer.c
451
return handle->rb->aux_priv;
kernel/events/ring_buffer.c
455
rb_free_aux(rb);
kernel/events/ring_buffer.c
458
ring_buffer_put(rb);
kernel/events/ring_buffer.c
465
static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
kernel/events/ring_buffer.c
467
if (rb->aux_overwrite)
kernel/events/ring_buffer.c
470
if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
kernel/events/ring_buffer.c
471
rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
kernel/events/ring_buffer.c
491
struct perf_buffer *rb = handle->rb;
kernel/events/ring_buffer.c
495
if (rb->aux_overwrite) {
kernel/events/ring_buffer.c
499
rb->aux_head = aux_head;
kernel/events/ring_buffer.c
50
(*(volatile unsigned int *)&rb->nest)++;
kernel/events/ring_buffer.c
503
aux_head = rb->aux_head;
kernel/events/ring_buffer.c
504
rb->aux_head += size;
kernel/events/ring_buffer.c
51
handle->wakeup = local_read(&rb->wakeup);
kernel/events/ring_buffer.c
523
WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
kernel/events/ring_buffer.c
524
if (rb_need_aux_wakeup(rb))
kernel/events/ring_buffer.c
535
WRITE_ONCE(rb->aux_nest, 0);
kernel/events/ring_buffer.c
537
rb_free_aux(rb);
kernel/events/ring_buffer.c
538
ring_buffer_put(rb);
kernel/events/ring_buffer.c
548
struct perf_buffer *rb = handle->rb;
kernel/events/ring_buffer.c
553
rb->aux_head += size;
kernel/events/ring_buffer.c
555
WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
kernel/events/ring_buffer.c
556
if (rb_need_aux_wakeup(rb)) {
kernel/events/ring_buffer.c
558
handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
kernel/events/ring_buffer.c
56
struct perf_buffer *rb = handle->rb;
kernel/events/ring_buffer.c
561
handle->head = rb->aux_head;
kernel/events/ring_buffer.c
574
return handle->rb->aux_priv;
kernel/events/ring_buffer.c
585
struct perf_buffer *rb = aux_handle->rb;
kernel/events/ring_buffer.c
589
from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
kernel/events/ring_buffer.c
590
to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
kernel/events/ring_buffer.c
599
addr = rb->aux_pages[from >> PAGE_SHIFT];
kernel/events/ring_buffer.c
608
from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
kernel/events/ring_buffer.c
64
nest = READ_ONCE(rb->nest);
kernel/events/ring_buffer.c
642
static void rb_free_aux_page(struct perf_buffer *rb, int idx)
kernel/events/ring_buffer.c
644
struct page *page = virt_to_page(rb->aux_pages[idx]);
kernel/events/ring_buffer.c
650
static void __rb_free_aux(struct perf_buffer *rb)
kernel/events/ring_buffer.c
66
WRITE_ONCE(rb->nest, nest - 1);
kernel/events/ring_buffer.c
662
if (rb->aux_priv) {
kernel/events/ring_buffer.c
663
rb->free_aux(rb->aux_priv);
kernel/events/ring_buffer.c
664
rb->free_aux = NULL;
kernel/events/ring_buffer.c
665
rb->aux_priv = NULL;
kernel/events/ring_buffer.c
668
if (rb->aux_nr_pages) {
kernel/events/ring_buffer.c
669
for (pg = 0; pg < rb->aux_nr_pages; pg++)
kernel/events/ring_buffer.c
670
rb_free_aux_page(rb, pg);
kernel/events/ring_buffer.c
672
kfree(rb->aux_pages);
kernel/events/ring_buffer.c
673
rb->aux_nr_pages = 0;
kernel/events/ring_buffer.c
677
int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
kernel/events/ring_buffer.c
731
rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
kernel/events/ring_buffer.c
733
if (!rb->aux_pages)
kernel/events/ring_buffer.c
736
rb->free_aux = event->pmu->free_aux;
kernel/events/ring_buffer.c
737
for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
kernel/events/ring_buffer.c
741
order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
kernel/events/ring_buffer.c
746
for (last = rb->aux_nr_pages + (1 << page_private(page));
kernel/events/ring_buffer.c
747
last > rb->aux_nr_pages; rb->aux_nr_pages++)
kernel/events/ring_buffer.c
748
rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
kernel/events/ring_buffer.c
759
struct page *page = virt_to_page(rb->aux_pages[0]);
kernel/events/ring_buffer.c
765
rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
kernel/events/ring_buffer.c
767
if (!rb->aux_priv)
kernel/events/ring_buffer.c
778
refcount_set(&rb->aux_refcount, 1);
kernel/events/ring_buffer.c
780
rb->aux_overwrite = overwrite;
kernel/events/ring_buffer.c
781
rb->aux_watermark = watermark;
kernel/events/ring_buffer.c
785
rb->aux_pgoff = pgoff;
kernel/events/ring_buffer.c
787
__rb_free_aux(rb);
kernel/events/ring_buffer.c
792
void rb_free_aux(struct perf_buffer *rb)
kernel/events/ring_buffer.c
794
if (refcount_dec_and_test(&rb->aux_refcount))
kernel/events/ring_buffer.c
795
__rb_free_aux(rb);
kernel/events/ring_buffer.c
80
head = local_read(&rb->head);
kernel/events/ring_buffer.c
805
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
kernel/events/ring_buffer.c
807
if (pgoff > rb->nr_pages)
kernel/events/ring_buffer.c
811
return virt_to_page(rb->user_page);
kernel/events/ring_buffer.c
813
return virt_to_page(rb->data_pages[pgoff - 1]);
kernel/events/ring_buffer.c
838
struct perf_buffer *rb;
kernel/events/ring_buffer.c
849
rb = kzalloc_node(size, GFP_KERNEL, node);
kernel/events/ring_buffer.c
850
if (!rb)
kernel/events/ring_buffer.c
853
rb->user_page = perf_mmap_alloc_page(cpu);
kernel/events/ring_buffer.c
854
if (!rb->user_page)
kernel/events/ring_buffer.c
858
rb->data_pages[i] = perf_mmap_alloc_page(cpu);
kernel/events/ring_buffer.c
859
if (!rb->data_pages[i])
kernel/events/ring_buffer.c
863
rb->nr_pages = nr_pages;
kernel/events/ring_buffer.c
865
ring_buffer_init(rb, watermark, flags);
kernel/events/ring_buffer.c
867
return rb;
kernel/events/ring_buffer.c
871
perf_mmap_free_page(rb->data_pages[i]);
kernel/events/ring_buffer.c
873
perf_mmap_free_page(rb->user_page);
kernel/events/ring_buffer.c
876
kfree(rb);
kernel/events/ring_buffer.c
882
void rb_free(struct perf_buffer *rb)
kernel/events/ring_buffer.c
886
perf_mmap_free_page(rb->user_page);
kernel/events/ring_buffer.c
887
for (i = 0; i < rb->nr_pages; i++)
kernel/events/ring_buffer.c
888
perf_mmap_free_page(rb->data_pages[i]);
kernel/events/ring_buffer.c
889
kfree(rb);
kernel/events/ring_buffer.c
894
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
kernel/events/ring_buffer.c
897
if (pgoff > data_page_nr(rb))
kernel/events/ring_buffer.c
900
return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
kernel/events/ring_buffer.c
905
struct perf_buffer *rb;
kernel/events/ring_buffer.c
907
rb = container_of(work, struct perf_buffer, work);
kernel/events/ring_buffer.c
909
vfree(rb->user_page);
kernel/events/ring_buffer.c
910
kfree(rb);
kernel/events/ring_buffer.c
913
void rb_free(struct perf_buffer *rb)
kernel/events/ring_buffer.c
915
schedule_work(&rb->work);
kernel/events/ring_buffer.c
920
struct perf_buffer *rb;
kernel/events/ring_buffer.c
929
rb = kzalloc_node(size, GFP_KERNEL, node);
kernel/events/ring_buffer.c
930
if (!rb)
kernel/events/ring_buffer.c
933
INIT_WORK(&rb->work, rb_free_work);
kernel/events/ring_buffer.c
939
rb->user_page = all_buf;
kernel/events/ring_buffer.c
940
rb->data_pages[0] = all_buf + PAGE_SIZE;
kernel/events/ring_buffer.c
942
rb->nr_pages = 1;
kernel/events/ring_buffer.c
943
rb->page_order = ilog2(nr_pages);
kernel/events/ring_buffer.c
946
ring_buffer_init(rb, watermark, flags);
kernel/events/ring_buffer.c
948
return rb;
kernel/events/ring_buffer.c
951
kfree(rb);
kernel/events/ring_buffer.c
960
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
kernel/events/ring_buffer.c
962
if (rb->aux_nr_pages) {
kernel/events/ring_buffer.c
964
if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
kernel/events/ring_buffer.c
968
if (pgoff >= rb->aux_pgoff) {
kernel/events/ring_buffer.c
969
int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
kernel/events/ring_buffer.c
970
return virt_to_page(rb->aux_pages[aux_pgoff]);
kernel/events/ring_buffer.c
974
return __perf_mmap_to_page(rb, pgoff);
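
The events/ring_buffer.c hits map a linear head offset onto an array of order-page_order data pages: nr_pages is a power of two, so page index and in-page offset fall out of shifts and masks. A sketch of that mapping with illustrative parameters:

static void *offset_to_addr(void **data_pages, int nr_pages,
                            int page_shift, unsigned long offset)
{
        int page = (offset >> page_shift) & (nr_pages - 1);

        return data_pages[page] + (offset & ((1UL << page_shift) - 1));
}
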
kernel/printk/printk.c
1111
static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
kernel/printk/printk.c
1119
if (!prb_reserve(&e, rb, &dest_r))
kernel/printk/printk.c
2517
#define prb_read_valid(rb, seq, r) false
kernel/printk/printk.c
2518
#define prb_first_valid_seq(rb) 0
kernel/printk/printk.c
2519
#define prb_next_seq(rb) 0
kernel/printk/printk_ringbuffer.c
1053
static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
kernel/printk/printk_ringbuffer.c
1056
struct prb_data_ring *data_ring = &rb->text_data_ring;
kernel/printk/printk_ringbuffer.c
1089
!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
kernel/printk/printk_ringbuffer.c
1147
static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
kernel/printk/printk_ringbuffer.c
1150
struct prb_data_ring *data_ring = &rb->text_data_ring;
kernel/printk/printk_ringbuffer.c
1193
!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
kernel/printk/printk_ringbuffer.c
1421
bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.c
1424
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1447
e->rb = rb;
kernel/printk/printk_ringbuffer.c
1465
if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
kernel/printk/printk_ringbuffer.c
1471
r->text_buf = data_alloc(rb, r->text_buf_size,
kernel/printk/printk_ringbuffer.c
1474
if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
kernel/printk/printk_ringbuffer.c
1489
if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
kernel/printk/printk_ringbuffer.c
1495
r->text_buf = data_realloc(rb, r->text_buf_size,
kernel/printk/printk_ringbuffer.c
1503
e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
kernel/printk/printk_ringbuffer.c
1532
static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb)
kernel/printk/printk_ringbuffer.c
1534
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1545
return __ulseq_to_u64seq(rb, ulseq);
kernel/printk/printk_ringbuffer.c
1548
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
kernel/printk/printk_ringbuffer.c
1556
static void desc_update_last_finalized(struct printk_ringbuffer *rb)
kernel/printk/printk_ringbuffer.c
1558
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1559
u64 old_seq = desc_last_finalized_seq(rb);
kernel/printk/printk_ringbuffer.c
1570
while (_prb_read_valid(rb, &try_seq, NULL, NULL)) {
kernel/printk/printk_ringbuffer.c
1608
old_seq = __ulseq_to_u64seq(rb, oldval);
kernel/printk/printk_ringbuffer.c
1617
static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id)
kernel/printk/printk_ringbuffer.c
1619
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1625
desc_update_last_finalized(rb);
kernel/printk/printk_ringbuffer.c
1654
bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.c
1657
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1663
if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
kernel/printk/printk_ringbuffer.c
1674
if (!desc_reserve(rb, &id)) {
kernel/printk/printk_ringbuffer.c
1676
atomic_long_inc(&rb->fail);
kernel/printk/printk_ringbuffer.c
1696
e->rb = rb;
kernel/printk/printk_ringbuffer.c
1722
desc_make_final(rb, DESC_ID(id - 1));
kernel/printk/printk_ringbuffer.c
1724
r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
kernel/printk/printk_ringbuffer.c
1735
e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
kernel/printk/printk_ringbuffer.c
1748
struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1804
struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1816
desc_make_final(e->rb, e->id);
kernel/printk/printk_ringbuffer.c
1839
desc_update_last_finalized(e->rb);
kernel/printk/printk_ringbuffer.c
1964
static int prb_read(struct printk_ringbuffer *rb, u64 seq,
kernel/printk/printk_ringbuffer.c
1967
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
1993
if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
kernel/printk/printk_ringbuffer.c
2003
u64 prb_first_seq(struct printk_ringbuffer *rb)
kernel/printk/printk_ringbuffer.c
2005
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
2012
id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
kernel/printk/printk_ringbuffer.c
2062
u64 prb_next_reserve_seq(struct printk_ringbuffer *rb)
kernel/printk/printk_ringbuffer.c
2064
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
2081
last_finalized_seq = desc_last_finalized_seq(rb);
kernel/printk/printk_ringbuffer.c
2164
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
kernel/printk/printk_ringbuffer.c
2170
while ((err = prb_read(rb, *seq, r, line_count))) {
kernel/printk/printk_ringbuffer.c
2171
tail_seq = prb_first_seq(rb);
kernel/printk/printk_ringbuffer.c
2205
((*seq + 1) < prb_next_reserve_seq(rb))) {
kernel/printk/printk_ringbuffer.c
2239
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
kernel/printk/printk_ringbuffer.c
2242
return _prb_read_valid(rb, &seq, r, NULL);
kernel/printk/printk_ringbuffer.c
2270
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
kernel/printk/printk_ringbuffer.c
2277
return _prb_read_valid(rb, &seq, &r, line_count);
kernel/printk/printk_ringbuffer.c
2295
u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
kernel/printk/printk_ringbuffer.c
2299
if (!_prb_read_valid(rb, &seq, NULL, NULL))
kernel/printk/printk_ringbuffer.c
2322
u64 prb_next_seq(struct printk_ringbuffer *rb)
kernel/printk/printk_ringbuffer.c
2326
seq = desc_last_finalized_seq(rb);
kernel/printk/printk_ringbuffer.c
2342
while (_prb_read_valid(rb, &seq, NULL, NULL))
kernel/printk/printk_ringbuffer.c
2365
void prb_init(struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.c
2373
rb->desc_ring.count_bits = descbits;
kernel/printk/printk_ringbuffer.c
2374
rb->desc_ring.descs = descs;
kernel/printk/printk_ringbuffer.c
2375
rb->desc_ring.infos = infos;
kernel/printk/printk_ringbuffer.c
2376
atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
kernel/printk/printk_ringbuffer.c
2377
atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
kernel/printk/printk_ringbuffer.c
2378
atomic_long_set(&rb->desc_ring.last_finalized_seq, 0);
kernel/printk/printk_ringbuffer.c
2380
rb->text_data_ring.size_bits = textbits;
kernel/printk/printk_ringbuffer.c
2381
rb->text_data_ring.data = text_buf;
kernel/printk/printk_ringbuffer.c
2382
atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
kernel/printk/printk_ringbuffer.c
2383
atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
kernel/printk/printk_ringbuffer.c
2385
atomic_long_set(&rb->fail, 0);
kernel/printk/printk_ringbuffer.c
582
static bool data_make_reusable(struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.c
588
struct prb_data_ring *data_ring = &rb->text_data_ring;
kernel/printk/printk_ringbuffer.c
589
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
650
static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
kernel/printk/printk_ringbuffer.c
652
struct prb_data_ring *data_ring = &rb->text_data_ring;
kernel/printk/printk_ringbuffer.c
693
if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) {
kernel/printk/printk_ringbuffer.c
785
static bool desc_push_tail(struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.c
788
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
830
if (!data_push_tail(rb, desc.text_blk_lpos.next))
kernel/printk/printk_ringbuffer.c
893
static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
kernel/printk/printk_ringbuffer.c
895
struct prb_desc_ring *desc_ring = &rb->desc_ring;
kernel/printk/printk_ringbuffer.c
938
if (!desc_push_tail(rb, id_prev_wrap))
kernel/printk/printk_ringbuffer.h
114
struct printk_ringbuffer *rb;
kernel/printk/printk_ringbuffer.h
327
bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.h
329
bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.h
334
void prb_init(struct printk_ringbuffer *rb,
kernel/printk/printk_ringbuffer.h
377
#define prb_for_each_record(from, rb, s, r) \
kernel/printk/printk_ringbuffer.h
378
for ((s) = from; prb_read_valid(rb, s, r); (s) = (r)->info->seq + 1)
kernel/printk/printk_ringbuffer.h
395
#define prb_for_each_info(from, rb, s, i, lc) \
kernel/printk/printk_ringbuffer.h
396
for ((s) = from; prb_read_valid_info(rb, s, i, lc); (s) = (i)->seq + 1)
kernel/printk/printk_ringbuffer.h
398
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
kernel/printk/printk_ringbuffer.h
400
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
kernel/printk/printk_ringbuffer.h
403
u64 prb_first_seq(struct printk_ringbuffer *rb);
kernel/printk/printk_ringbuffer.h
404
u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
kernel/printk/printk_ringbuffer.h
405
u64 prb_next_seq(struct printk_ringbuffer *rb);
kernel/printk/printk_ringbuffer.h
406
u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
kernel/printk/printk_ringbuffer.h
411
#define __ulseq_to_u64seq(rb, ulseq) (ulseq)
kernel/printk/printk_ringbuffer.h
412
#define ULSEQ_MAX(rb) (-1)
kernel/printk/printk_ringbuffer.h
417
#define ULSEQ_MAX(rb) __u64seq_to_ulseq(prb_first_seq(rb) + 0x80000000UL)
kernel/printk/printk_ringbuffer.h
419
static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
kernel/printk/printk_ringbuffer.h
421
u64 rb_first_seq = prb_first_seq(rb);
kernel/printk/printk_ringbuffer_kunit_test.c
245
static inline void prbtest_prb_reinit(struct printk_ringbuffer *rb)
kernel/printk/printk_ringbuffer_kunit_test.c
247
prb_init(rb, rb->text_data_ring.data, rb->text_data_ring.size_bits, rb->desc_ring.descs,
kernel/printk/printk_ringbuffer_kunit_test.c
248
rb->desc_ring.count_bits, rb->desc_ring.infos);
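
For reference, the printk ringbuffer hits above exercise the writer/reader API declared in kernel/printk/printk_ringbuffer.h. A minimal sketch of the documented usage pattern follows (kernel-internal code, not standalone; my_rb is a placeholder ringbuffer, e.g. one set up with DEFINE_PRINTKRB() or prb_init()):

	/* Writer side: reserve space, fill the record, commit it. */
	struct prb_reserved_entry e;
	struct printk_record r;

	prb_rec_init_wr(&r, 64);		/* request 64 bytes of text space */
	if (prb_reserve(&e, &my_rb, &r)) {
		snprintf(r.text_buf, r.text_buf_size, "hello");
		r.info->text_len = strlen(r.text_buf);
		prb_commit(&e);			/* record becomes visible to readers */
	}

	/* Reader side: iterate every finalized record from sequence 0. */
	struct printk_info info;
	char text[64];
	u64 seq;

	prb_rec_init_rd(&r, &info, text, sizeof(text));
	prb_for_each_record(0, &my_rb, seq, &r) {
		/* r.info->seq and r.text_buf describe one record */
	}
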
lib/interval_tree.c
10
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
lib/rbtree.c
59
static inline void rb_set_black(struct rb_node *rb)
lib/rbtree.c
61
rb->__rb_parent_color += RB_BLACK;
lib/rbtree_test.c
100
new = &parent->rb.rb_left;
lib/rbtree_test.c
102
new = &parent->rb.rb_right;
lib/rbtree_test.c
106
rb_link_node(&node->rb, rb_parent, new);
lib/rbtree_test.c
107
rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
lib/rbtree_test.c
121
parent = rb_entry(rb_parent, struct test_node, rb);
lib/rbtree_test.c
125
new = &parent->rb.rb_left;
lib/rbtree_test.c
127
new = &parent->rb.rb_right;
lib/rbtree_test.c
133
rb_link_node(&node->rb, rb_parent, new);
lib/rbtree_test.c
134
rb_insert_augmented_cached(&node->rb, root,
lib/rbtree_test.c
141
rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
lib/rbtree_test.c
147
rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
lib/rbtree_test.c
159
static bool is_red(struct rb_node *rb)
lib/rbtree_test.c
161
return !(rb->__rb_parent_color & 1);
lib/rbtree_test.c
164
static int black_path_count(struct rb_node *rb)
lib/rbtree_test.c
167
for (count = 0; rb; rb = rb_parent(rb))
lib/rbtree_test.c
168
count += !is_red(rb);
lib/rbtree_test.c
176
rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
lib/rbtree_test.c
184
struct rb_node *rb;
lib/rbtree_test.c
186
for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
lib/rbtree_test.c
194
struct rb_node *rb;
lib/rbtree_test.c
198
for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
lib/rbtree_test.c
199
struct test_node *node = rb_entry(rb, struct test_node, rb);
lib/rbtree_test.c
201
WARN_ON_ONCE(is_red(rb) &&
lib/rbtree_test.c
202
(!rb_parent(rb) || is_red(rb_parent(rb))));
lib/rbtree_test.c
204
blacks = black_path_count(rb);
lib/rbtree_test.c
206
WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
lib/rbtree_test.c
207
blacks != black_path_count(rb));
lib/rbtree_test.c
21
struct rb_node rb;
lib/rbtree_test.c
221
struct rb_node *rb;
lib/rbtree_test.c
224
for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
lib/rbtree_test.c
225
struct test_node *node = rb_entry(rb, struct test_node, rb);
lib/rbtree_test.c
227
if (node->rb.rb_left) {
lib/rbtree_test.c
228
subtree = rb_entry(node->rb.rb_left, struct test_node,
lib/rbtree_test.c
229
rb)->augmented;
lib/rbtree_test.c
233
if (node->rb.rb_right) {
lib/rbtree_test.c
234
subtree = rb_entry(node->rb.rb_right, struct test_node,
lib/rbtree_test.c
235
rb)->augmented;
lib/rbtree_test.c
40
if (key < rb_entry(parent, struct test_node, rb)->key)
lib/rbtree_test.c
46
rb_link_node(&node->rb, parent, new);
lib/rbtree_test.c
47
rb_insert_color(&node->rb, &root->rb_root);
lib/rbtree_test.c
58
if (key < rb_entry(parent, struct test_node, rb)->key)
lib/rbtree_test.c
66
rb_link_node(&node->rb, parent, new);
lib/rbtree_test.c
67
rb_insert_color_cached(&node->rb, root, leftmost);
lib/rbtree_test.c
72
rb_erase(&node->rb, &root->rb_root);
lib/rbtree_test.c
77
rb_erase_cached(&node->rb, root);
lib/rbtree_test.c
84
struct test_node, rb, u32, augmented, NODE_VAL)
lib/rbtree_test.c
96
parent = rb_entry(rb_parent, struct test_node, rb);
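
The lib/rbtree_test.c hits above all follow the canonical rbtree insertion pattern from Documentation/core-api/rbtree.rst: walk down to find the empty link pointer, then rb_link_node() plus rb_insert_color(). A self-contained sketch (struct mytype and its key field are illustrative):

	struct mytype {
		struct rb_node rb;
		unsigned long key;
	};

	/* Insert @new into @root, keeping keys sorted; false on duplicate key. */
	static bool mytype_insert(struct rb_root *root, struct mytype *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct mytype *this = rb_entry(*link, struct mytype, rb);

			parent = *link;
			if (new->key < this->key)
				link = &(*link)->rb_left;
			else if (new->key > this->key)
				link = &(*link)->rb_right;
			else
				return false;
		}

		rb_link_node(&new->rb, parent, link);	/* attach the node */
		rb_insert_color(&new->rb, root);	/* rebalance and recolor */
		return true;
	}
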
mm/damon/stat.c
76
const struct damon_region *rb = *(const struct damon_region **)b;
mm/damon/stat.c
78
return damon_stat_idletime(ra) - damon_stat_idletime(rb);
mm/interval_tree.c
23
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
mm/interval_tree.c
38
if (!prev->shared.rb.rb_right) {
mm/interval_tree.c
40
link = &prev->shared.rb.rb_right;
mm/interval_tree.c
42
parent = rb_entry(prev->shared.rb.rb_right,
mm/interval_tree.c
43
struct vm_area_struct, shared.rb);
mm/interval_tree.c
46
while (parent->shared.rb.rb_left) {
mm/interval_tree.c
47
parent = rb_entry(parent->shared.rb.rb_left,
mm/interval_tree.c
48
struct vm_area_struct, shared.rb);
mm/interval_tree.c
52
link = &parent->shared.rb.rb_left;
mm/interval_tree.c
56
rb_link_node(&node->shared.rb, &parent->shared.rb, link);
mm/interval_tree.c
57
rb_insert_augmented(&node->shared.rb, &root->rb_root,
mm/interval_tree.c
71
INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
mm/kmemleak.c
423
struct rb_node *rb = object_tree(objflags)->rb_node;
mm/kmemleak.c
426
while (rb) {
mm/kmemleak.c
430
object = rb_entry(rb, struct kmemleak_object, rb_node);
mm/kmemleak.c
434
rb = object->rb_node.rb_left;
mm/kmemleak.c
436
rb = object->rb_node.rb_right;
mm/mmu_notifier.c
1052
if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) {
mm/mmu_notifier.c
1060
WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb));
mm/mmu_notifier.c
155
if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb))
mm/mmu_notifier.c
896
RB_CLEAR_NODE(&interval_sub->interval_tree.rb);
mm/nommu.c
1024
struct rb_node *rb;
mm/nommu.c
1085
for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
mm/nommu.c
1086
pregion = rb_entry(rb, struct vm_region, vm_rb);
mm/swapfile.c
2527
struct rb_node *rb = sis->swap_extent_root.rb_node;
mm/swapfile.c
2528
struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
mm/swapfile.c
2530
rb_erase(rb, &sis->swap_extent_root);
mm/swapfile.c
268
struct rb_node *rb = rb_first(&sis->swap_extent_root);
mm/swapfile.c
269
return rb_entry(rb, struct swap_extent, rb_node);
mm/swapfile.c
274
struct rb_node *rb = rb_next(&se->rb_node);
mm/swapfile.c
275
return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
mm/swapfile.c
319
struct rb_node *rb;
mm/swapfile.c
321
rb = sis->swap_extent_root.rb_node;
mm/swapfile.c
322
while (rb) {
mm/swapfile.c
323
se = rb_entry(rb, struct swap_extent, rb_node);
mm/swapfile.c
325
rb = rb->rb_left;
mm/swapfile.c
327
rb = rb->rb_right;
net/packet/af_packet.c
1128
const struct packet_ring_buffer *rb,
net/packet/af_packet.c
1132
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
net/packet/af_packet.c
1140
static int prb_previous_blk_num(struct packet_ring_buffer *rb)
net/packet/af_packet.c
1143
if (rb->prb_bdqc.kactive_blk_num)
net/packet/af_packet.c
1144
prev = rb->prb_bdqc.kactive_blk_num-1;
net/packet/af_packet.c
1146
prev = rb->prb_bdqc.knum_blocks-1;
net/packet/af_packet.c
1152
struct packet_ring_buffer *rb,
net/packet/af_packet.c
1155
unsigned int previous = prb_previous_blk_num(rb);
net/packet/af_packet.c
1156
return prb_lookup_block(po, rb, previous, status);
net/packet/af_packet.c
1160
struct packet_ring_buffer *rb,
net/packet/af_packet.c
1164
return packet_previous_frame(po, rb, status);
net/packet/af_packet.c
1166
return __prb_previous_block(po, rb, status);
net/packet/af_packet.c
1170
struct packet_ring_buffer *rb)
net/packet/af_packet.c
1175
return packet_increment_head(rb);
net/packet/af_packet.c
1185
struct packet_ring_buffer *rb,
net/packet/af_packet.c
1188
unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
net/packet/af_packet.c
1189
return packet_lookup_frame(po, rb, previous, status);
net/packet/af_packet.c
1197
static void packet_inc_pending(struct packet_ring_buffer *rb)
net/packet/af_packet.c
1199
this_cpu_inc(*rb->pending_refcnt);
net/packet/af_packet.c
1202
static void packet_dec_pending(struct packet_ring_buffer *rb)
net/packet/af_packet.c
1204
this_cpu_dec(*rb->pending_refcnt);
net/packet/af_packet.c
1207
static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
net/packet/af_packet.c
1213
if (rb->pending_refcnt == NULL)
net/packet/af_packet.c
1217
refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
net/packet/af_packet.c
195
struct packet_ring_buffer *rb,
net/packet/af_packet.c
4419
struct packet_ring_buffer *rb;
net/packet/af_packet.c
4426
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
net/packet/af_packet.c
4433
if (packet_read_pending(rb))
net/packet/af_packet.c
4442
if (unlikely(rb->pg_vec))
net/packet/af_packet.c
4472
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
net/packet/af_packet.c
4473
if (unlikely(rb->frames_per_block == 0))
net/packet/af_packet.c
4475
if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
net/packet/af_packet.c
4477
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
net/packet/af_packet.c
4490
init_prb_bdqc(po, rb, pg_vec, req_u);
net/packet/af_packet.c
4537
swap(rb->pg_vec, pg_vec);
net/packet/af_packet.c
4539
swap(rb->rx_owner_map, rx_owner_map);
net/packet/af_packet.c
4540
rb->frame_max = (req->tp_frame_nr - 1);
net/packet/af_packet.c
4541
rb->head = 0;
net/packet/af_packet.c
4542
rb->frame_size = req->tp_frame_size;
net/packet/af_packet.c
4545
swap(rb->pg_vec_order, order);
net/packet/af_packet.c
4546
swap(rb->pg_vec_len, req->tp_block_nr);
net/packet/af_packet.c
4548
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
net/packet/af_packet.c
4585
struct packet_ring_buffer *rb;
net/packet/af_packet.c
4596
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
net/packet/af_packet.c
4597
if (rb->pg_vec) {
net/packet/af_packet.c
4598
expected_size += rb->pg_vec_len
net/packet/af_packet.c
4599
* rb->pg_vec_pages
net/packet/af_packet.c
4612
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
net/packet/af_packet.c
4613
if (rb->pg_vec == NULL)
net/packet/af_packet.c
4616
for (i = 0; i < rb->pg_vec_len; i++) {
net/packet/af_packet.c
4618
void *kaddr = rb->pg_vec[i].buffer;
net/packet/af_packet.c
4621
for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
net/packet/af_packet.c
514
const struct packet_ring_buffer *rb,
net/packet/af_packet.c
521
pg_vec_pos = position / rb->frames_per_block;
net/packet/af_packet.c
522
frame_offset = position % rb->frames_per_block;
net/packet/af_packet.c
524
h.raw = rb->pg_vec[pg_vec_pos].buffer +
net/packet/af_packet.c
525
(frame_offset * rb->frame_size);
net/packet/af_packet.c
534
struct packet_ring_buffer *rb,
net/packet/af_packet.c
537
return packet_lookup_frame(po, rb, rb->head, status);
net/packet/af_packet.c
635
struct packet_ring_buffer *rb,
net/packet/af_packet.c
639
struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
net/packet/af_packet.c
975
static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
net/packet/af_packet.c
978
struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
net/sched/sch_cake.c
1494
u32 rb = cake_heap_get_backlog(q, r);
net/sched/sch_cake.c
1496
if (rb > mb) {
net/sched/sch_cake.c
1498
mb = rb;
net/sched/sch_htb.c
376
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
net/sched/sch_htb.c
378
if (RB_EMPTY_NODE(rb)) {
net/sched/sch_htb.c
381
rb_erase(rb, root);
net/sched/sch_htb.c
382
RB_CLEAR_NODE(rb);
net/sunrpc/xprtrdma/rpc_rdma.c
536
struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
net/sunrpc/xprtrdma/rpc_rdma.c
548
ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
net/sunrpc/xprtrdma/rpc_rdma.c
560
struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
net/sunrpc/xprtrdma/rpc_rdma.c
563
sge->addr = rdmab_addr(rb);
net/sunrpc/xprtrdma/rpc_rdma.c
565
sge->lkey = rdmab_lkey(rb);
net/sunrpc/xprtrdma/rpc_rdma.c
567
ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
net/sunrpc/xprtrdma/rpc_rdma.c
579
struct rpcrdma_regbuf *rb = req->rl_sendbuf;
net/sunrpc/xprtrdma/rpc_rdma.c
581
if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
net/sunrpc/xprtrdma/rpc_rdma.c
584
sge->addr = rdmab_addr(rb);
net/sunrpc/xprtrdma/rpc_rdma.c
586
sge->lkey = rdmab_lkey(rb);
net/sunrpc/xprtrdma/rpc_rdma.c
588
ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
net/sunrpc/xprtrdma/rpc_rdma.c
600
struct rpcrdma_regbuf *rb = req->rl_sendbuf;
net/sunrpc/xprtrdma/rpc_rdma.c
611
sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
net/sunrpc/xprtrdma/rpc_rdma.c
613
if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
net/sunrpc/xprtrdma/rpc_rdma.c
617
sge->lkey = rdmab_lkey(rb);
net/sunrpc/xprtrdma/rpc_rdma.c
642
struct rpcrdma_regbuf *rb = req->rl_sendbuf;
net/sunrpc/xprtrdma/rpc_rdma.c
645
sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
net/sunrpc/xprtrdma/rpc_rdma.c
647
if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
net/sunrpc/xprtrdma/rpc_rdma.c
651
sge->lkey = rdmab_lkey(rb);
net/sunrpc/xprtrdma/transport.c
537
struct rpcrdma_regbuf *rb, size_t size,
net/sunrpc/xprtrdma/transport.c
540
if (unlikely(rdmab_length(rb) < size)) {
net/sunrpc/xprtrdma/transport.c
541
if (!rpcrdma_regbuf_realloc(rb, size, flags))
net/sunrpc/xprtrdma/verbs.c
1246
struct rpcrdma_regbuf *rb;
net/sunrpc/xprtrdma/verbs.c
1248
rb = kmalloc_node(sizeof(*rb), XPRTRDMA_GFP_FLAGS, node);
net/sunrpc/xprtrdma/verbs.c
1249
if (!rb)
net/sunrpc/xprtrdma/verbs.c
1251
rb->rg_data = kmalloc_node(size, XPRTRDMA_GFP_FLAGS, node);
net/sunrpc/xprtrdma/verbs.c
1252
if (!rb->rg_data) {
net/sunrpc/xprtrdma/verbs.c
1253
kfree(rb);
net/sunrpc/xprtrdma/verbs.c
1257
rb->rg_device = NULL;
net/sunrpc/xprtrdma/verbs.c
1258
rb->rg_direction = direction;
net/sunrpc/xprtrdma/verbs.c
1259
rb->rg_iov.length = size;
net/sunrpc/xprtrdma/verbs.c
1260
return rb;
net/sunrpc/xprtrdma/verbs.c
1278
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
net/sunrpc/xprtrdma/verbs.c
1286
rpcrdma_regbuf_dma_unmap(rb);
net/sunrpc/xprtrdma/verbs.c
1287
kfree(rb->rg_data);
net/sunrpc/xprtrdma/verbs.c
1289
rb->rg_data = buf;
net/sunrpc/xprtrdma/verbs.c
1290
rb->rg_iov.length = size;
net/sunrpc/xprtrdma/verbs.c
1302
struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/verbs.c
1306
if (rb->rg_direction == DMA_NONE)
net/sunrpc/xprtrdma/verbs.c
1309
rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
net/sunrpc/xprtrdma/verbs.c
1310
rdmab_length(rb), rb->rg_direction);
net/sunrpc/xprtrdma/verbs.c
1311
if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
net/sunrpc/xprtrdma/verbs.c
1312
trace_xprtrdma_dma_maperr(rdmab_addr(rb));
net/sunrpc/xprtrdma/verbs.c
1316
rb->rg_device = device;
net/sunrpc/xprtrdma/verbs.c
1317
rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
net/sunrpc/xprtrdma/verbs.c
1321
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/verbs.c
1323
if (!rb)
net/sunrpc/xprtrdma/verbs.c
1326
if (!rpcrdma_regbuf_is_mapped(rb))
net/sunrpc/xprtrdma/verbs.c
1329
ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
net/sunrpc/xprtrdma/verbs.c
1330
rb->rg_direction);
net/sunrpc/xprtrdma/verbs.c
1331
rb->rg_device = NULL;
net/sunrpc/xprtrdma/verbs.c
1334
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/verbs.c
1336
rpcrdma_regbuf_dma_unmap(rb);
net/sunrpc/xprtrdma/verbs.c
1337
if (rb)
net/sunrpc/xprtrdma/verbs.c
1338
kfree(rb->rg_data);
net/sunrpc/xprtrdma/verbs.c
1339
kfree(rb);
net/sunrpc/xprtrdma/verbs.c
82
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
net/sunrpc/xprtrdma/verbs.c
83
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
net/sunrpc/xprtrdma/verbs.c
861
struct rpcrdma_regbuf *rb;
net/sunrpc/xprtrdma/verbs.c
868
rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
net/sunrpc/xprtrdma/verbs.c
870
if (!rb)
net/sunrpc/xprtrdma/verbs.c
873
if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
net/sunrpc/xprtrdma/verbs.c
876
req->rl_rdmabuf = rb;
net/sunrpc/xprtrdma/verbs.c
877
xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
net/sunrpc/xprtrdma/verbs.c
881
rpcrdma_regbuf_free(rb);
net/sunrpc/xprtrdma/xprt_rdma.h
129
static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/xprt_rdma.h
131
return rb->rg_iov.addr;
net/sunrpc/xprtrdma/xprt_rdma.h
134
static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/xprt_rdma.h
136
return rb->rg_iov.length;
net/sunrpc/xprtrdma/xprt_rdma.h
139
static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/xprt_rdma.h
141
return rb->rg_iov.lkey;
net/sunrpc/xprtrdma/xprt_rdma.h
144
static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/xprt_rdma.h
146
return rb->rg_device;
net/sunrpc/xprtrdma/xprt_rdma.h
149
static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/xprt_rdma.h
151
return rb->rg_data;
net/sunrpc/xprtrdma/xprt_rdma.h
492
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
net/sunrpc/xprtrdma/xprt_rdma.h
495
struct rpcrdma_regbuf *rb);
net/sunrpc/xprtrdma/xprt_rdma.h
502
static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/xprt_rdma.h
504
return rb->rg_device != NULL;
net/sunrpc/xprtrdma/xprt_rdma.h
515
struct rpcrdma_regbuf *rb)
net/sunrpc/xprtrdma/xprt_rdma.h
517
if (likely(rpcrdma_regbuf_is_mapped(rb)))
net/sunrpc/xprtrdma/xprt_rdma.h
519
return __rpcrdma_regbuf_dma_map(r_xprt, rb);
sound/hda/core/controller.c
45
WARN_ON_ONCE(!bus->rb.area);
sound/hda/core/controller.c
49
bus->corb.addr = bus->rb.addr;
sound/hda/core/controller.c
50
bus->corb.buf = (__le32 *)bus->rb.area;
sound/hda/core/controller.c
69
bus->rirb.addr = bus->rb.addr + 2048;
sound/hda/core/controller.c
70
bus->rirb.buf = (__le32 *)(bus->rb.area + 2048);
sound/hda/core/controller.c
730
return snd_dma_alloc_pages(dma_type, bus->dev, PAGE_SIZE, &bus->rb);
sound/hda/core/controller.c
747
if (bus->rb.area)
sound/hda/core/controller.c
748
snd_dma_free_pages(&bus->rb);
sound/pci/lola/lola.c
348
chip->rb = snd_devm_alloc_pages(&chip->pci->dev, SNDRV_DMA_TYPE_DEV,
sound/pci/lola/lola.c
350
if (!chip->rb)
sound/pci/lola/lola.c
353
chip->corb.addr = chip->rb->addr;
sound/pci/lola/lola.c
354
chip->corb.buf = (__le32 *)chip->rb->area;
sound/pci/lola/lola.c
355
chip->rirb.addr = chip->rb->addr + 2048;
sound/pci/lola/lola.c
356
chip->rirb.buf = (__le32 *)(chip->rb->area + 2048);
sound/pci/lola/lola.h
331
struct snd_dma_buffer *rb;
sound/soc/codecs/tscs42xx.c
950
#define PLL_CTL(f, rt, rd, r1b_l, r9, ra, rb, \
sound/soc/codecs/tscs42xx.c
960
{R_PLLCTLB, rb, 0xFF}, \
sound/soc/meson/axg-spdifin.c
425
unsigned int rb =
sound/soc/meson/axg-spdifin.c
428
if (rb == SNDRV_PCM_RATE_KNOT)
sound/soc/meson/axg-spdifin.c
431
drv->capture.rates |= rb;
sound/soc/sof/intel/hda-stream.c
1045
if (bus->rb.area)
sound/soc/sof/intel/hda-stream.c
1046
snd_dma_free_pages(&bus->rb);
sound/soc/sof/intel/hda-stream.c
946
PAGE_SIZE, &bus->rb);
sound/soc/uniphier/aio-core.c
1039
(sub->swm->rb.map << CDA2D_CHMXAMODE_RSSEL_SHIFT);
sound/soc/uniphier/aio-core.c
1057
BIT(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1058
BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1064
BIT(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1076
CDA2D_RDPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1079
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1081
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1082
regmap_read(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), &pos_u);
sound/soc/uniphier/aio-core.c
1094
regmap_write(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), (u32)pos);
sound/soc/uniphier/aio-core.c
1095
regmap_write(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), (u32)(pos >> 32));
sound/soc/uniphier/aio-core.c
1096
regmap_write(r, CDA2D_RDPTRLOAD, BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1099
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &tmp);
sound/soc/uniphier/aio-core.c
1109
CDA2D_WRPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1112
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1114
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
sound/soc/uniphier/aio-core.c
1115
regmap_read(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map), &pos_u);
sound/soc/uniphier/aio-core.c
1127
regmap_write(r, CDA2D_RBMXWRPTR(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1129
regmap_write(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1131
regmap_write(r, CDA2D_WRPTRLOAD, BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1134
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &tmp);
sound/soc/uniphier/aio-core.c
1144
regmap_write(r, CDA2D_RBMXBTH(sub->swm->rb.map), th);
sound/soc/uniphier/aio-core.c
1145
regmap_write(r, CDA2D_RBMXRTH(sub->swm->rb.map), th);
sound/soc/uniphier/aio-core.c
1160
regmap_write(r, CDA2D_RBMXCNFG(sub->swm->rb.map), 0);
sound/soc/uniphier/aio-core.c
1161
regmap_write(r, CDA2D_RBMXBGNADRS(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1163
regmap_write(r, CDA2D_RBMXBGNADRSU(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1165
regmap_write(r, CDA2D_RBMXENDADRS(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1167
regmap_write(r, CDA2D_RBMXENDADRSU(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1170
regmap_write(r, CDA2D_RBADRSLOAD, BIT(sub->swm->rb.map));
sound/soc/uniphier/aio-core.c
1180
regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1187
regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1248
regmap_read(r, CDA2D_RBMXIR(sub->swm->rb.map), &ir);
sound/soc/uniphier/aio-core.c
1261
regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
1264
regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
sound/soc/uniphier/aio-core.c
205
regmap_write(r, A2RBNMAPCTR0(sub->swm->rb.hw),
sound/soc/uniphier/aio-core.c
206
MAPCTR0_EN | sub->swm->rb.map);
sound/soc/uniphier/aio-ld11.c
101
.rb = { 2, 2, },
sound/soc/uniphier/aio-ld11.c
114
.rb = { 3, 3, },
sound/soc/uniphier/aio-ld11.c
127
.rb = { 7, 5, },
sound/soc/uniphier/aio-ld11.c
142
.rb = { 8, 6, },
sound/soc/uniphier/aio-ld11.c
158
.rb = { 1, 1, },
sound/soc/uniphier/aio-ld11.c
172
.rb = { 1, 1, },
sound/soc/uniphier/aio-ld11.c
19
.rb = { 21, 14, },
sound/soc/uniphier/aio-ld11.c
32
.rb = { 22, 15, },
sound/soc/uniphier/aio-ld11.c
46
.rb = { 23, 16, },
sound/soc/uniphier/aio-ld11.c
60
.rb = { 26, 17, },
sound/soc/uniphier/aio-ld11.c
73
.rb = { 0, 0, },
sound/soc/uniphier/aio-ld11.c
87
.rb = { 0, 0, },
sound/soc/uniphier/aio-pxs2.c
101
.rb = { 6, 4, },
sound/soc/uniphier/aio-pxs2.c
114
.rb = { 7, 5, },
sound/soc/uniphier/aio-pxs2.c
127
.rb = { 7, 5, },
sound/soc/uniphier/aio-pxs2.c
19
.rb = { 16, 11, },
sound/soc/uniphier/aio-pxs2.c
33
.rb = { 17, 12, },
sound/soc/uniphier/aio-pxs2.c
47
.rb = { 0, 0, },
sound/soc/uniphier/aio-pxs2.c
61
.rb = { 1, 1, },
sound/soc/uniphier/aio-pxs2.c
75
.rb = { 2, 2, },
sound/soc/uniphier/aio-pxs2.c
88
.rb = { 6, 4, },
sound/soc/uniphier/aio.h
198
struct uniphier_aio_selector rb;
tools/include/linux/interval_tree_generic.h
150
struct rb_node *rb = node->ITRB.rb_right, *prev; \
tools/include/linux/interval_tree_generic.h
160
if (rb) { \
tools/include/linux/interval_tree_generic.h
161
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
tools/include/linux/interval_tree_generic.h
169
rb = rb_parent(&node->ITRB); \
tools/include/linux/interval_tree_generic.h
170
if (!rb) \
tools/include/linux/interval_tree_generic.h
173
node = rb_entry(rb, ITSTRUCT, ITRB); \
tools/include/linux/interval_tree_generic.h
174
rb = node->ITRB.rb_right; \
tools/include/linux/interval_tree_generic.h
175
} while (prev == rb); \
tools/include/linux/rbtree_augmented.h
155
#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
tools/include/linux/rbtree_augmented.h
156
#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
tools/include/linux/rbtree_augmented.h
157
#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
tools/include/linux/rbtree_augmented.h
159
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
tools/include/linux/rbtree_augmented.h
161
rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
tools/include/linux/rbtree_augmented.h
164
static inline void rb_set_parent_color(struct rb_node *rb,
tools/include/linux/rbtree_augmented.h
167
rb->__rb_parent_color = (unsigned long)p + color;
tools/include/linux/rbtree_augmented.h
79
RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop) \
tools/include/linux/rbtree_augmented.h
81
while (rb != stop) { \
tools/include/linux/rbtree_augmented.h
82
RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
tools/include/linux/rbtree_augmented.h
85
rb = rb_parent(&node->RBFIELD); \
tools/lib/bpf/libbpf.h
1428
LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
tools/lib/bpf/libbpf.h
1429
LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
tools/lib/bpf/libbpf.h
1431
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
tools/lib/bpf/libbpf.h
1432
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
tools/lib/bpf/libbpf.h
1433
LIBBPF_API int ring_buffer__consume_n(struct ring_buffer *rb, size_t n);
tools/lib/bpf/libbpf.h
1434
LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
tools/lib/bpf/libbpf.h
1447
LIBBPF_API struct ring *ring_buffer__ring(struct ring_buffer *rb,
tools/lib/bpf/libbpf.h
1556
LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);
tools/lib/bpf/libbpf.h
1599
LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
tools/lib/bpf/libbpf.h
1612
LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);
tools/lib/bpf/libbpf.h
1622
LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample);
tools/lib/bpf/libbpf.h
1629
LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb);
tools/lib/bpf/ringbuf.c
102
tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
tools/lib/bpf/ringbuf.c
105
rb->rings = tmp;
tools/lib/bpf/ringbuf.c
107
tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
tools/lib/bpf/ringbuf.c
110
rb->events = tmp;
tools/lib/bpf/ringbuf.c
115
rb->rings[rb->ring_cnt] = r;
tools/lib/bpf/ringbuf.c
123
tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
tools/lib/bpf/ringbuf.c
136
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
tools/lib/bpf/ringbuf.c
142
tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
tools/lib/bpf/ringbuf.c
150
r->data = tmp + rb->page_size;
tools/lib/bpf/ringbuf.c
152
e = &rb->events[rb->ring_cnt];
tools/lib/bpf/ringbuf.c
156
e->data.fd = rb->ring_cnt;
tools/lib/bpf/ringbuf.c
157
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
tools/lib/bpf/ringbuf.c
164
rb->ring_cnt++;
tools/lib/bpf/ringbuf.c
168
ringbuf_free_ring(rb, r);
tools/lib/bpf/ringbuf.c
172
void ring_buffer__free(struct ring_buffer *rb)
tools/lib/bpf/ringbuf.c
176
if (!rb)
tools/lib/bpf/ringbuf.c
179
for (i = 0; i < rb->ring_cnt; ++i)
tools/lib/bpf/ringbuf.c
180
ringbuf_free_ring(rb, rb->rings[i]);
tools/lib/bpf/ringbuf.c
181
if (rb->epoll_fd >= 0)
tools/lib/bpf/ringbuf.c
182
close(rb->epoll_fd);
tools/lib/bpf/ringbuf.c
184
free(rb->events);
tools/lib/bpf/ringbuf.c
185
free(rb->rings);
tools/lib/bpf/ringbuf.c
186
free(rb);
tools/lib/bpf/ringbuf.c
193
struct ring_buffer *rb;
tools/lib/bpf/ringbuf.c
199
rb = calloc(1, sizeof(*rb));
tools/lib/bpf/ringbuf.c
200
if (!rb)
tools/lib/bpf/ringbuf.c
203
rb->page_size = getpagesize();
tools/lib/bpf/ringbuf.c
205
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
tools/lib/bpf/ringbuf.c
206
if (rb->epoll_fd < 0) {
tools/lib/bpf/ringbuf.c
212
err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
tools/lib/bpf/ringbuf.c
216
return rb;
tools/lib/bpf/ringbuf.c
219
ring_buffer__free(rb);
tools/lib/bpf/ringbuf.c
287
int ring_buffer__consume_n(struct ring_buffer *rb, size_t n)
tools/lib/bpf/ringbuf.c
292
for (i = 0; i < rb->ring_cnt; i++) {
tools/lib/bpf/ringbuf.c
293
struct ring *ring = rb->rings[i];
tools/lib/bpf/ringbuf.c
312
int ring_buffer__consume(struct ring_buffer *rb)
tools/lib/bpf/ringbuf.c
317
for (i = 0; i < rb->ring_cnt; i++) {
tools/lib/bpf/ringbuf.c
318
struct ring *ring = rb->rings[i];
tools/lib/bpf/ringbuf.c
336
int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
tools/lib/bpf/ringbuf.c
341
cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
tools/lib/bpf/ringbuf.c
346
__u32 ring_id = rb->events[i].data.fd;
tools/lib/bpf/ringbuf.c
347
struct ring *ring = rb->rings[ring_id];
tools/lib/bpf/ringbuf.c
360
int ring_buffer__epoll_fd(const struct ring_buffer *rb)
tools/lib/bpf/ringbuf.c
362
return rb->epoll_fd;
tools/lib/bpf/ringbuf.c
365
struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx)
tools/lib/bpf/ringbuf.c
367
if (idx >= rb->ring_cnt)
tools/lib/bpf/ringbuf.c
370
return rb->rings[idx];
tools/lib/bpf/ringbuf.c
422
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
tools/lib/bpf/ringbuf.c
424
if (rb->consumer_pos) {
tools/lib/bpf/ringbuf.c
425
munmap(rb->consumer_pos, rb->page_size);
tools/lib/bpf/ringbuf.c
426
rb->consumer_pos = NULL;
tools/lib/bpf/ringbuf.c
428
if (rb->producer_pos) {
tools/lib/bpf/ringbuf.c
429
munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));
tools/lib/bpf/ringbuf.c
430
rb->producer_pos = NULL;
tools/lib/bpf/ringbuf.c
434
void user_ring_buffer__free(struct user_ring_buffer *rb)
tools/lib/bpf/ringbuf.c
436
if (!rb)
tools/lib/bpf/ringbuf.c
439
user_ringbuf_unmap_ring(rb);
tools/lib/bpf/ringbuf.c
441
if (rb->epoll_fd >= 0)
tools/lib/bpf/ringbuf.c
442
close(rb->epoll_fd);
tools/lib/bpf/ringbuf.c
444
free(rb);
tools/lib/bpf/ringbuf.c
447
static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
tools/lib/bpf/ringbuf.c
471
rb->map_fd = map_fd;
tools/lib/bpf/ringbuf.c
472
rb->mask = info.max_entries - 1;
tools/lib/bpf/ringbuf.c
475
tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
tools/lib/bpf/ringbuf.c
482
rb->consumer_pos = tmp;
tools/lib/bpf/ringbuf.c
489
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
tools/lib/bpf/ringbuf.c
495
map_fd, rb->page_size);
tools/lib/bpf/ringbuf.c
503
rb->producer_pos = tmp;
tools/lib/bpf/ringbuf.c
504
rb->data = tmp + rb->page_size;
tools/lib/bpf/ringbuf.c
506
rb_epoll = &rb->event;
tools/lib/bpf/ringbuf.c
508
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
tools/lib/bpf/ringbuf.c
520
struct user_ring_buffer *rb;
tools/lib/bpf/ringbuf.c
526
rb = calloc(1, sizeof(*rb));
tools/lib/bpf/ringbuf.c
527
if (!rb)
tools/lib/bpf/ringbuf.c
530
rb->page_size = getpagesize();
tools/lib/bpf/ringbuf.c
532
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
tools/lib/bpf/ringbuf.c
533
if (rb->epoll_fd < 0) {
tools/lib/bpf/ringbuf.c
539
err = user_ringbuf_map(rb, map_fd);
tools/lib/bpf/ringbuf.c
543
return rb;
tools/lib/bpf/ringbuf.c
546
user_ring_buffer__free(rb);
tools/lib/bpf/ringbuf.c
550
static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard)
tools/lib/bpf/ringbuf.c
556
hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ;
tools/lib/bpf/ringbuf.c
557
hdr = rb->data + (hdr_offset & rb->mask);
tools/lib/bpf/ringbuf.c
569
void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample)
tools/lib/bpf/ringbuf.c
571
user_ringbuf_commit(rb, sample, true);
tools/lib/bpf/ringbuf.c
574
void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample)
tools/lib/bpf/ringbuf.c
576
user_ringbuf_commit(rb, sample, false);
tools/lib/bpf/ringbuf.c
579
void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
tools/lib/bpf/ringbuf.c
593
cons_pos = smp_load_acquire(rb->consumer_pos);
tools/lib/bpf/ringbuf.c
595
prod_pos = smp_load_acquire(rb->producer_pos);
tools/lib/bpf/ringbuf.c
597
max_size = rb->mask + 1;
tools/lib/bpf/ringbuf.c
60
static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)
tools/lib/bpf/ringbuf.c
608
hdr = rb->data + (prod_pos & rb->mask);
tools/lib/bpf/ringbuf.c
615
smp_store_release(rb->producer_pos, prod_pos + total_size);
tools/lib/bpf/ringbuf.c
617
return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask);
tools/lib/bpf/ringbuf.c
63
munmap(r->consumer_pos, rb->page_size);
tools/lib/bpf/ringbuf.c
630
void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms)
tools/lib/bpf/ringbuf.c
650
sample = user_ring_buffer__reserve(rb, size);
tools/lib/bpf/ringbuf.c
667
cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining);
tools/lib/bpf/ringbuf.c
67
munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
tools/lib/bpf/ringbuf.c
683
return user_ring_buffer__reserve(rb, size);
tools/lib/bpf/ringbuf.c
75
int ring_buffer__add(struct ring_buffer *rb, int map_fd,
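
The tools/lib/bpf/ringbuf.c hits above implement the user-space consumer half of the BPF ring buffer. A minimal consumer built on that public libbpf API might look like this (map_fd and on_sample are illustrative names; error handling abbreviated):

	#include <errno.h>
	#include <bpf/libbpf.h>

	/* Called once per sample; a negative return stops consumption. */
	static int on_sample(void *ctx, void *data, size_t size)
	{
		/* interpret @data (@size bytes) as the event the BPF program wrote */
		return 0;
	}

	int consume_ringbuf(int map_fd)		/* fd of a BPF_MAP_TYPE_RINGBUF map */
	{
		struct ring_buffer *rb;
		int err;

		rb = ring_buffer__new(map_fd, on_sample, NULL, NULL);
		if (!rb)
			return -1;

		/* Block up to 100 ms per iteration; tolerate signal interruption. */
		while ((err = ring_buffer__poll(rb, 100)) >= 0 || err == -EINTR)
			;

		ring_buffer__free(rb);
		return err;
	}
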
tools/lib/rbtree.c
59
static inline void rb_set_black(struct rb_node *rb)
tools/lib/rbtree.c
61
rb->__rb_parent_color += RB_BLACK;
tools/perf/arch/x86/tests/amd-ibs-period.c
148
static int rb_read(struct perf_event_mmap_page *rb, void *dest, size_t size)
tools/perf/arch/x86/tests/amd-ibs-period.c
154
base = (void *)rb + page_size;
tools/perf/arch/x86/tests/amd-ibs-period.c
156
data_head = rb->data_head;
tools/perf/arch/x86/tests/amd-ibs-period.c
158
data_tail = rb->data_tail;
tools/perf/arch/x86/tests/amd-ibs-period.c
165
rb->data_tail += size;
tools/perf/arch/x86/tests/amd-ibs-period.c
169
static void rb_skip(struct perf_event_mmap_page *rb, size_t size)
tools/perf/arch/x86/tests/amd-ibs-period.c
171
size_t data_head = rb->data_head;
tools/perf/arch/x86/tests/amd-ibs-period.c
175
if ((rb->data_tail + size) > data_head)
tools/perf/arch/x86/tests/amd-ibs-period.c
176
rb->data_tail = data_head;
tools/perf/arch/x86/tests/amd-ibs-period.c
178
rb->data_tail += size;
tools/perf/arch/x86/tests/amd-ibs-period.c
196
static int rb_drain_samples(struct perf_event_mmap_page *rb,
tools/perf/arch/x86/tests/amd-ibs-period.c
213
if (rb_read(rb, &hdr, sizeof(hdr)))
tools/perf/arch/x86/tests/amd-ibs-period.c
219
if (rb_read(rb, &period, sizeof(period)))
tools/perf/arch/x86/tests/amd-ibs-period.c
223
rb_skip(rb, hdr.size - sizeof(hdr));
tools/perf/arch/x86/tests/amd-ibs-period.c
309
void *rb;
tools/perf/arch/x86/tests/amd-ibs-period.c
329
rb = mmap(NULL, PERF_MMAP_TOTAL_SIZE, PROT_READ | PROT_WRITE,
tools/perf/arch/x86/tests/amd-ibs-period.c
331
if (rb == MAP_FAILED) {
tools/perf/arch/x86/tests/amd-ibs-period.c
343
ret = rb_drain_samples(rb, config->period, nr_samples,
tools/perf/arch/x86/tests/amd-ibs-period.c
350
munmap(rb, PERF_MMAP_TOTAL_SIZE);
tools/perf/arch/x86/tests/amd-ibs-period.c
493
void *rb;
tools/perf/arch/x86/tests/amd-ibs-period.c
516
rb = mmap(NULL, PERF_MMAP_TOTAL_SIZE, PROT_READ | PROT_WRITE,
tools/perf/arch/x86/tests/amd-ibs-period.c
518
if (rb == MAP_FAILED) {
tools/perf/arch/x86/tests/amd-ibs-period.c
529
ret = rb_drain_samples(rb, period->period, nr_samples,
tools/perf/arch/x86/tests/amd-ibs-period.c
533
ret = rb_drain_samples(rb, period->period, nr_samples,
tools/perf/arch/x86/tests/amd-ibs-period.c
538
munmap(rb, PERF_MMAP_TOTAL_SIZE);
tools/perf/arch/x86/tests/amd-ibs-period.c
794
void *rb;
tools/perf/arch/x86/tests/amd-ibs-period.c
822
rb = mmap(NULL, PERF_MMAP_TOTAL_SIZE, PROT_READ | PROT_WRITE,
tools/perf/arch/x86/tests/amd-ibs-period.c
824
if (rb == MAP_FAILED) {
tools/perf/arch/x86/tests/amd-ibs-period.c
837
ret = rb_drain_samples(rb, l3missonly->min_period, nr_samples, period_higher);
tools/perf/arch/x86/tests/amd-ibs-period.c
839
munmap(rb, PERF_MMAP_TOTAL_SIZE);
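
The rb_read()/rb_skip() hits above consume a perf mmap ring via the data_head/data_tail contract documented in include/uapi/linux/perf_event.h: load data_head with acquire semantics, copy the new bytes, then publish data_tail with release semantics. A sketch under those assumptions (a record wrapping past the buffer end is ignored for brevity; rb_copy is an illustrative name):

	#include <stdint.h>
	#include <string.h>
	#include <linux/perf_event.h>

	/* @rb is the metadata page mapped at offset 0 of the perf event fd;
	 * the data area begins @page_size bytes after it. */
	static size_t rb_copy(struct perf_event_mmap_page *rb, size_t page_size,
			      void *dest, size_t size)
	{
		uint64_t head = __atomic_load_n(&rb->data_head, __ATOMIC_ACQUIRE);
		uint64_t tail = rb->data_tail;	/* only user space writes this */
		unsigned char *data = (unsigned char *)rb + page_size;

		if (head - tail < size)
			size = head - tail;	/* don't read past the producer */
		memcpy(dest, data + (tail % rb->data_size), size);

		/* Release store: the kernel may now reuse the consumed bytes. */
		__atomic_store_n(&rb->data_tail, tail + size, __ATOMIC_RELEASE);
		return size;
	}
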
tools/perf/builtin-lock.c
100
rb = &(*rb)->rb_left;
tools/perf/builtin-lock.c
102
rb = &(*rb)->rb_right;
tools/perf/builtin-lock.c
107
rb_link_node(&new->rb, parent, rb);
tools/perf/builtin-lock.c
108
rb_insert_color(&new->rb, &thread_stats);
tools/perf/builtin-lock.c
1348
st = container_of(node, struct thread_stat, rb);
tools/perf/builtin-lock.c
149
rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
tools/perf/builtin-lock.c
150
rb_insert_color(&st->rb, &thread_stats);
tools/perf/builtin-lock.c
381
struct rb_node **rb = &sorted.rb_node;
tools/perf/builtin-lock.c
386
while (*rb) {
tools/perf/builtin-lock.c
387
p = container_of(*rb, struct lock_stat, rb);
tools/perf/builtin-lock.c
388
parent = *rb;
tools/perf/builtin-lock.c
414
rb = &(*rb)->rb_left;
tools/perf/builtin-lock.c
416
rb = &(*rb)->rb_right;
tools/perf/builtin-lock.c
419
rb_link_node(&st->rb, parent, rb);
tools/perf/builtin-lock.c
420
rb_insert_color(&st->rb, &sorted);
tools/perf/builtin-lock.c
426
struct rb_node **rb = &rr->rb_node;
tools/perf/builtin-lock.c
430
while (*rb) {
tools/perf/builtin-lock.c
431
p = container_of(*rb, struct lock_stat, rb);
tools/perf/builtin-lock.c
432
parent = *rb;
tools/perf/builtin-lock.c
435
rb = &(*rb)->rb_left;
tools/perf/builtin-lock.c
437
rb = &(*rb)->rb_right;
tools/perf/builtin-lock.c
440
rb_link_node(&st->rb, parent, rb);
tools/perf/builtin-lock.c
441
rb_insert_color(&st->rb, rr);
tools/perf/builtin-lock.c
464
return container_of(node, struct lock_stat, rb);
tools/perf/builtin-lock.c
77
st = container_of(node, struct thread_stat, rb);
tools/perf/builtin-lock.c
91
struct rb_node **rb = &thread_stats.rb_node;
tools/perf/builtin-lock.c
95
while (*rb) {
tools/perf/builtin-lock.c
96
p = container_of(*rb, struct thread_stat, rb);
tools/perf/builtin-lock.c
97
parent = *rb;
tools/perf/util/block-range.c
15
struct rb_node *rb;
tools/perf/util/block-range.c
18
for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {
tools/perf/util/block-range.c
19
struct block_range *entry = rb_entry(rb, struct block_range, node);
tools/perf/util/lock-contention.h
102
struct rb_node rb;
tools/perf/util/lock-contention.h
29
struct rb_node rb; /* used for sorting */
tools/testing/selftests/bpf/progs/refcounted_kptr.c
144
struct bpf_rb_node *rb;
tools/testing/selftests/bpf/progs/refcounted_kptr.c
150
rb = bpf_rbtree_first(root);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
151
if (!rb) {
tools/testing/selftests/bpf/progs/refcounted_kptr.c
156
n = container_of(rb, struct node_data, r);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
164
rb = bpf_rbtree_remove(root, rb);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
166
if (!rb)
tools/testing/selftests/bpf/progs/refcounted_kptr.c
168
n = container_of(rb, struct node_data, r);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
509
struct bpf_rb_node *rb;
tools/testing/selftests/bpf/progs/refcounted_kptr.c
519
rb = bpf_rbtree_first(&root);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
520
if (!rb)
tools/testing/selftests/bpf/progs/refcounted_kptr.c
523
rb = bpf_rbtree_remove(&root, rb);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
524
if (!rb)
tools/testing/selftests/bpf/progs/refcounted_kptr.c
527
m = container_of(rb, struct node_data, r);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
543
struct bpf_rb_node *rb;
tools/testing/selftests/bpf/progs/refcounted_kptr.c
553
rb = bpf_rbtree_first(&root);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
554
if (!rb)
tools/testing/selftests/bpf/progs/refcounted_kptr.c
557
rb = bpf_rbtree_remove(&root, rb);
tools/testing/selftests/bpf/progs/refcounted_kptr.c
558
if (!rb)
tools/testing/selftests/bpf/progs/refcounted_kptr.c
561
m = container_of(rb, struct node_data, r);
tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
61
void *rb;
tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
66
rb = bpf_map_lookup_elem(&ringbuf_arr, &target_ring);
tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
67
if (!rb) {
tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
72
sample = bpf_ringbuf_reserve(rb, sizeof(*sample), 0);
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
219
void *rb;
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
226
void *rb;
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
230
rb = bpf_map_lookup_elem(&rb_in_map, &rb_slot);
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
231
if (!rb)
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
234
rb_ctx.rb = rb;
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
235
bpf_ringbuf_reserve_dynptr(rb, sz, 0, &rb_ctx.dptr);
tools/testing/selftests/bpf/progs/verifier_map_in_map.c
242
if (!ctx->rb)
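
On the BPF-program side (the selftest hits above), the producer pattern is bpf_ringbuf_reserve() followed by bpf_ringbuf_submit() or bpf_ringbuf_discard(). A sketch; the map name, event layout, and attach point are all illustrative:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct event { int pid; };

	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 4096);	/* power-of-two multiple of PAGE_SIZE */
	} events SEC(".maps");

	SEC("tracepoint/sched/sched_switch")
	int log_switch(void *ctx)
	{
		struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);

		if (!e)				/* ring full: reservation failed */
			return 0;
		e->pid = (int)(bpf_get_current_pid_tgid() >> 32);
		bpf_ringbuf_submit(e, 0);	/* make the sample visible */
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
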
tools/testing/selftests/net/mptcp/mptcp_connect.c
663
ssize_t rb = sizeof(rbuf);
tools/testing/selftests/net/mptcp/mptcp_connect.c
667
if (rb + total_rlen > cfg_truncate)
tools/testing/selftests/net/mptcp/mptcp_connect.c
668
rb = cfg_truncate - total_rlen;
tools/testing/selftests/net/mptcp/mptcp_connect.c
669
len = read(peerfd, rbuf, rb);
tools/testing/selftests/net/tls.c
1501
char rb[8001];
tools/testing/selftests/net/tls.c
1506
res = recv(self->cfd, rb,
tools/testing/selftests/net/tls.c
1507
left > sizeof(rb) ? sizeof(rb) : left, 0);
tools/testing/selftests/perf_events/mmap.c
115
rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
tools/testing/selftests/perf_events/mmap.c
116
if (rb == MAP_FAILED) {
tools/testing/selftests/perf_events/mmap.c
130
rb->aux_offset = AUX_OFFS;
tools/testing/selftests/perf_events/mmap.c
131
rb->aux_size = AUX_SIZE;
tools/testing/selftests/perf_events/mmap.c
137
munmap(rb, RB_SIZE);
tools/testing/selftests/perf_events/mmap.c
145
munmap(rb, RB_SIZE);
tools/testing/selftests/perf_events/mmap.c
161
rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, self->fd, 0);
tools/testing/selftests/perf_events/mmap.c
162
ASSERT_NE(rb, MAP_FAILED);
tools/testing/selftests/perf_events/mmap.c
165
self->ptr = rb;
tools/testing/selftests/perf_events/mmap.c
172
rb->aux_offset = AUX_OFFS;
tools/testing/selftests/perf_events/mmap.c
173
rb->aux_size = AUX_SIZE;
tools/testing/selftests/perf_events/mmap.c
47
FIXTURE_VARIANT_ADD(perf_mmap, rb)
tools/testing/selftests/perf_events/mmap.c
85
struct perf_event_mmap_page *rb;
tools/testing/selftests/powerpc/include/reg.h
109
#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \
tools/testing/selftests/powerpc/include/reg.h
110
((rb) << 11) | (((xs) >> 5)))
tools/testing/selftests/powerpc/include/reg.h
111
#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
tools/testing/selftests/powerpc/include/reg.h
112
#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
tools/testing/selftests/vfio/lib/iommu.c
373
const struct iommu_iova_range *ra = a, *rb = b;
tools/testing/selftests/vfio/lib/iommu.c
375
if (ra->start < rb->start)
tools/testing/selftests/vfio/lib/iommu.c
378
if (ra->start > rb->start)
tools/testing/vma/include/dup.h
594
struct rb_node rb;
tools/testing/vma/include/stubs.h
259
struct rb_root_cached *rb)
tools/testing/vma/include/stubs.h
264
struct rb_root_cached *rb)
tools/testing/vma/include/stubs.h
273
struct rb_root_cached *rb)
tools/testing/vma/include/stubs.h
278
struct rb_root_cached *rb)
tools/tracing/rtla/src/timerlat_bpf.c
120
struct ring_buffer *rb;
tools/tracing/rtla/src/timerlat_bpf.c
123
rb = ring_buffer__new(bpf_map__fd(bpf->maps.signal_stop_tracing),
tools/tracing/rtla/src/timerlat_bpf.c
125
retval = ring_buffer__poll(rb, timeout * 1000);
tools/tracing/rtla/src/timerlat_bpf.c
126
ring_buffer__free(rb);