Symbol: cur
arch/arc/kernel/kprobes.c
248
struct kprobe *cur = kprobe_running();
arch/arc/kernel/kprobes.c
251
if (!cur)
arch/arc/kernel/kprobes.c
254
resume_execution(cur, addr, regs);
arch/arc/kernel/kprobes.c
257
arch_arm_kprobe(cur);
arch/arc/kernel/kprobes.c
266
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
arch/arc/kernel/kprobes.c
268
cur->post_handler(cur, regs, 0);
arch/arc/kernel/kprobes.c
292
struct kprobe *cur = kprobe_running();
arch/arc/kernel/kprobes.c
304
resume_execution(cur, (unsigned long)cur->addr, regs);
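
The arch/arc kprobes hits above are the standard arch-side epilogue for a single-stepped probe: look up the active probe with kprobe_running(), bail out if there is none, restore the interrupted PC, and only then run the optional post handler. A compilable stand-alone model of that flow (struct layouts, helper bodies, and the fixed 4-byte step are illustrative stand-ins, not the kernel's definitions):

#include <stddef.h>

struct pt_regs { unsigned long pc; };
struct kprobe {
    unsigned long addr;                     /* address of the probed insn */
    void (*post_handler)(struct kprobe *, struct pt_regs *, int);
};

static struct kprobe *running;              /* per-CPU state in the kernel */
static struct kprobe *kprobe_running(void) { return running; }

static void resume_execution(struct kprobe *cur, struct pt_regs *regs)
{
    regs->pc = cur->addr + 4;               /* continue past the probe */
}

static int post_kprobe_handler(struct pt_regs *regs)
{
    struct kprobe *cur = kprobe_running();

    if (!cur)                               /* the trap was not ours */
        return 0;
    resume_execution(cur, regs);            /* undo the single-step detour */
    if (cur->post_handler)
        cur->post_handler(cur, regs, 0);
    running = NULL;
    return 1;
}
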
arch/arc/kernel/unwind.c
451
const u8 *cur = *pcur;
arch/arc/kernel/unwind.c
455
for (shift = 0, value = 0; cur < end; shift += 7) {
arch/arc/kernel/unwind.c
457
&& (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
arch/arc/kernel/unwind.c
458
cur = end + 1;
arch/arc/kernel/unwind.c
461
value |= (uleb128_t) (*cur & 0x7f) << shift;
arch/arc/kernel/unwind.c
462
if (!(*cur++ & 0x80))
arch/arc/kernel/unwind.c
465
*pcur = cur;
arch/arc/kernel/unwind.c
472
const u8 *cur = *pcur;
arch/arc/kernel/unwind.c
476
for (shift = 0, value = 0; cur < end; shift += 7) {
arch/arc/kernel/unwind.c
478
&& (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
arch/arc/kernel/unwind.c
479
cur = end + 1;
arch/arc/kernel/unwind.c
482
value |= (sleb128_t) (*cur & 0x7f) << shift;
arch/arc/kernel/unwind.c
483
if (!(*cur & 0x80)) {
arch/arc/kernel/unwind.c
484
value |= -(*cur++ & 0x40) << shift;
arch/arc/kernel/unwind.c
488
*pcur = cur;
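
The unwind.c hits above are the DWARF LEB128 decoders: cur walks the byte stream, and on overflow it is deliberately parked past end ("cur = end + 1") so callers can detect failure from the cursor alone. A stand-alone sketch of the unsigned variant (overflow guard simplified for a 64-bit result; the signed variant additionally sign-extends from bit 6 of the final byte, as the sleb128 lines show):

#include <stdint.h>

static uint64_t get_uleb128(const uint8_t **pcur, const uint8_t *end)
{
    const uint8_t *cur = *pcur;
    uint64_t value = 0;
    unsigned int shift;

    for (shift = 0; cur < end; shift += 7) {
        uint8_t byte = *cur++;

        /* 64 bits hold nine full 7-bit groups plus one final bit */
        if (shift > 63 || (shift == 63 && (byte & 0x7f) > 1)) {
            cur = end + 1;              /* poison the cursor: overflow */
            break;
        }
        value |= (uint64_t)(byte & 0x7f) << shift;
        if (!(byte & 0x80))             /* continuation bit clear: done */
            break;
    }
    *pcur = cur;                        /* cur > end now signals an error */
    return value;
}
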
arch/arc/kernel/unwind.c
954
const u8 *cur =
arch/arc/kernel/unwind.c
957
startLoc = read_pointer(&cur,
arch/arc/kernel/unwind.c
958
cur + tableSize,
arch/arc/kernel/unwind.c
963
ptr = cur - tableSize;
arch/arm/include/asm/current.h
19
struct task_struct *cur;
arch/arm/include/asm/current.h
28
cur = __builtin_thread_pointer();
arch/arm/include/asm/current.h
50
: "=r"(cur));
arch/arm/include/asm/current.h
54
cur = __current;
arch/arm/include/asm/current.h
56
asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
arch/arm/include/asm/current.h
58
return cur;
arch/arm/kernel/smp.c
400
static void set_current(struct task_struct *cur)
arch/arm/kernel/smp.c
403
asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
arch/arm/mach-rpc/dma.c
100
unsigned int status, cur, end;
arch/arm/mach-rpc/dma.c
113
cur = CURA;
arch/arm/mach-rpc/dma.c
116
cur = CURB;
arch/arm/mach-rpc/dma.c
119
writel(idma->cur_addr, base + cur);
arch/arm/probes/kprobes/core.c
238
struct kprobe *p, *cur;
arch/arm/probes/kprobes/core.c
242
cur = kprobe_running();
arch/arm/probes/kprobes/core.c
267
} else if (cur) {
arch/arm/probes/kprobes/core.c
333
struct kprobe *cur = kprobe_running();
arch/arm/probes/kprobes/core.c
346
regs->ARM_pc = (long)cur->addr;
arch/arm64/include/asm/cpufeature.h
958
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
arch/arm64/kernel/alternative.c
126
u64 cur, d_size, ctr_el0;
arch/arm64/kernel/alternative.c
131
cur = start & ~(d_size - 1);
arch/arm64/kernel/alternative.c
138
asm volatile("dc civac, %0" : : "r" (cur) : "memory");
arch/arm64/kernel/alternative.c
139
} while (cur += d_size, cur < end);
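
The alternative.c loop above shows the usual way to touch every cache line covering [start, end): round the start down to a line boundary, then advance one line per iteration with a comma-expression do/while. A sketch with the "dc civac" instruction replaced by a stub (assumes line_size is a power of two and start < end):

#include <stdint.h>

static void clean_line(uintptr_t addr)
{
    (void)addr;                         /* stands in for: dc civac, addr */
}

static void clean_range(uintptr_t start, uintptr_t end, uintptr_t line_size)
{
    uintptr_t cur = start & ~(line_size - 1);   /* align down to a line */

    do {
        clean_line(cur);
    } while (cur += line_size, cur < end);      /* advance, then re-test */
}
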
arch/arm64/kernel/cpufeature.c
930
s64 cur)
arch/arm64/kernel/cpufeature.c
939
ret = min(new, cur);
arch/arm64/kernel/cpufeature.c
942
if (!cur || !new)
arch/arm64/kernel/cpufeature.c
946
ret = max(new, cur);
arch/arm64/kernel/probes/kprobes.c
261
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
arch/arm64/kernel/probes/kprobes.c
264
if (cur->ainsn.xol_restore != 0)
arch/arm64/kernel/probes/kprobes.c
265
instruction_pointer_set(regs, cur->ainsn.xol_restore);
arch/arm64/kernel/probes/kprobes.c
274
if (cur->post_handler)
arch/arm64/kernel/probes/kprobes.c
275
cur->post_handler(cur, regs, 0);
arch/arm64/kernel/probes/kprobes.c
282
struct kprobe *cur = kprobe_running();
arch/arm64/kernel/probes/kprobes.c
295
instruction_pointer_set(regs, (unsigned long) cur->addr);
arch/arm64/kernel/probes/kprobes.c
359
struct kprobe *cur = kprobe_running();
arch/arm64/kernel/probes/kprobes.c
361
if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
arch/arm64/kernel/probes/kprobes.c
362
((unsigned long)&cur->ainsn.xol_insn[1] == addr)) {
arch/arm64/kernel/probes/kprobes.c
364
post_kprobe_handler(cur, kcb, regs);
arch/arm64/kvm/hyp/nvhe/early_alloc.c
17
static unsigned long cur;
arch/arm64/kvm/hyp/nvhe/early_alloc.c
21
return (cur - base) >> PAGE_SHIFT;
arch/arm64/kvm/hyp/nvhe/early_alloc.c
27
void *ret = (void *)cur;
arch/arm64/kvm/hyp/nvhe/early_alloc.c
32
if (end - cur < size)
arch/arm64/kvm/hyp/nvhe/early_alloc.c
35
cur += size;
arch/arm64/kvm/hyp/nvhe/early_alloc.c
51
base = cur = (unsigned long)virt;
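
early_alloc.c above is a classic bump allocator: cur only ever moves forward through [base, end), so there is no free path and the bookkeeping is two comparisons. A userspace model (function names are this sketch's own, and the zeroing is its choice rather than a claim about the hyp code):

#include <stddef.h>
#include <string.h>

static unsigned long base, cur, end;    /* the arena: [base, end) */

static void bump_init(void *arena, unsigned long size)
{
    base = cur = (unsigned long)arena;
    end = base + size;
}

static void *bump_alloc(unsigned long size)
{
    void *ret = (void *)cur;

    if (end - cur < size)               /* arena exhausted */
        return NULL;
    memset(ret, 0, size);               /* hand out zeroed memory */
    cur += size;                        /* the cursor never rewinds */
    return ret;
}
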
arch/arm64/kvm/hyp/nvhe/mem_protect.c
392
int cur, left = 0, right = hyp_memblock_nr;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
401
cur = (left + right) >> 1;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
402
reg = &hyp_memory[cur];
arch/arm64/kvm/hyp/nvhe/mem_protect.c
405
right = cur;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
408
left = cur + 1;
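
The mem_protect.c lines above binary-search the sorted hyp_memory array for the region containing an address; cur is the probed midpoint and [left, right) the live half-open window. The same shape, stand-alone (struct region is a simplified stand-in for struct memblock_region; regions must be sorted and non-overlapping):

#include <stdbool.h>
#include <stdint.h>

struct region { uint64_t base, size; };

static bool find_region(const struct region *regs, int nr, uint64_t addr)
{
    int cur, left = 0, right = nr;      /* candidates: [left, right) */

    while (left < right) {
        cur = (left + right) >> 1;      /* probe the midpoint */
        if (addr < regs[cur].base)
            right = cur;                /* answer lies to the left */
        else if (addr >= regs[cur].base + regs[cur].size)
            left = cur + 1;             /* answer lies to the right */
        else
            return true;                /* addr falls inside regs[cur] */
    }
    return false;
}
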
arch/arm64/kvm/hyp/nvhe/mem_protect.c
495
struct kvm_mem_range cur;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
519
cur.start = ALIGN_DOWN(addr, granule);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
520
cur.end = cur.start + granule;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
521
if (!range_included(&cur, range) && level < KVM_PGTABLE_LAST_LEVEL)
arch/arm64/kvm/hyp/nvhe/mem_protect.c
523
*range = cur;
arch/arm64/kvm/hyp/nvhe/mem_protect.c
851
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
869
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/mem_protect.c
870
p = hyp_virt_to_page(cur);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
873
WARN_ON(pkvm_create_mappings_locked((void *)cur,
arch/arm64/kvm/hyp/nvhe/mem_protect.c
874
(void *)cur + PAGE_SIZE,
arch/arm64/kvm/hyp/nvhe/mem_protect.c
887
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
894
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/hyp/nvhe/mem_protect.c
895
p = hyp_virt_to_page(cur);
arch/arm64/kvm/hyp/nvhe/mem_protect.c
897
WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
arch/arm64/kvm/hyp/nvhe/mm.c
49
unsigned long cur;
arch/arm64/kvm/hyp/nvhe/mm.c
57
cur = start + PAGE_ALIGN(size);
arch/arm64/kvm/hyp/nvhe/mm.c
60
if (cur > __hyp_vmemmap)
arch/arm64/kvm/hyp/nvhe/mm.c
63
__io_map_base = cur;
arch/arm64/kvm/mmu.c
535
phys_addr_t start, end, cur;
arch/arm64/kvm/mmu.c
555
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/mmu.c
556
pfn = __phys_to_pfn(cur);
arch/arm64/kvm/mmu.c
567
phys_addr_t start, end, cur;
arch/arm64/kvm/mmu.c
575
for (cur = start; cur < end; cur += PAGE_SIZE) {
arch/arm64/kvm/mmu.c
576
pfn = __phys_to_pfn(cur);
arch/arm64/kvm/sys_regs.c
1656
s64 new, s64 cur)
arch/arm64/kvm/sys_regs.c
1678
return arm64_ftr_safe_value(&kvm_ftr, new, cur);
arch/arm64/kvm/vgic/vgic-its.c
1112
struct its_device *cur, *temp;
arch/arm64/kvm/vgic/vgic-its.c
1114
list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
arch/arm64/kvm/vgic/vgic-its.c
1115
vgic_its_free_device(kvm, its, cur);
arch/arm64/kvm/vgic/vgic-its.c
1121
struct its_collection *cur, *temp;
arch/arm64/kvm/vgic/vgic-its.c
1123
list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
arch/arm64/kvm/vgic/vgic-its.c
1124
vgic_its_free_collection(its, cur->collection_id);
arch/csky/kernel/cpu-probe.c
14
unsigned int cur, next, i;
arch/csky/kernel/cpu-probe.c
20
cur = mfcr("cr13");
arch/csky/kernel/cpu-probe.c
22
seq_printf(m, "product info[%d] : 0x%08x\n", i, cur);
arch/csky/kernel/cpu-probe.c
27
if (cur == next)
arch/csky/kernel/cpu-probe.c
30
cur = next;
arch/csky/kernel/probes/kprobes.c
247
struct kprobe *cur = kprobe_running();
arch/csky/kernel/probes/kprobes.c
249
if (!cur)
arch/csky/kernel/probes/kprobes.c
253
if (cur->ainsn.api.restore != 0)
arch/csky/kernel/probes/kprobes.c
254
regs->pc = cur->ainsn.api.restore;
arch/csky/kernel/probes/kprobes.c
264
if (cur->post_handler) {
arch/csky/kernel/probes/kprobes.c
268
cur->post_handler(cur, regs, 0);
arch/csky/kernel/probes/kprobes.c
276
struct kprobe *cur = kprobe_running();
arch/csky/kernel/probes/kprobes.c
289
regs->pc = (unsigned long) cur->addr;
arch/loongarch/kernel/kprobes.c
128
static void post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb,
arch/loongarch/kernel/kprobes.c
132
if (cur->ainsn.restore != 0)
arch/loongarch/kernel/kprobes.c
133
instruction_pointer_set(regs, cur->ainsn.restore);
arch/loongarch/kernel/kprobes.c
147
if (cur->post_handler)
arch/loongarch/kernel/kprobes.c
148
cur->post_handler(cur, regs, 0);
arch/loongarch/kernel/kprobes.c
272
struct kprobe *cur = kprobe_running();
arch/loongarch/kernel/kprobes.c
276
if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
arch/loongarch/kernel/kprobes.c
277
((unsigned long)&cur->ainsn.insn[1] == addr)) {
arch/loongarch/kernel/kprobes.c
279
post_kprobe_handler(cur, kcb, regs);
arch/loongarch/kernel/kprobes.c
290
struct kprobe *cur = kprobe_running();
arch/loongarch/kernel/kprobes.c
303
regs->csr_era = (unsigned long)cur->addr;
arch/m68k/sun3/sun3dvma.c
101
list_for_each(cur, &hole_list) {
arch/m68k/sun3/sun3dvma.c
102
hole = list_entry(cur, struct hole, list);
arch/m68k/sun3/sun3dvma.c
142
struct list_head *cur;
arch/m68k/sun3/sun3dvma.c
154
list_for_each(cur, &hole_list) {
arch/m68k/sun3/sun3dvma.c
157
hole = list_entry(cur, struct hole, list);
arch/m68k/sun3/sun3dvma.c
195
struct list_head *cur;
arch/m68k/sun3/sun3dvma.c
207
list_for_each(cur, &hole_list) {
arch/m68k/sun3/sun3dvma.c
208
hole = list_entry(cur, struct hole, list);
arch/m68k/sun3/sun3dvma.c
229
list_add(&(hole->list), cur);
arch/m68k/sun3/sun3dvma.c
75
struct list_head *cur;
arch/m68k/sun3/sun3dvma.c
79
list_for_each(cur, holes) {
arch/m68k/sun3/sun3dvma.c
80
hole = list_entry(cur, struct hole, list);
arch/m68k/sun3/sun3dvma.c
98
struct list_head *cur;
arch/mips/boot/elf2ecoff.c
71
int remaining, cur, count;
arch/mips/boot/elf2ecoff.c
81
cur = remaining;
arch/mips/boot/elf2ecoff.c
82
if (cur > sizeof ibuf)
arch/mips/boot/elf2ecoff.c
83
cur = sizeof ibuf;
arch/mips/boot/elf2ecoff.c
84
remaining -= cur;
arch/mips/boot/elf2ecoff.c
85
if ((count = read(in, ibuf, cur)) != cur) {
arch/mips/boot/elf2ecoff.c
91
if ((count = write(out, ibuf, cur)) != cur) {
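
elf2ecoff.c is a host-side build tool, and the lines above are its bounded copy loop: clamp each chunk to the buffer size, then insist that read() and write() move exactly that many bytes. Stand-alone, with error handling reduced to a single return code:

#include <unistd.h>

static int copy_bytes(int in, int out, int remaining)
{
    char ibuf[4096];
    int cur, count;

    while (remaining > 0) {
        cur = remaining;
        if (cur > (int)sizeof ibuf)
            cur = sizeof ibuf;          /* at most one buffer per pass */
        remaining -= cur;
        if ((count = read(in, ibuf, cur)) != cur)
            return -1;                  /* short read */
        if ((count = write(out, ibuf, cur)) != cur)
            return -1;                  /* short write */
    }
    return 0;
}
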
arch/mips/cavium-octeon/csrc-octeon.c
146
u64 cur, end, inc;
arch/mips/cavium-octeon/csrc-octeon.c
148
cur = read_c0_cvmcount();
arch/mips/cavium-octeon/csrc-octeon.c
151
end = cur + inc;
arch/mips/cavium-octeon/csrc-octeon.c
153
while (end > cur)
arch/mips/cavium-octeon/csrc-octeon.c
154
cur = read_c0_cvmcount();
arch/mips/cavium-octeon/csrc-octeon.c
160
u64 cur, end, inc;
arch/mips/cavium-octeon/csrc-octeon.c
162
cur = read_c0_cvmcount();
arch/mips/cavium-octeon/csrc-octeon.c
165
end = cur + inc;
arch/mips/cavium-octeon/csrc-octeon.c
167
while (end > cur)
arch/mips/cavium-octeon/csrc-octeon.c
168
cur = read_c0_cvmcount();
arch/mips/cavium-octeon/csrc-octeon.c
174
u64 cur, end;
arch/mips/cavium-octeon/csrc-octeon.c
176
cur = read_c0_cvmcount();
arch/mips/cavium-octeon/csrc-octeon.c
177
end = cur + loops;
arch/mips/cavium-octeon/csrc-octeon.c
179
while (end > cur)
arch/mips/cavium-octeon/csrc-octeon.c
180
cur = read_c0_cvmcount();
arch/mips/cavium-octeon/csrc-octeon.c
195
u64 cur, end;
arch/mips/cavium-octeon/csrc-octeon.c
197
cur = read_c0_cvmcount();
arch/mips/cavium-octeon/csrc-octeon.c
207
end = cur + end;
arch/mips/cavium-octeon/csrc-octeon.c
209
end = cur + count;
arch/mips/cavium-octeon/csrc-octeon.c
211
while (end > cur)
arch/mips/cavium-octeon/csrc-octeon.c
212
cur = read_c0_cvmcount();
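
The csrc-octeon.c hits repeat one idiom: delay by spinning on the free-running CVMCOUNT cycle counter until it passes a precomputed target. A sketch (read_cycles() fakes the counter so the snippet runs anywhere; the real code reads a hardware register, and the target arithmetic can wrap near the counter's limit):

#include <stdint.h>

static uint64_t read_cycles(void)       /* stand-in for read_c0_cvmcount() */
{
    static uint64_t c;
    return c += 1000;                   /* pretend the clock is ticking */
}

static void delay_cycles(uint64_t loops)
{
    uint64_t cur, end;

    cur = read_cycles();
    end = cur + loops;                  /* timestamp to wait for */
    while (end > cur)                   /* spin until it passes */
        cur = read_cycles();
}
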
arch/mips/include/asm/sgiarcs.h
180
struct linux_bigint cur;
arch/mips/kernel/kprobes.c
381
struct kprobe *cur = kprobe_running();
arch/mips/kernel/kprobes.c
384
if (!cur)
arch/mips/kernel/kprobes.c
387
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
arch/mips/kernel/kprobes.c
389
cur->post_handler(cur, regs, 0);
arch/mips/kernel/kprobes.c
392
resume_execution(cur, regs, kcb);
arch/mips/kernel/kprobes.c
410
struct kprobe *cur = kprobe_running();
arch/mips/kernel/kprobes.c
414
resume_execution(cur, regs, kcb);
arch/mips/kvm/vz.c
2141
unsigned int cur, change;
arch/mips/kvm/vz.c
2285
cur = read_gc0_config();
arch/mips/kvm/vz.c
2286
change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
arch/mips/kvm/vz.c
2288
v = cur ^ change;
arch/mips/kvm/vz.c
2295
cur = read_gc0_config1();
arch/mips/kvm/vz.c
2296
change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
arch/mips/kvm/vz.c
2298
v = cur ^ change;
arch/mips/kvm/vz.c
2305
cur = read_gc0_config2();
arch/mips/kvm/vz.c
2306
change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
arch/mips/kvm/vz.c
2308
v = cur ^ change;
arch/mips/kvm/vz.c
2315
cur = read_gc0_config3();
arch/mips/kvm/vz.c
2316
change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
arch/mips/kvm/vz.c
2318
v = cur ^ change;
arch/mips/kvm/vz.c
2325
cur = read_gc0_config4();
arch/mips/kvm/vz.c
2326
change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
arch/mips/kvm/vz.c
2328
v = cur ^ change;
arch/mips/kvm/vz.c
2335
cur = read_gc0_config5();
arch/mips/kvm/vz.c
2336
change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
arch/mips/kvm/vz.c
2338
v = cur ^ change;
arch/mips/kvm/vz.c
2343
cur = kvm_read_sw_gc0_config6(cop0);
arch/mips/kvm/vz.c
2344
change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
arch/mips/kvm/vz.c
2346
v = cur ^ change;
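
Every Config register in the vz.c block above is updated the same way: XOR exposes the requested bit changes, the per-register write mask filters them down to the writable subset, and XOR-ing back applies exactly those. Reduced to a helper:

#include <stdint.h>

static uint32_t apply_writable(uint32_t cur, uint32_t v, uint32_t mask)
{
    uint32_t change = (cur ^ v) & mask; /* requested AND permitted changes */

    return cur ^ change;                /* flip only those bits */
}

For example, apply_writable(0xff00, 0x0ff0, 0x00ff) returns 0xfff0: the low byte follows the request, while the high byte keeps its current value.
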
arch/mips/mm/tlbex.c
1707
int cur = pte;
arch/mips/mm/tlbex.c
1715
uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
arch/mips/mm/tlbex.c
1716
cur = t;
arch/mips/mm/tlbex.c
1718
uasm_i_andi(p, t, cur, 1);
arch/mips/mm/tlbex.c
1726
uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
arch/mips/mm/tlbex.c
1727
cur = t;
arch/mips/mm/tlbex.c
1729
uasm_i_andi(p, t, cur,
arch/mips/mm/tlbex.c
1759
int cur = pte;
arch/mips/mm/tlbex.c
1762
uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
arch/mips/mm/tlbex.c
1763
cur = t;
arch/mips/mm/tlbex.c
1765
uasm_i_andi(p, t, cur,
arch/powerpc/kernel/kprobes.c
396
struct kprobe *cur = kprobe_running();
arch/powerpc/kernel/kprobes.c
399
if (!cur || user_mode(regs))
arch/powerpc/kernel/kprobes.c
402
len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
arch/powerpc/kernel/kprobes.c
404
if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
arch/powerpc/kernel/kprobes.c
407
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
arch/powerpc/kernel/kprobes.c
409
cur->post_handler(cur, regs, 0);
arch/powerpc/kernel/kprobes.c
413
regs_set_return_ip(regs, (unsigned long)cur->addr + len);
arch/powerpc/kernel/kprobes.c
439
struct kprobe *cur = kprobe_running();
arch/powerpc/kernel/kprobes.c
453
regs_set_return_ip(regs, (unsigned long)cur->addr);
arch/powerpc/kvm/book3s_hv.c
4607
ktime_t cur, start_poll, start_wait;
arch/powerpc/kvm/book3s_hv.c
4614
cur = start_poll = ktime_get();
arch/powerpc/kvm/book3s_hv.c
4627
cur = ktime_get();
arch/powerpc/kvm/book3s_hv.c
4628
} while (kvm_vcpu_can_poll(cur, stop));
arch/powerpc/kvm/book3s_hv.c
4662
cur = ktime_get();
arch/powerpc/kvm/book3s_hv.c
4665
block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);
arch/powerpc/kvm/book3s_hv.c
4670
ktime_to_ns(cur) - ktime_to_ns(start_wait);
arch/powerpc/kvm/book3s_hv.c
4673
ktime_to_ns(cur) - ktime_to_ns(start_wait));
arch/powerpc/kvm/book3s_hv.c
4688
ktime_to_ns(cur) -
arch/powerpc/kvm/book3s_hv.c
4692
ktime_to_ns(cur) - ktime_to_ns(start_poll));
arch/powerpc/kvm/book3s_xive.h
275
u32 cur;
arch/powerpc/kvm/book3s_xive.h
279
cur = be32_to_cpup(qpage + *idx);
arch/powerpc/kvm/book3s_xive.h
280
if ((cur >> 31) == *toggle)
arch/powerpc/kvm/book3s_xive.h
285
return cur & 0x7fffffff;
arch/powerpc/platforms/powermac/pci.c
668
int i, cur = -1;
arch/powerpc/platforms/powermac/pci.c
684
if (++cur >= 3) {
arch/powerpc/platforms/powermac/pci.c
688
hose->mem_resources[cur].flags = IORESOURCE_MEM;
arch/powerpc/platforms/powermac/pci.c
689
hose->mem_resources[cur].name = hose->dn->full_name;
arch/powerpc/platforms/powermac/pci.c
690
hose->mem_resources[cur].start = base;
arch/powerpc/platforms/powermac/pci.c
691
hose->mem_resources[cur].end = end;
arch/powerpc/platforms/powermac/pci.c
692
hose->mem_offset[cur] = 0;
arch/powerpc/platforms/powermac/pci.c
693
DBG(" %d: 0x%08lx-0x%08lx\n", cur, base, end);
arch/powerpc/platforms/powermac/pci.c
696
hose->mem_resources[cur].end = end;
arch/powerpc/platforms/powernv/opal-powercap.c
165
u32 cur, min, max;
arch/powerpc/platforms/powernv/opal-powercap.c
179
if (!of_property_read_u32(node, "powercap-current", &cur)) {
arch/powerpc/platforms/powernv/opal-powercap.c
217
powercap_add_attr(cur, "powercap-current",
arch/powerpc/sysdev/xive/common.c
105
u32 cur;
arch/powerpc/sysdev/xive/common.c
109
cur = be32_to_cpup(q->qpage + q->idx);
arch/powerpc/sysdev/xive/common.c
112
if ((cur >> 31) == q->toggle)
arch/powerpc/sysdev/xive/common.c
125
return cur & 0x7fffffff;
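
The XIVE lines above peek at an event queue whose entries carry a generation bit in bit 31; q->toggle flips each time the consumer wraps, so the check treats a slot whose top bit still equals toggle as not yet (re)written and reports the queue empty. A simplified peek (big-endian conversion, index advance, and wrap handling omitted):

#include <stdint.h>

struct queue { const uint32_t *qpage; uint32_t idx; uint8_t toggle; };

static uint32_t queue_peek(const struct queue *q)
{
    uint32_t cur = q->qpage[q->idx];    /* kernel: be32_to_cpup() here */

    if ((cur >> 31) == q->toggle)       /* slot not rewritten yet: empty */
        return 0;
    return cur & 0x7fffffff;            /* payload minus the generation bit */
}
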
arch/riscv/kernel/probes/kprobes.c
224
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
arch/riscv/kernel/probes/kprobes.c
227
if (cur->ainsn.api.restore != 0)
arch/riscv/kernel/probes/kprobes.c
228
regs->epc = cur->ainsn.api.restore;
arch/riscv/kernel/probes/kprobes.c
238
if (cur->post_handler) {
arch/riscv/kernel/probes/kprobes.c
242
cur->post_handler(cur, regs, 0);
arch/riscv/kernel/probes/kprobes.c
250
struct kprobe *cur = kprobe_running();
arch/riscv/kernel/probes/kprobes.c
263
regs->epc = (unsigned long) cur->addr;
arch/riscv/kernel/probes/kprobes.c
341
struct kprobe *cur = kprobe_running();
arch/riscv/kernel/probes/kprobes.c
343
if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
arch/riscv/kernel/probes/kprobes.c
344
((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
arch/riscv/kernel/probes/kprobes.c
346
post_kprobe_handler(cur, kcb, regs);
arch/riscv/kernel/vector.c
166
static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt,
arch/riscv/kernel/vector.c
171
ctrl = cur & PR_RISCV_V_VSTATE_CTRL_CUR_MASK;
arch/riscv/kernel/vector.c
233
int cur, next;
arch/riscv/kernel/vector.c
241
cur = PR_RISCV_V_VSTATE_CTRL_ON;
arch/riscv/kernel/vector.c
243
cur = PR_RISCV_V_VSTATE_CTRL_OFF;
arch/riscv/kernel/vector.c
245
cur = next;
arch/riscv/kernel/vector.c
252
riscv_v_ctrl_set(tsk, cur, next, inherit);
arch/riscv/kernel/vector.c
266
int cur, next;
arch/riscv/kernel/vector.c
274
cur = VSTATE_CTRL_GET_CUR(arg);
arch/riscv/kernel/vector.c
275
switch (cur) {
arch/riscv/kernel/vector.c
285
cur = riscv_v_ctrl_get_cur(current);
arch/riscv/kernel/vector.c
297
riscv_v_ctrl_set(current, cur, next, inherit);
arch/s390/kvm/dat.c
1311
gfn_t cur;
arch/s390/kvm/dat.c
1314
for (cur = ALIGN_DOWN(gfn, _PAGE_ENTRIES); cur < gfn + count; cur += _PAGE_ENTRIES) {
arch/s390/kvm/dat.c
1315
rc = dat_entry_walk(mc, cur, asce, DAT_WALK_ALLOC, TABLE_TYPE_PAGE_TABLE,
arch/s390/kvm/dat.c
539
gfn_t cur, next;
arch/s390/kvm/dat.c
546
for (cur = ALIGN_DOWN(start, cur_size); cur < end; idx++, cur = next) {
arch/s390/kvm/dat.c
547
next = cur + cur_size;
arch/s390/kvm/dat.c
560
rc = the_op(walk->last, cur, next, walk);
arch/s390/kvm/dat.c
567
rc = dat_crste_walk_range(max(start, cur), min(end, next),
arch/s390/kvm/dat.c
570
rc = dat_pte_walk_range(max(start, cur), min(end, next),
arch/s390/kvm/gmap.c
936
phys_addr_t origin, cur, end;
arch/s390/kvm/gmap.c
942
cur = ((max(gfn, walk->start) - gfn) << PAGE_SHIFT) + origin;
arch/s390/kvm/gmap.c
944
for ( ; cur < end; cur += PAGE_SIZE)
arch/s390/kvm/gmap.c
945
__kvm_s390_pv_destroy_page(phys_to_page(cur));
arch/s390/kvm/pv.c
554
struct pv_vm_to_be_destroyed *cur;
arch/s390/kvm/pv.c
581
cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
arch/s390/kvm/pv.c
583
if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
arch/s390/kvm/pv.c
595
list_del(&cur->list);
arch/s390/kvm/pv.c
596
kfree(cur);
arch/s390/kvm/vsie.c
601
struct vsie_page *cur, *next;
arch/s390/kvm/vsie.c
609
list_for_each_entry_safe(cur, next, &gmap->scb_users, gmap_cache.list) {
arch/s390/kvm/vsie.c
610
prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
arch/s390/kvm/vsie.c
612
prefix += cur->scb_s.mso;
arch/s390/kvm/vsie.c
614
prefix_unmapped_sync(cur);
arch/sh/kernel/kprobes.c
308
struct kprobe *cur = kprobe_running();
arch/sh/kernel/kprobes.c
313
if (!cur)
arch/sh/kernel/kprobes.c
316
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
arch/sh/kernel/kprobes.c
318
cur->post_handler(cur, regs, 0);
arch/sh/kernel/kprobes.c
357
struct kprobe *cur = kprobe_running();
arch/sh/kernel/kprobes.c
371
regs->pc = (unsigned long)cur->addr;
arch/sparc/kernel/kprobes.c
295
struct kprobe *cur = kprobe_running();
arch/sparc/kernel/kprobes.c
298
if (!cur)
arch/sparc/kernel/kprobes.c
301
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
arch/sparc/kernel/kprobes.c
303
cur->post_handler(cur, regs, 0);
arch/sparc/kernel/kprobes.c
306
resume_execution(cur, regs, kcb);
arch/sparc/kernel/kprobes.c
322
struct kprobe *cur = kprobe_running();
arch/sparc/kernel/kprobes.c
336
regs->tpc = (unsigned long)cur->addr;
arch/x86/events/intel/pt.c
1205
buf->cur = &cur_tp->topa;
arch/x86/events/intel/pt.c
1206
buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
arch/x86/events/intel/pt.c
656
base = topa_to_page(buf->cur)->table;
arch/x86/events/intel/pt.c
730
buf->first = buf->last = buf->cur = topa;
arch/x86/events/intel/pt.c
853
if (buf->cur_idx == buf->cur->last) {
arch/x86/events/intel/pt.c
854
if (buf->cur == buf->last) {
arch/x86/events/intel/pt.c
855
buf->cur = buf->first;
arch/x86/events/intel/pt.c
858
buf->cur = list_entry(buf->cur->list.next, struct topa,
arch/x86/events/intel/pt.c
885
base = buf->cur->offset + buf->output_off;
arch/x86/events/intel/pt.c
889
base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);
arch/x86/events/intel/pt.c
909
return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
arch/x86/events/intel/pt.c
918
return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
arch/x86/events/intel/pt.c
992
buf->cur = &tp->topa;
arch/x86/events/intel/pt.h
75
struct topa *first, *last, *cur;
arch/x86/events/intel/uncore_discovery.c
32
#define __node_2_type(cur) \
arch/x86/events/intel/uncore_discovery.c
33
rb_entry((cur), struct intel_uncore_discovery_type, node)
arch/x86/hyperv/ivm.c
638
int cur, i;
arch/x86/hyperv/ivm.c
647
for (i = 0, cur = 0; i < ent->count; i++) {
arch/x86/hyperv/ivm.c
648
input->gpa_page_list[cur] = ent->pfn + i;
arch/x86/hyperv/ivm.c
649
cur++;
arch/x86/hyperv/ivm.c
651
if (cur == HV_MAX_MODIFY_GPA_REP_COUNT || i == ent->count - 1) {
arch/x86/hyperv/ivm.c
658
cur, 0, input, NULL);
arch/x86/hyperv/ivm.c
660
cur = 0;
arch/x86/hyperv/mmu.c
29
unsigned long cur = start, diff;
arch/x86/hyperv/mmu.c
32
diff = end > cur ? end - cur : 0;
arch/x86/hyperv/mmu.c
34
gva_list[gva_n] = cur & PAGE_MASK;
arch/x86/hyperv/mmu.c
41
cur += HV_TLB_FLUSH_UNIT;
arch/x86/hyperv/mmu.c
44
cur = end;
arch/x86/hyperv/mmu.c
49
} while (cur < end);
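
The hyperv mmu.c loop above (fill_gva_list in the kernel) packs a virtual-address range into fixed-size hypercall entries: each 64-bit word holds a page-aligned base, with the low 12 bits encoding how many additional pages to flush. A stand-alone version (4 KiB pages; assumes cur and end are page-aligned and the output array is large enough):

#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~(((uint64_t)1 << PAGE_SHIFT) - 1))
#define FLUSH_UNIT ((uint64_t)4096 << PAGE_SHIFT)   /* max pages per entry */

static int fill_gva_list(uint64_t gva_list[], uint64_t cur, uint64_t end)
{
    int n = 0;
    uint64_t diff;

    do {
        diff = end > cur ? end - cur : 0;
        gva_list[n] = cur & PAGE_MASK;          /* page-aligned base */
        if (diff >= FLUSH_UNIT) {
            gva_list[n] |= ~PAGE_MASK;          /* all 4095 extra pages */
            cur += FLUSH_UNIT;
        } else if (diff) {
            gva_list[n] |= (diff - 1) >> PAGE_SHIFT;    /* partial tail */
            cur = end;
        }
        n++;
    } while (cur < end);

    return n;                                   /* entries written */
}
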
arch/x86/hyperv/nested.c
60
u64 cur = start_gfn;
arch/x86/hyperv/nested.c
76
flush->gpa_list[gpa_n].page.basepfn = cur;
arch/x86/hyperv/nested.c
79
cur += additional_pages + 1;
arch/x86/kernel/cpu/common.c
2465
struct task_struct *cur = current;
arch/x86/kernel/cpu/common.c
2481
memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
arch/x86/kernel/cpu/common.c
2494
cur->active_mm = &init_mm;
arch/x86/kernel/cpu/common.c
2495
BUG_ON(cur->mm);
arch/x86/kernel/cpu/common.c
2497
enter_lazy_tlb(&init_mm, cur);
arch/x86/kernel/kprobes/core.c
1035
struct kprobe *cur = kprobe_running();
arch/x86/kernel/kprobes/core.c
1038
if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
arch/x86/kernel/kprobes/core.c
1049
regs->ip = (unsigned long)cur->addr;
arch/x86/kernel/kprobes/core.c
843
static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
arch/x86/kernel/kprobes/core.c
856
if (cur->post_handler)
arch/x86/kernel/kprobes/core.c
857
cur->post_handler(cur, regs, 0);
arch/x86/kernel/reboot_fixups_32.c
84
const struct device_fixup *cur;
arch/x86/kernel/reboot_fixups_32.c
94
cur = &(fixups_table[i]);
arch/x86/kernel/reboot_fixups_32.c
95
dev = pci_get_device(cur->vendor, cur->device, NULL);
arch/x86/kernel/reboot_fixups_32.c
99
cur->reboot_fixup(dev);
arch/x86/kernel/signal_32.c
64
u16 cur;
arch/x86/kernel/signal_32.c
72
savesegment(gs, cur);
arch/x86/kernel/signal_32.c
73
if (fixup_rpl(sc->gs) != cur)
arch/x86/kernel/signal_32.c
75
savesegment(fs, cur);
arch/x86/kernel/signal_32.c
76
if (fixup_rpl(sc->fs) != cur)
arch/x86/kernel/signal_32.c
79
savesegment(ds, cur);
arch/x86/kernel/signal_32.c
80
if (fixup_rpl(sc->ds) != cur)
arch/x86/kernel/signal_32.c
82
savesegment(es, cur);
arch/x86/kernel/signal_32.c
83
if (fixup_rpl(sc->es) != cur)
arch/x86/kernel/tsc_sync.c
123
static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
arch/x86/kernel/tsc_sync.c
153
cur->adjusted = bootval;
arch/x86/kernel/tsc_sync.c
159
struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
arch/x86/kernel/tsc_sync.c
170
cur->bootval = bootval;
arch/x86/kernel/tsc_sync.c
171
cur->nextcheck = jiffies + HZ;
arch/x86/kernel/tsc_sync.c
172
tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(), bootcpu);
arch/x86/kernel/tsc_sync.c
183
struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
arch/x86/kernel/tsc_sync.c
192
cur->bootval = bootval;
arch/x86/kernel/tsc_sync.c
193
cur->nextcheck = jiffies + HZ;
arch/x86/kernel/tsc_sync.c
194
cur->warned = false;
arch/x86/kernel/tsc_sync.c
199
cur->adjusted = bootval;
arch/x86/kernel/tsc_sync.c
212
tsc_sanitize_first_cpu(cur, bootval, smp_processor_id(),
arch/x86/kernel/tsc_sync.c
232
cur->adjusted = ref->adjusted;
arch/x86/kernel/tsc_sync.c
434
struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
arch/x86/kernel/tsc_sync.c
517
cur->adjusted += cur_max_warp;
arch/x86/kernel/tsc_sync.c
520
cpu, cur_max_warp, cur->adjusted);
arch/x86/kernel/tsc_sync.c
522
wrmsrq(MSR_IA32_TSC_ADJUST, cur->adjusted);
arch/x86/kvm/debugfs.c
122
cur = log[k];
arch/x86/kvm/debugfs.c
127
cur[index]++;
arch/x86/kvm/debugfs.c
146
cur = log[i];
arch/x86/kvm/debugfs.c
148
seq_printf(m, "%d\t", cur[j]);
arch/x86/kvm/debugfs.c
97
unsigned int *log[KVM_NR_PAGE_SIZES], *cur;
arch/x86/mm/dump_pagetables.c
305
pgprotval_t cur, eff;
arch/x86/mm/dump_pagetables.c
320
cur = st->current_prot;
arch/x86/mm/dump_pagetables.c
332
} else if (new_prot != cur || new_eff != eff || level != st->level ||
block/blk-cgroup.c
1028
static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
block/blk-cgroup.c
1036
blkg_iostat_set(&delta, cur);
block/blk-cgroup.c
1038
blkg_iostat_add(&blkg->iostat.cur, &delta);
block/blk-cgroup.c
1070
struct blkg_iostat cur;
block/blk-cgroup.c
1090
blkg_iostat_set(&cur, &bisc->cur);
block/blk-cgroup.c
1093
blkcg_iostat_update(blkg, &cur, &bisc->last);
block/blk-cgroup.c
1098
blkcg_iostat_update(parent, &blkg->iostat.cur,
block/blk-cgroup.c
1172
blkg_iostat_set(&blkg->iostat.cur, &tmp);
block/blk-cgroup.c
1198
rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
block/blk-cgroup.c
1199
wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
block/blk-cgroup.c
1200
dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
block/blk-cgroup.c
1201
rios = bis->cur.ios[BLKG_IOSTAT_READ];
block/blk-cgroup.c
1202
wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
block/blk-cgroup.c
1203
dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
block/blk-cgroup.c
1895
u64 cur = atomic64_read(&blkg->delay_nsec);
block/blk-cgroup.c
1912
if (unlikely(cur < sub)) {
block/blk-cgroup.c
1917
blkg->last_delay = cur - sub;
block/blk-cgroup.c
2212
bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
block/blk-cgroup.c
2214
bis->cur.ios[rwd]++;
block/blk-cgroup.c
628
struct blkg_iostat cur = {0};
block/blk-cgroup.c
632
blkg_iostat_set(&bis->cur, &cur);
block/blk-cgroup.c
633
blkg_iostat_set(&bis->last, &cur);
block/blk-cgroup.h
51
struct blkg_iostat cur;
block/blk-rq-qos.c
11
unsigned int cur = atomic_read(v);
block/blk-rq-qos.c
14
if (cur >= below)
block/blk-rq-qos.c
16
} while (!atomic_try_cmpxchg(v, &cur, cur + 1));
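
atomic_inc_below() above bumps a counter only while it is under a limit, retrying with compare-and-exchange when racing writers move it first. The same loop with C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

static bool atomic_inc_below(atomic_uint *v, unsigned int below)
{
    unsigned int cur = atomic_load(v);

    do {
        if (cur >= below)
            return false;               /* at the limit: refuse */
        /* on failure, cur is reloaded with the current value */
    } while (!atomic_compare_exchange_weak(v, &cur, cur + 1));

    return true;                        /* we won the race */
}
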
block/blk-rq-qos.c
359
struct rq_qos **cur;
block/blk-rq-qos.c
365
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
block/blk-rq-qos.c
366
if (*cur == rqos) {
block/blk-rq-qos.c
367
*cur = rqos->next;
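
The rq_qos removal loop above walks a singly linked list through a pointer-to-pointer cursor, so unlinking the head and unlinking a middle node are the same assignment and no "previous node" tracking is needed. Isolated:

struct node { struct node *next; };

static void unlink_node(struct node **head, struct node *victim)
{
    struct node **cur;

    for (cur = head; *cur; cur = &(*cur)->next) {
        if (*cur == victim) {
            *cur = victim->next;        /* works for head and middle alike */
            break;
        }
    }
}
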
block/blk-wbt.c
157
const unsigned long cur = jiffies;
block/blk-wbt.c
159
if (cur != *var)
block/blk-wbt.c
160
*var = cur;
block/elevator.c
838
struct elevator_type *cur = NULL, *e;
block/elevator.c
846
cur = q->elevator->type;
block/elevator.c
851
if (e == cur)
crypto/af_alg.c
1019
if (sgl->cur)
crypto/af_alg.c
1020
sg_unmark_end(sg + sgl->cur - 1);
crypto/af_alg.c
1025
.nents = sgl->cur,
crypto/af_alg.c
1026
.orig_nents = sgl->cur,
crypto/af_alg.c
1030
MAX_SGL_ENTS - sgl->cur, 0);
crypto/af_alg.c
1036
for (; sgl->cur < sgtable.nents; sgl->cur++)
crypto/af_alg.c
1037
get_page(sg_page(&sg[sgl->cur]));
crypto/af_alg.c
1045
unsigned int i = sgl->cur;
crypto/af_alg.c
1071
sgl->cur++;
crypto/af_alg.c
1072
} while (len && sgl->cur < MAX_SGL_ENTS);
crypto/af_alg.c
1078
sg_mark_end(sg + sgl->cur - 1);
crypto/af_alg.c
616
if (!sg || sgl->cur >= MAX_SGL_ENTS) {
crypto/af_alg.c
624
sgl->cur = 0;
crypto/af_alg.c
660
for (i = 0; i < sgl->cur; i++) {
crypto/af_alg.c
697
for (i = 0; i < sgl->cur; i++) {
crypto/af_alg.c
981
sg = sgl->sg + sgl->cur - 1;
crypto/tcrypt.c
1159
struct test_mb_skcipher_data *cur = &data[j];
crypto/tcrypt.c
1164
sg_init_table(cur->sg, pages);
crypto/tcrypt.c
1167
sg_set_buf(cur->sg + p, cur->xbuf[p],
crypto/tcrypt.c
1169
memset(cur->xbuf[p], 0xff, PAGE_SIZE);
crypto/tcrypt.c
1174
sg_set_buf(cur->sg + p, cur->xbuf[p], k);
crypto/tcrypt.c
1175
memset(cur->xbuf[p], 0xff, k);
crypto/tcrypt.c
1177
skcipher_request_set_crypt(cur->req, cur->sg,
crypto/tcrypt.c
1178
cur->sg, bs, iv);
crypto/tcrypt.c
376
struct test_mb_aead_data *cur = &data[j];
crypto/tcrypt.c
378
assoc = cur->axbuf[0];
crypto/tcrypt.c
381
sg_init_aead(cur->sg, cur->xbuf,
crypto/tcrypt.c
385
sg_init_aead(cur->sgout, cur->xoutbuf,
crypto/tcrypt.c
389
aead_request_set_ad(cur->req, aad_size);
crypto/tcrypt.c
393
aead_request_set_crypt(cur->req,
crypto/tcrypt.c
394
cur->sgout,
crypto/tcrypt.c
395
cur->sg,
crypto/tcrypt.c
397
ret = crypto_aead_encrypt(cur->req);
crypto/tcrypt.c
398
ret = do_one_aead_op(cur->req, ret);
crypto/tcrypt.c
407
aead_request_set_crypt(cur->req, cur->sg,
crypto/tcrypt.c
408
cur->sgout, bs +
drivers/acpi/acpica/psargs.c
702
union acpi_parse_object *cur = start;
drivers/acpi/acpica/psargs.c
706
while (cur) {
drivers/acpi/acpica/psargs.c
707
next = cur->common.next;
drivers/acpi/acpica/psargs.c
711
arg = acpi_ps_get_arg(cur, 0);
drivers/acpi/acpica/psargs.c
716
acpi_ps_free_op(cur);
drivers/acpi/acpica/psargs.c
717
cur = next;
drivers/android/binder.c
225
atomic_t cur;
drivers/android/binder.c
237
unsigned int cur = atomic_inc_return(&log->cur);
drivers/android/binder.c
239
if (cur >= ARRAY_SIZE(log->entry))
drivers/android/binder.c
241
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
drivers/android/binder.c
6968
unsigned int log_cur = atomic_read(&log->cur);
drivers/android/binder.c
6970
unsigned int cur;
drivers/android/binder.c
6974
cur = count < ARRAY_SIZE(log->entry) && !log->full ?
drivers/android/binder.c
6979
unsigned int index = cur++ % ARRAY_SIZE(log->entry);
drivers/android/binder.c
7104
atomic_set(&binder_transaction_log.cur, ~0U);
drivers/android/binder.c
7105
atomic_set(&binder_transaction_log_failed.cur, ~0U);
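
The binder lines above hand out log slots from a ring with one atomic increment; starting the index at ~0U makes the first post-increment land on slot 0, and the modulo wrap silently overwrites the oldest entries. Modelled with C11 atomics (the entry count should divide 2^32 so the wrap stays consistent):

#include <stdatomic.h>

#define N_ENTRIES 32                    /* power of two, as in binder */
struct entry { int data; };

static struct entry entries[N_ENTRIES];
static atomic_uint log_cur = ~0U;       /* first increment yields index 0 */

static struct entry *next_entry(void)
{
    unsigned int cur = atomic_fetch_add(&log_cur, 1) + 1;  /* inc_return */

    return &entries[cur % N_ENTRIES];   /* wraps; old entries overwritten */
}
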
drivers/ata/libata-core.c
5901
const struct ata_port_operations *cur;
drivers/ata/libata-core.c
5911
for (cur = ops->inherits; cur; cur = cur->inherits) {
drivers/ata/libata-core.c
5912
void **inherit = (void **)cur;
drivers/ata/libata-core.c
6603
static int __init ata_parse_force_one(char **cur,
drivers/ata/libata-core.c
6607
char *start = *cur, *p = *cur;
drivers/ata/libata-core.c
6618
*cur = p;
drivers/ata/libata-core.c
6620
*cur = p + 1;
drivers/ata/libata-core.c
6713
char *p, *cur, *next;
drivers/ata/libata-core.c
6728
for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
drivers/ata/libata-core.c
6732
next = cur;
drivers/ata/libata-core.c
6736
cur, reason);
drivers/ata/libata-eh.c
308
const u8 *cur;
drivers/ata/libata-eh.c
310
for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
drivers/ata/libata-eh.c
311
if (*cur == cmd)
drivers/ata/libata-sata.c
241
u32 last, cur;
drivers/ata/libata-sata.c
248
if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
drivers/ata/libata-sata.c
250
cur &= 0xf;
drivers/ata/libata-sata.c
252
last = cur;
drivers/ata/libata-sata.c
257
if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
drivers/ata/libata-sata.c
259
cur &= 0xf;
drivers/ata/libata-sata.c
262
if (cur == last) {
drivers/ata/libata-sata.c
263
if (cur == 1 && time_before(jiffies, deadline))
drivers/ata/libata-sata.c
272
last = cur;
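
sata_link_debounce() above treats the SStatus DET field as settled only once consecutive reads agree. The core of that loop, with the deadline logic elided and a fake register read so the sketch is self-contained:

#include <stdint.h>

static uint32_t read_status(void)       /* stand-in for sata_scr_read() */
{
    return 0x123;                       /* pretend DET reads as 0x3 */
}

static uint32_t debounce_det(void)
{
    uint32_t last, cur;

    last = read_status() & 0xf;         /* DET is the low nibble */
    for (;;) {
        cur = read_status() & 0xf;
        if (cur == last)
            return cur;                 /* stable across two samples */
        last = cur;                     /* still bouncing: resample */
    }
}

The real function also enforces a minimum stable interval and a deadline, which is why its loop carries jiffies bookkeeping.
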
drivers/base/regmap/regcache.c
806
unsigned int base, unsigned int cur)
drivers/base/regmap/regcache.c
814
count = (cur - base) / map->reg_stride;
drivers/base/regmap/regcache.c
817
count * val_bytes, count, base, cur - map->reg_stride);
drivers/base/regmap/regcache.c
824
base, cur - map->reg_stride, ret);
drivers/block/drbd/drbd_main.c
1157
len = bs.cur.b - p->code + !!bs.cur.bit;
drivers/block/drbd/drbd_main.c
1173
dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
drivers/block/drbd/drbd_receiver.c
4539
(unsigned int)(bs.cur.b - p->code),
drivers/block/drbd/drbd_vli.h
199
static inline void bitstream_cursor_reset(struct bitstream_cursor *cur, void *s)
drivers/block/drbd/drbd_vli.h
201
cur->b = s;
drivers/block/drbd/drbd_vli.h
202
cur->bit = 0;
drivers/block/drbd/drbd_vli.h
207
static inline void bitstream_cursor_advance(struct bitstream_cursor *cur, unsigned int bits)
drivers/block/drbd/drbd_vli.h
209
bits += cur->bit;
drivers/block/drbd/drbd_vli.h
210
cur->b = cur->b + (bits >> 3);
drivers/block/drbd/drbd_vli.h
211
cur->bit = bits & 7;
drivers/block/drbd/drbd_vli.h
216
struct bitstream_cursor cur;
drivers/block/drbd/drbd_vli.h
231
bitstream_cursor_reset(&bs->cur, bs->buf);
drivers/block/drbd/drbd_vli.h
236
bitstream_cursor_reset(&bs->cur, bs->buf);
drivers/block/drbd/drbd_vli.h
250
unsigned char *b = bs->cur.b;
drivers/block/drbd/drbd_vli.h
256
if ((bs->cur.b + ((bs->cur.bit + bits -1) >> 3)) - bs->buf >= bs->buf_len)
drivers/block/drbd/drbd_vli.h
263
*b++ |= (val & 0xff) << bs->cur.bit;
drivers/block/drbd/drbd_vli.h
265
for (tmp = 8 - bs->cur.bit; tmp < bits; tmp += 8)
drivers/block/drbd/drbd_vli.h
268
bitstream_cursor_advance(&bs->cur, bits);
drivers/block/drbd/drbd_vli.h
289
if (bs->cur.b + ((bs->cur.bit + bs->pad_bits + bits -1) >> 3) - bs->buf >= bs->buf_len)
drivers/block/drbd/drbd_vli.h
290
bits = ((bs->buf_len - (bs->cur.b - bs->buf)) << 3)
drivers/block/drbd/drbd_vli.h
291
- bs->cur.bit - bs->pad_bits;
drivers/block/drbd/drbd_vli.h
300
n = (bs->cur.bit + bits + 7) >> 3;
drivers/block/drbd/drbd_vli.h
304
memcpy(&val, bs->cur.b+1, n - 1);
drivers/block/drbd/drbd_vli.h
305
val = le64_to_cpu(val) << (8 - bs->cur.bit);
drivers/block/drbd/drbd_vli.h
309
val |= bs->cur.b[0] >> bs->cur.bit;
drivers/block/drbd/drbd_vli.h
314
bitstream_cursor_advance(&bs->cur, bits);
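
The drbd_vli.h hits center on a two-field cursor, a byte pointer plus a bit offset, where advancing by N bits moves the pointer by whole bytes and keeps the remainder. Extracted:

#include <stdint.h>

struct bitstream_cursor {
    uint8_t *b;                 /* current byte */
    unsigned int bit;           /* bit offset within it: 0..7 */
};

static void cursor_reset(struct bitstream_cursor *cur, void *s)
{
    cur->b = s;
    cur->bit = 0;
}

static void cursor_advance(struct bitstream_cursor *cur, unsigned int bits)
{
    bits += cur->bit;           /* total offset from the current byte */
    cur->b += bits >> 3;        /* whole bytes */
    cur->bit = bits & 7;        /* leftover bits */
}
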
drivers/block/mtip32xx/mtip32xx.c
2690
attr242.cur, le32_to_cpu(attr242.data));
drivers/block/mtip32xx/mtip32xx.h
164
u8 cur;
drivers/char/agp/isoch.c
137
cur = list_entry(pos, struct agp_3_5_dev, list);
drivers/char/agp/isoch.c
138
dev = cur->dev;
drivers/char/agp/isoch.c
140
pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
drivers/char/agp/isoch.c
145
master[cdev].dev = cur;
drivers/char/agp/isoch.c
248
cur = master[cdev].dev;
drivers/char/agp/isoch.c
249
dev = cur->dev;
drivers/char/agp/isoch.c
25
struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
drivers/char/agp/isoch.c
254
pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
drivers/char/agp/isoch.c
255
pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
drivers/char/agp/isoch.c
265
pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
drivers/char/agp/isoch.c
266
pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
drivers/char/agp/isoch.c
286
struct agp_3_5_dev *cur;
drivers/char/agp/isoch.c
29
cur = list_entry(pos, struct agp_3_5_dev, list);
drivers/char/agp/isoch.c
30
if (cur->maxbw > n->maxbw)
drivers/char/agp/isoch.c
300
cur = list_entry(pos, struct agp_3_5_dev, list);
drivers/char/agp/isoch.c
302
pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
drivers/char/agp/isoch.c
305
pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
drivers/char/agp/isoch.c
321
struct agp_3_5_dev *dev_list, *cur;
drivers/char/agp/isoch.c
365
if ((cur = kmalloc_obj(*cur)) == NULL) {
drivers/char/agp/isoch.c
369
cur->dev = dev;
drivers/char/agp/isoch.c
371
pos = &cur->list;
drivers/char/agp/isoch.c
38
struct agp_3_5_dev *cur;
drivers/char/agp/isoch.c
388
cur = list_entry(pos, struct agp_3_5_dev, list);
drivers/char/agp/isoch.c
389
dev = cur->dev;
drivers/char/agp/isoch.c
422
cur->capndx = mcapndx;
drivers/char/agp/isoch.c
424
pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
drivers/char/agp/isoch.c
456
cur = list_entry(pos, struct agp_3_5_dev, list);
drivers/char/agp/isoch.c
459
kfree(cur);
drivers/char/agp/isoch.c
46
cur = list_entry(pos, struct agp_3_5_dev, list);
drivers/char/agp/isoch.c
47
dev = cur->dev;
drivers/char/agp/isoch.c
49
pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
drivers/char/agp/isoch.c
50
cur->maxbw = (nistat >> 16) & 0xff;
drivers/char/agp/isoch.c
82
struct agp_3_5_dev *cur;
drivers/char/bsr.c
157
struct bsr_dev *cur, *n;
drivers/char/bsr.c
159
list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
drivers/char/bsr.c
160
if (cur->bsr_device) {
drivers/char/bsr.c
161
cdev_del(&cur->bsr_cdev);
drivers/char/bsr.c
162
device_del(cur->bsr_device);
drivers/char/bsr.c
164
list_del(&cur->bsr_list);
drivers/char/bsr.c
165
kfree(cur);
drivers/char/bsr.c
189
struct bsr_dev *cur = kzalloc_obj(struct bsr_dev);
drivers/char/bsr.c
193
if (!cur) {
drivers/char/bsr.c
202
kfree(cur);
drivers/char/bsr.c
206
cur->bsr_minor = i + total_bsr_devs;
drivers/char/bsr.c
207
cur->bsr_addr = res.start;
drivers/char/bsr.c
208
cur->bsr_len = resource_size(&res);
drivers/char/bsr.c
209
cur->bsr_bytes = bsr_bytes[i];
drivers/char/bsr.c
210
cur->bsr_stride = bsr_stride[i];
drivers/char/bsr.c
211
cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
drivers/char/bsr.c
215
if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
drivers/char/bsr.c
216
cur->bsr_len = 4096;
drivers/char/bsr.c
218
switch(cur->bsr_bytes) {
drivers/char/bsr.c
220
cur->bsr_type = BSR_8;
drivers/char/bsr.c
223
cur->bsr_type = BSR_16;
drivers/char/bsr.c
226
cur->bsr_type = BSR_64;
drivers/char/bsr.c
229
cur->bsr_type = BSR_128;
drivers/char/bsr.c
232
cur->bsr_type = BSR_4096;
drivers/char/bsr.c
235
cur->bsr_type = BSR_UNKNOWN;
drivers/char/bsr.c
238
cur->bsr_num = bsr_types[cur->bsr_type];
drivers/char/bsr.c
239
snprintf(cur->bsr_name, 32, "bsr%d_%d",
drivers/char/bsr.c
240
cur->bsr_bytes, cur->bsr_num);
drivers/char/bsr.c
242
cdev_init(&cur->bsr_cdev, &bsr_fops);
drivers/char/bsr.c
243
result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
drivers/char/bsr.c
245
kfree(cur);
drivers/char/bsr.c
249
cur->bsr_device = device_create(&bsr_class, NULL, cur->bsr_dev,
drivers/char/bsr.c
250
cur, "%s", cur->bsr_name);
drivers/char/bsr.c
251
if (IS_ERR(cur->bsr_device)) {
drivers/char/bsr.c
253
cur->bsr_name);
drivers/char/bsr.c
254
cdev_del(&cur->bsr_cdev);
drivers/char/bsr.c
255
kfree(cur);
drivers/char/bsr.c
259
bsr_types[cur->bsr_type] = cur->bsr_num + 1;
drivers/char/bsr.c
260
list_add_tail(&cur->bsr_list, &bsr_devs);
drivers/char/tpm/tpm_tis_i2c_cr50.c
472
size_t burstcnt, cur, len, expected;
drivers/char/tpm/tpm_tis_i2c_cr50.c
508
cur = burstcnt;
drivers/char/tpm/tpm_tis_i2c_cr50.c
509
while (cur < expected) {
drivers/char/tpm/tpm_tis_i2c_cr50.c
515
len = min_t(size_t, burstcnt, expected - cur);
drivers/char/tpm/tpm_tis_i2c_cr50.c
516
rc = tpm_cr50_i2c_read(chip, addr, buf + cur, len);
drivers/char/tpm/tpm_tis_i2c_cr50.c
522
cur += len;
drivers/char/tpm/tpm_tis_i2c_cr50.c
535
return cur;
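
The cr50 lines above drain a response in burst-sized chunks, clamping the final transfer so exactly 'expected' bytes end up in the buffer. A stand-alone shape (read_chunk() is a hypothetical stand-in for the I2C register read; it zero-fills so the sketch runs):

#include <stddef.h>

static int read_chunk(unsigned char *dst, size_t len)
{
    for (size_t i = 0; i < len; i++)
        dst[i] = 0;                     /* fake device data */
    return 0;
}

static int read_response(unsigned char *buf, size_t expected, size_t burstcnt)
{
    size_t cur = burstcnt;              /* the header burst is already read */

    while (cur < expected) {
        size_t len = expected - cur;
        if (len > burstcnt)
            len = burstcnt;             /* at most one burst per transfer */
        if (read_chunk(buf + cur, len))
            return -1;
        cur += len;
    }
    return (int)cur;                    /* bytes now in buf */
}
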
drivers/clk/rockchip/clk-pll.c
172
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
175
rockchip_rk3036_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
177
rate64 *= cur.fbdiv;
drivers/clk/rockchip/clk-pll.c
178
do_div(rate64, cur.refdiv);
drivers/clk/rockchip/clk-pll.c
180
if (cur.dsmpd == 0) {
drivers/clk/rockchip/clk-pll.c
182
u64 frac_rate64 = prate * cur.frac;
drivers/clk/rockchip/clk-pll.c
184
do_div(frac_rate64, cur.refdiv);
drivers/clk/rockchip/clk-pll.c
188
do_div(rate64, cur.postdiv1);
drivers/clk/rockchip/clk-pll.c
189
do_div(rate64, cur.postdiv2);
drivers/clk/rockchip/clk-pll.c
199
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
209
rockchip_rk3036_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
210
cur.rate = 0;
drivers/clk/rockchip/clk-pll.c
246
rockchip_rk3036_pll_set_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
307
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
320
rockchip_rk3036_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
325
cur.fbdiv, cur.postdiv1, cur.refdiv, cur.postdiv2,
drivers/clk/rockchip/clk-pll.c
326
cur.dsmpd, cur.frac);
drivers/clk/rockchip/clk-pll.c
331
if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
drivers/clk/rockchip/clk-pll.c
332
rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
drivers/clk/rockchip/clk-pll.c
333
rate->dsmpd != cur.dsmpd ||
drivers/clk/rockchip/clk-pll.c
334
(!cur.dsmpd && (rate->frac != cur.frac))) {
drivers/clk/rockchip/clk-pll.c
411
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
422
rockchip_rk3066_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
424
rate64 *= cur.nf;
drivers/clk/rockchip/clk-pll.c
425
do_div(rate64, cur.nr);
drivers/clk/rockchip/clk-pll.c
426
do_div(rate64, cur.no);
drivers/clk/rockchip/clk-pll.c
436
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
444
rockchip_rk3066_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
445
cur.rate = 0;
drivers/clk/rockchip/clk-pll.c
481
rockchip_rk3066_pll_set_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
542
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
555
rockchip_rk3066_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
558
__func__, clk_hw_get_name(hw), drate, rate->nr, cur.nr,
drivers/clk/rockchip/clk-pll.c
559
rate->no, cur.no, rate->nf, cur.nf, rate->nb, cur.nb);
drivers/clk/rockchip/clk-pll.c
560
if (rate->nr != cur.nr || rate->no != cur.no || rate->nf != cur.nf
drivers/clk/rockchip/clk-pll.c
561
|| rate->nb != cur.nb) {
drivers/clk/rockchip/clk-pll.c
656
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
659
rockchip_rk3399_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
661
rate64 *= cur.fbdiv;
drivers/clk/rockchip/clk-pll.c
662
do_div(rate64, cur.refdiv);
drivers/clk/rockchip/clk-pll.c
664
if (cur.dsmpd == 0) {
drivers/clk/rockchip/clk-pll.c
666
u64 frac_rate64 = prate * cur.frac;
drivers/clk/rockchip/clk-pll.c
668
do_div(frac_rate64, cur.refdiv);
drivers/clk/rockchip/clk-pll.c
672
do_div(rate64, cur.postdiv1);
drivers/clk/rockchip/clk-pll.c
673
do_div(rate64, cur.postdiv2);
drivers/clk/rockchip/clk-pll.c
683
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
693
rockchip_rk3399_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
694
cur.rate = 0;
drivers/clk/rockchip/clk-pll.c
730
rockchip_rk3399_pll_set_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
791
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
804
rockchip_rk3399_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
809
cur.fbdiv, cur.postdiv1, cur.refdiv, cur.postdiv2,
drivers/clk/rockchip/clk-pll.c
810
cur.dsmpd, cur.frac);
drivers/clk/rockchip/clk-pll.c
815
if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
drivers/clk/rockchip/clk-pll.c
816
rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
drivers/clk/rockchip/clk-pll.c
817
rate->dsmpd != cur.dsmpd ||
drivers/clk/rockchip/clk-pll.c
818
(!cur.dsmpd && (rate->frac != cur.frac))) {
drivers/clk/rockchip/clk-pll.c
906
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
909
rockchip_rk3588_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
911
rate64 *= cur.m;
drivers/clk/rockchip/clk-pll.c
912
do_div(rate64, cur.p);
drivers/clk/rockchip/clk-pll.c
914
if (cur.k) {
drivers/clk/rockchip/clk-pll.c
916
u64 frac_rate64 = prate * cur.k;
drivers/clk/rockchip/clk-pll.c
918
postdiv = cur.p * 65535;
drivers/clk/rockchip/clk-pll.c
922
rate64 = rate64 >> cur.s;
drivers/clk/rockchip/clk-pll.c
935
struct rockchip_pll_rate_table cur;
drivers/clk/rockchip/clk-pll.c
943
rockchip_rk3588_pll_get_params(pll, &cur);
drivers/clk/rockchip/clk-pll.c
944
cur.rate = 0;
drivers/clk/rockchip/clk-pll.c
979
rockchip_rk3588_pll_set_params(pll, &cur);
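
The Rockchip recalc paths above all follow one formula: rate = prate * fbdiv / refdiv, plus a 24-bit fractional term when dsmpd is clear, divided by both post-dividers. In plain 64-bit arithmetic (the kernel's do_div() is only needed on 32-bit builds):

#include <stdint.h>

struct pll_params { uint32_t fbdiv, refdiv, postdiv1, postdiv2, dsmpd, frac; };

static uint64_t pll_recalc(uint64_t prate, const struct pll_params *p)
{
    uint64_t rate64 = prate * p->fbdiv / p->refdiv;

    if (p->dsmpd == 0) {                /* fractional mode enabled */
        uint64_t frac64 = prate * p->frac / p->refdiv;
        rate64 += frac64 >> 24;         /* frac is a 24-bit fraction */
    }
    return rate64 / p->postdiv1 / p->postdiv2;
}
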
drivers/comedi/kcomedilib/kcomedilib_main.c
40
unsigned int cur = 0;
drivers/comedi/kcomedilib/kcomedilib_main.c
62
bitmap_zero(destinations[cur], COMEDI_NUM_BOARD_MINORS);
drivers/comedi/kcomedilib/kcomedilib_main.c
63
set_bit(from, destinations[cur]);
drivers/comedi/kcomedilib/kcomedilib_main.c
66
unsigned int next = 1 - cur;
drivers/comedi/kcomedilib/kcomedilib_main.c
69
if (test_bit(to, destinations[cur])) {
drivers/comedi/kcomedilib/kcomedilib_main.c
76
while ((t = find_next_bit(destinations[cur],
drivers/comedi/kcomedilib/kcomedilib_main.c
87
cur = next;
drivers/comedi/kcomedilib/kcomedilib_main.c
88
} while (!bitmap_empty(destinations[cur], COMEDI_NUM_BOARD_MINORS));
drivers/cpufreq/acpi-cpufreq.c
872
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
drivers/cpufreq/amd-pstate.c
1035
policy->cur = policy->cpuinfo.min_freq;
drivers/cpufreq/amd-pstate.c
1516
policy->cur = policy->cpuinfo.min_freq;
drivers/cpufreq/amd-pstate.c
1605
policy->cur = policy->min;
drivers/cpufreq/amd-pstate.c
1661
int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
drivers/cpufreq/amd-pstate.c
552
cpudata->cur.aperf = aperf;
drivers/cpufreq/amd-pstate.c
553
cpudata->cur.mperf = mperf;
drivers/cpufreq/amd-pstate.c
554
cpudata->cur.tsc = tsc;
drivers/cpufreq/amd-pstate.c
555
cpudata->cur.aperf -= cpudata->prev.aperf;
drivers/cpufreq/amd-pstate.c
556
cpudata->cur.mperf -= cpudata->prev.mperf;
drivers/cpufreq/amd-pstate.c
557
cpudata->cur.tsc -= cpudata->prev.tsc;
drivers/cpufreq/amd-pstate.c
563
cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);
drivers/cpufreq/amd-pstate.c
583
policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
drivers/cpufreq/amd-pstate.c
592
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
drivers/cpufreq/amd-pstate.c
677
freqs.old = policy->cur;
drivers/cpufreq/amd-pstate.c
713
return policy->cur;
drivers/cpufreq/amd-pstate.h
99
struct amd_aperf_mperf cur;
drivers/cpufreq/amd_freq_sensitivity.c
63
freq_next = policy->cur;
drivers/cpufreq/amd_freq_sensitivity.c
72
freq_next = policy->cur;
drivers/cpufreq/amd_freq_sensitivity.c
83
if (data->freq_prev == policy->cur)
drivers/cpufreq/amd_freq_sensitivity.c
84
freq_next = policy->cur;
drivers/cpufreq/amd_freq_sensitivity.c
86
if (freq_next > policy->cur)
drivers/cpufreq/amd_freq_sensitivity.c
87
freq_next = policy->cur;
drivers/cpufreq/amd_freq_sensitivity.c
88
else if (freq_next < policy->cur)
drivers/cpufreq/amd_freq_sensitivity.c
94
policy->cur - 1,
drivers/cpufreq/brcmstb-avs-cpufreq.c
648
policy->cur = freq_table[pstate].frequency;
drivers/cpufreq/cppc_cpufreq.c
301
freqs.old = policy->cur;
drivers/cpufreq/cppc_cpufreq.c
681
policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
drivers/cpufreq/cpufreq.c
1494
policy->cur = cpufreq_driver->get(policy->cpu);
drivers/cpufreq/cpufreq.c
1495
if (!policy->cur) {
drivers/cpufreq/cpufreq.c
1522
unsigned int old_freq = policy->cur;
drivers/cpufreq/cpufreq.c
1537
__func__, policy->cpu, old_freq, policy->cur);
drivers/cpufreq/cpufreq.c
1799
policy->cur, new_freq);
drivers/cpufreq/cpufreq.c
1801
freqs.old = policy->cur;
drivers/cpufreq/cpufreq.c
1826
if (policy->cur != new_freq) {
drivers/cpufreq/cpufreq.c
1834
if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
drivers/cpufreq/cpufreq.c
1835
return policy->cur;
drivers/cpufreq/cpufreq.c
1870
return policy->cur;
drivers/cpufreq/cpufreq.c
2215
policy->cur = freq;
drivers/cpufreq/cpufreq.c
2297
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
drivers/cpufreq/cpufreq.c
2303
if (newfreq == policy->cur)
drivers/cpufreq/cpufreq.c
2307
restore_freq = policy->cur;
drivers/cpufreq/cpufreq.c
2376
if (target_freq == policy->cur &&
drivers/cpufreq/cpufreq.c
337
if (policy->cur && policy->cur != freqs->old) {
drivers/cpufreq/cpufreq.c
339
freqs->old, policy->cur);
drivers/cpufreq/cpufreq.c
340
freqs->old = policy->cur;
drivers/cpufreq/cpufreq.c
361
policy->cur = freqs->new;
drivers/cpufreq/cpufreq.c
421
policy->cur,
drivers/cpufreq/cpufreq.c
745
ret = sysfs_emit(buf, "%u\n", policy->cur);
drivers/cpufreq/cpufreq_conservative.c
313
dbs_info->requested_freq = policy->cur;
drivers/cpufreq/cpufreq_conservative.c
324
dbs_info->requested_freq = policy->cur;
drivers/cpufreq/cpufreq_conservative.c
81
requested_freq = policy->cur;
drivers/cpufreq/cpufreq_governor.c
522
if (!policy->cur)
drivers/cpufreq/cpufreq_ondemand.c
101
else if (policy->cur == policy->max)
drivers/cpufreq/cpufreq_ondemand.c
126
if (policy->cur < policy->max)
drivers/cpufreq/cpufreq_stats.c
252
stats->last_index = freq_table_get_index(stats, policy->cur);
drivers/cpufreq/cpufreq_userspace.c
114
policy->cpu, policy->min, policy->max, policy->cur, userspace->setspeed);
drivers/cpufreq/cpufreq_userspace.c
85
BUG_ON(!policy->cur);
drivers/cpufreq/cpufreq_userspace.c
90
userspace->setspeed = policy->cur;
drivers/cpufreq/davinci-cpufreq.c
42
old_freq = policy->cur;
drivers/cpufreq/intel_pstate.c
2929
policy->cur = policy->min;
drivers/cpufreq/intel_pstate.c
3214
freqs.old = policy->cur;
drivers/cpufreq/intel_pstate.c
3304
policy->cur = policy->cpuinfo.min_freq;
drivers/cpufreq/longhaul.c
962
freqs.old = policy->cur;
drivers/cpufreq/pasemi-cpufreq.c
195
policy->cur = pas_freqs[cur_astate].frequency;
drivers/cpufreq/pasemi-cpufreq.c
196
ppc_proc_freq = policy->cur * 1000ul;
drivers/cpufreq/pcc-cpufreq.c
216
freqs.old = policy->cur;
drivers/cpufreq/powernow-k6.c
230
freqs.old = policy->cur;
drivers/cpufreq/powernow-k8.c
978
pol->cur = find_khz_freq_from_fid(data->currfid);
drivers/cpufreq/powernv-cpufreq.c
935
index = cpufreq_table_find_index_c(policy, policy->cur, false);
drivers/cpufreq/pxa2xx-cpufreq.c
199
policy->cur / 1000, new_freq_cpu / 1000);
drivers/cpufreq/pxa2xx-cpufreq.c
201
if (vcc_core && new_freq_cpu > policy->cur) {
drivers/cpufreq/pxa2xx-cpufreq.c
218
if (vcc_core && new_freq_cpu < policy->cur)
drivers/cpufreq/s5pv210-cpufreq.c
242
old_freq = policy->cur;
drivers/cpufreq/sparc-us2e-cpufreq.c
293
policy->cur = clock_tick;
drivers/cpufreq/sparc-us3-cpufreq.c
137
policy->cur = clock_tick;
drivers/cpufreq/virtual-cpufreq.c
122
freqs.old = policy->cur;
drivers/cpufreq/virtual-cpufreq.c
170
policy->cur = policy->max;
drivers/crypto/intel/qat/qat_common/qat_uclo.c
443
char *chunk_id, void *cur)
drivers/crypto/intel/qat/qat_common/qat_uclo.c
451
if ((cur < (void *)&chunk_hdr[i]) &&
drivers/crypto/stm32/stm32-cryp.c
1486
struct scatterlist *cur;
drivers/crypto/stm32/stm32-cryp.c
1507
cur = *new_sg;
drivers/crypto/stm32/stm32-cryp.c
1530
sg_set_page(cur, sg_page(sg), len, offset);
drivers/crypto/stm32/stm32-cryp.c
1532
sg_mark_end(cur);
drivers/crypto/stm32/stm32-cryp.c
1533
cur = sg_next(cur);
drivers/devfreq/governor_passive.c
336
parent_cpu_data->cur_freq = policy->cur;
drivers/dma-buf/dma-fence.c
365
struct dma_fence_cb *cur, *tmp;
drivers/dma-buf/dma-fence.c
381
list_for_each_entry_safe(cur, tmp, &cb_list, node) {
drivers/dma-buf/dma-fence.c
382
INIT_LIST_HEAD(&cur->node);
drivers/dma-buf/dma-fence.c
383
cur->func(fence, cur);
drivers/firmware/google/memconsole-x86-legacy.c
101
for (cur = 0; cur < length; cur++) {
drivers/firmware/google/memconsole-x86-legacy.c
102
struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
drivers/firmware/google/memconsole-x86-legacy.c
85
size_t length, cur;
drivers/gpib/common/gpib_os.c
434
struct list_head *cur;
drivers/gpib/common/gpib_os.c
448
for (cur = head->next; cur != head; cur = cur->next) {
drivers/gpib/common/gpib_os.c
449
device = list_entry(cur, struct gpib_status_queue, list);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
100
cur->size = min((node->size << PAGE_SHIFT) - start, size);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
101
cur->remaining = size;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
102
cur->node = node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
111
cur->start = start;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
112
cur->size = size;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
113
cur->remaining = size;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
114
cur->node = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
126
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
132
BUG_ON(size > cur->remaining);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
134
cur->remaining -= size;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
135
if (!cur->remaining)
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
138
cur->size -= size;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
139
if (cur->size) {
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
140
cur->start += size;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
144
switch (cur->mem_type) {
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
146
block = cur->node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
151
cur->node = block;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
152
cur->start = amdgpu_vram_mgr_block_start(block);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
153
cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
158
node = cur->node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
160
cur->node = ++node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
161
cur->start = node->start << PAGE_SHIFT;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
162
cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
176
static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
180
switch (cur->mem_type) {
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
182
block = cur->node;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
56
struct amdgpu_res_cursor *cur)
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
67
cur->mem_type = res->mem_type;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
69
switch (cur->mem_type) {
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
87
cur->start = amdgpu_vram_mgr_block_start(block) + start;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
88
cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
89
cur->remaining = size;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
90
cur->node = block;
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
99
cur->start = (node->start << PAGE_SHIFT) + start;
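
amdgpu_res_cursor.h above iterates a possibly fragmented resource as if it were linear: first() seeks to a byte offset inside the extent list, next() consumes bytes and hops to the following extent when the current one runs out, and 'remaining' tracks the overall budget. A simplified cursor over a plain extent array (no mem_type dispatch; assumes the extents cover the requested range and that callers consume at most cur->size per step, mirroring the kernel's BUG_ON on remaining):

#include <assert.h>
#include <stdint.h>

struct extent { uint64_t start, size; };    /* one contiguous run */
struct cursor {
    uint64_t start, size, remaining;
    const struct extent *node;
};

static void res_first(const struct extent *ext, uint64_t offset,
                      uint64_t size, struct cursor *cur)
{
    while (offset >= ext->size)         /* skip extents before 'offset' */
        offset -= (ext++)->size;
    cur->node = ext;
    cur->start = ext->start + offset;
    cur->size = ext->size - offset;
    if (cur->size > size)
        cur->size = size;
    cur->remaining = size;
}

static void res_next(struct cursor *cur, uint64_t size)
{
    assert(size <= cur->size && size <= cur->remaining);
    cur->remaining -= size;
    if (!cur->remaining)
        return;                         /* budget exhausted */
    cur->size -= size;
    if (cur->size) {
        cur->start += size;             /* still inside this extent */
        return;
    }
    cur->node++;                        /* hop to the next extent */
    cur->start = cur->node->start;
    cur->size = cur->node->size;
    if (cur->size > cur->remaining)
        cur->size = cur->remaining;
}
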
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
535
unsigned cur;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
543
cur = (ring->wptr - 1) & ring->buf_mask;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
544
if (cur < offset)
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
545
cur += ring->ring_size >> 2;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
546
ring->ring[offset] = cur - offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1102
struct amdgpu_res_cursor cur;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1109
amdgpu_res_first(res, 0, res->size, &cur);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1112
phys = adev->rmmio_remap.bus_addr + cur.start;
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1124
sg_set_page(sg, NULL, cur.size, 0); /* I/O space is not backed by struct pages */
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1126
dma = dma_map_resource(dev, phys, cur.size, dir, DMA_ATTR_SKIP_CPU_SYNC);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
1133
sg_dma_len(sg) = cur.size;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1439
struct device_process_node *cur, *next;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1446
list_for_each_entry_safe(cur, next, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1447
if (qpd == cur->qpd) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1448
list_del(&cur->list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1449
kfree(cur);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
161
struct device_process_node *cur;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
166
list_for_each_entry(cur, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
167
qpd = cur->qpd;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2191
struct device_process_node *cur;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2195
list_for_each_entry(cur, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2196
qpd = cur->qpd;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2281
struct device_process_node *cur;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2285
list_for_each_entry(cur, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2286
qpd = cur->qpd;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2649
struct device_process_node *cur, *next_dpn;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2672
list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2673
if (qpd == cur->qpd) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2674
list_del(&cur->list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2675
kfree(cur);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2781
struct device_process_node *cur, *next_dpn;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2819
list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2820
if (qpd == cur->qpd) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2821
list_del(&cur->list);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
2822
kfree(cur);
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
310
struct device_process_node *cur;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
316
list_for_each_entry(cur, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
317
qpd = cur->qpd;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
337
struct device_process_node *cur;
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
343
list_for_each_entry(cur, &dqm->queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
344
qpd = cur->qpd;
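Across kfd_device_queue_manager.c, `cur` is the canonical list cursor: plain walks use list_for_each_entry, while the unregister paths pair `cur` with a `next` pointer so the node can be unlinked and freed mid-iteration. A kernel-style sketch of that deletion pattern (structure layout abbreviated, locking and error handling omitted):

#include <linux/list.h>
#include <linux/slab.h>

struct device_process_node {
        struct qcm_process_device *qpd;
        struct list_head list;
};

/* The _safe iterator caches the successor first, so deleting `cur`
 * while walking is legal. */
static void remove_queues_for_process(struct list_head *queues,
                                      struct qcm_process_device *qpd)
{
        struct device_process_node *cur, *next;

        list_for_each_entry_safe(cur, next, queues, list) {
                if (cur->qpd == qpd) {
                        list_del(&cur->list);
                        kfree(cur);
                        break;
                }
        }
}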
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
146
struct device_process_node *cur;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
157
list_for_each_entry(cur, queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
158
qpd = cur->qpd;
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
183
list_for_each_entry(cur, queues, list) {
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
184
qpd = cur->qpd;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
428
struct dm_crtc_state *cur = to_dm_crtc_state(state);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
431
if (cur->stream)
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
432
dc_stream_release(cur->stream);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
443
struct dm_crtc_state *state, *cur;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
445
cur = to_dm_crtc_state(crtc->state);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
456
if (cur->stream) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
457
state->stream = cur->stream;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
461
state->active_planes = cur->active_planes;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
462
state->vrr_infopacket = cur->vrr_infopacket;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
463
state->abm_level = cur->abm_level;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
464
state->vrr_supported = cur->vrr_supported;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
465
state->freesync_config = cur->freesync_config;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
466
state->cm_has_degamma = cur->cm_has_degamma;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
467
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
468
state->regamma_tf = cur->regamma_tf;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
469
state->crc_skip_count = cur->crc_skip_count;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
470
state->mpo_requested = cur->mpo_requested;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
471
state->cursor_mode = cur->cursor_mode;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
613
struct dc_link_settings *cur,
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
624
if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
625
dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
652
cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
653
cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
671
struct dc_link_settings *cur,
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
677
cur, training_result);
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
685
if (!reached_minimum_link_rate(cur->link_rate)) {
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
686
cur->link_rate = reduce_link_rate(link, cur->link_rate);
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
687
} else if (!reached_minimum_lane_count(cur->lane_count)) {
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
688
cur->link_rate = max->link_rate;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
692
cur->lane_count = LANE_COUNT_ONE;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
694
cur->lane_count = LANE_COUNT_TWO;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
696
cur->lane_count = reduce_lane_count(cur->lane_count);
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
705
if (!reached_minimum_lane_count(cur->lane_count)) {
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
706
cur->lane_count = reduce_lane_count(cur->lane_count);
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
707
} else if (!reached_minimum_link_rate(cur->link_rate)) {
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
708
cur->link_rate = reduce_link_rate(link, cur->link_rate);
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
714
max->link_rate = cur->link_rate;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
715
cur->lane_count = max->lane_count;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
723
if (!reached_minimum_link_rate(cur->link_rate)) {
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
724
cur->link_rate = reduce_link_rate(link, cur->link_rate);
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
730
max->link_rate = cur->link_rate;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
731
cur->lane_count = max->lane_count;
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
99
struct dc_link_settings *cur,
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
551
struct atmel_hlcdc_crtc_state *state, *cur;
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
562
cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
563
state->output_mode = cur->output_mode;
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
565
state->dpi = cur->dpi;
drivers/gpu/drm/drm_gpusvm.c
818
const struct dev_pagemap *cur = NULL;
drivers/gpu/drm/drm_gpusvm.c
828
cur = page_pgmap(page);
drivers/gpu/drm/drm_gpusvm.c
830
if (cur == pagemap) {
drivers/gpu/drm/drm_gpusvm.c
832
} else if (cur && (cur == other || !other)) {
drivers/gpu/drm/drm_gpusvm.c
834
other = cur;
drivers/gpu/drm/drm_gpusvm.c
835
} else if (cur) {
drivers/gpu/drm/drm_pagemap.c
410
const struct migrate_range_loc *cur,
drivers/gpu/drm/drm_pagemap.c
415
if (cur->start == 0)
drivers/gpu/drm/drm_pagemap.c
418
if (cur->start <= last->start)
drivers/gpu/drm/drm_pagemap.c
421
if (cur->dpagemap == last->dpagemap && cur->ops == last->ops)
drivers/gpu/drm/drm_pagemap.c
431
cur->start - last->start,
drivers/gpu/drm/drm_pagemap.c
439
cur->start - last->start,
drivers/gpu/drm/drm_pagemap.c
443
*last = *cur;
drivers/gpu/drm/drm_pagemap.c
488
struct migrate_range_loc cur, last = {.device = dpagemap->drm->dev, .ops = ops};
drivers/gpu/drm/drm_pagemap.c
592
cur.start = i;
drivers/gpu/drm/drm_pagemap.c
605
cur.dpagemap = src_zdd->dpagemap;
drivers/gpu/drm/drm_pagemap.c
606
cur.ops = src_zdd->devmem_allocation->ops;
drivers/gpu/drm/drm_pagemap.c
607
cur.device = cur.dpagemap->drm->dev;
drivers/gpu/drm/drm_pagemap.c
612
cur.dpagemap = NULL;
drivers/gpu/drm/drm_pagemap.c
613
cur.ops = ops;
drivers/gpu/drm/drm_pagemap.c
614
cur.device = dpagemap->drm->dev;
drivers/gpu/drm/drm_pagemap.c
622
pages, pagemap_addr, &last, &cur,
drivers/gpu/drm/drm_pagemap.c
629
cur.start = npages;
drivers/gpu/drm/drm_pagemap.c
630
cur.ops = NULL; /* Force migration */
drivers/gpu/drm/drm_pagemap.c
632
pages, pagemap_addr, &last, &cur, mdetails);
drivers/gpu/drm/gma500/cdv_intel_dp.c
2014
struct edp_power_seq cur;
drivers/gpu/drm/gma500/cdv_intel_dp.c
2033
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
drivers/gpu/drm/gma500/cdv_intel_dp.c
2036
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
drivers/gpu/drm/gma500/cdv_intel_dp.c
2039
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
drivers/gpu/drm/gma500/cdv_intel_dp.c
2042
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
drivers/gpu/drm/gma500/cdv_intel_dp.c
2045
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
drivers/gpu/drm/gma500/cdv_intel_dp.c
2049
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
drivers/gpu/drm/gma500/cdv_intel_dp.c
2052
intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
drivers/gpu/drm/gma500/cdv_intel_dp.c
2053
intel_dp->backlight_on_delay = cur.t8 / 10;
drivers/gpu/drm/gma500/cdv_intel_dp.c
2054
intel_dp->backlight_off_delay = cur.t9 / 10;
drivers/gpu/drm/gma500/cdv_intel_dp.c
2055
intel_dp->panel_power_down_delay = cur.t10 / 10;
drivers/gpu/drm/gma500/cdv_intel_dp.c
2056
intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
drivers/gpu/drm/i915/display/i9xx_wm.c
2438
u16 cur;
drivers/gpu/drm/i915/display/i9xx_wm.c
2649
max->cur = ilk_cursor_wm_max(display, level, config);
drivers/gpu/drm/i915/display/i9xx_wm.c
2659
max->cur = ilk_cursor_wm_reg_max(display, level);
drivers/gpu/drm/i915/display/i9xx_wm.c
2676
result->cur_val <= max->cur;
drivers/gpu/drm/i915/display/i9xx_wm.c
2694
if (result->cur_val > max->cur)
drivers/gpu/drm/i915/display/i9xx_wm.c
2697
level, result->cur_val, max->cur);
drivers/gpu/drm/i915/display/i9xx_wm.c
2701
result->cur_val = min_t(u32, result->cur_val, max->cur);
drivers/gpu/drm/i915/display/intel_pps.c
1518
struct intel_pps_delays cur, vbt, spec,
drivers/gpu/drm/i915/display/intel_pps.c
1527
pps_init_delays_bios(intel_dp, &cur);
drivers/gpu/drm/i915/display/intel_pps.c
1533
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
drivers/gpu/drm/i915/display/intel_pps.c
1535
max(cur.field, vbt.field))
drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
29
struct i915_frontbuffer *front, *cur;
drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
52
cur = rcu_dereference_protected(obj->frontbuffer, true);
drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
53
kref_get(&cur->ref);
drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
55
cur = front;
drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
60
if (cur != front) {
drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
66
return cur;
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
22
u32 *cur;
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
26
cur = i915_gem_object_pin_map(obj, map_type);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
27
if (IS_ERR(cur))
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
28
return PTR_ERR(cur);
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
32
*cur++ = i;
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
35
if (*cur++ != i) {
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
149
struct dma_fence_cb *cur, *tmp;
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
153
list_for_each_entry_safe(cur, tmp, list, node) {
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
154
INIT_LIST_HEAD(&cur->node);
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
155
cur->func(fence, cur);
drivers/gpu/drm/i915/gt/intel_engine.h
136
struct i915_request * const *cur, * const *old, *active;
drivers/gpu/drm/i915/gt/intel_engine.h
138
cur = READ_ONCE(execlists->active);
drivers/gpu/drm/i915/gt/intel_engine.h
141
old = cur;
drivers/gpu/drm/i915/gt/intel_engine.h
143
active = READ_ONCE(*cur);
drivers/gpu/drm/i915/gt/intel_engine.h
144
cur = READ_ONCE(execlists->active);
drivers/gpu/drm/i915/gt/intel_engine.h
147
} while (unlikely(cur != old));
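The intel_engine.h loop is a seqlock-flavoured lockless read: snapshot the `active` array pointer, load an element through it, then re-read the pointer and retry if it moved in between, so the element load is known to have come from a stable array. Roughly the following, with READ_ONCE approximated via a volatile access (GNU C __typeof__) and the struct layout assumed:

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct i915_request;

struct execlists_view {
        struct i915_request * const *active;    /* republished by writers */
};

static struct i915_request *read_active(struct execlists_view *el)
{
        struct i915_request * const *cur, * const *old;
        struct i915_request *active;

        cur = READ_ONCE(el->active);
        do {
                old = cur;
                active = READ_ONCE(*cur);       /* may race with an update */
                cur = READ_ONCE(el->active);    /* retry if the array moved */
        } while (cur != old);

        return active;
}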
drivers/gpu/drm/i915/gt/intel_tlb.c
115
u32 cur = intel_gt_tlb_seqno(gt);
drivers/gpu/drm/i915/gt/intel_tlb.c
118
return (s32)(cur - ALIGN(seqno, 2)) > 0;
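intel_tlb.c's test is the standard wrap-safe sequence comparison: subtract in unsigned arithmetic, reinterpret as signed, and compare against zero, which keeps ordering correct across 32-bit wraparound (the ALIGN(seqno, 2) rounding is a driver detail, elided here). A self-contained illustration:

#include <assert.h>
#include <stdint.h>

/* True when `cur` is strictly after `seqno`, even across wrap. */
static int seqno_after(uint32_t cur, uint32_t seqno)
{
        return (int32_t)(cur - seqno) > 0;
}

int main(void)
{
        assert(seqno_after(5, 3));
        assert(!seqno_after(3, 5));
        assert(seqno_after(1, 0xfffffffeu));    /* counter wrapped */
        return 0;
}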
drivers/gpu/drm/i915/gt/intel_workarounds.c
1742
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
drivers/gpu/drm/i915/gt/intel_workarounds.c
1745
if ((cur ^ wa->set) & wa->read) {
drivers/gpu/drm/i915/gt/intel_workarounds.c
1749
cur, cur & wa->read, wa->set & wa->read);
drivers/gpu/drm/i915/i915_gpu_error.c
100
e->cur->offset = 0;
drivers/gpu/drm/i915/i915_gpu_error.c
101
e->cur->length = 0;
drivers/gpu/drm/i915/i915_gpu_error.c
102
e->cur->page_link =
drivers/gpu/drm/i915/i915_gpu_error.c
108
e->cur = sgl;
drivers/gpu/drm/i915/i915_gpu_error.c
84
__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
drivers/gpu/drm/i915/i915_gpu_error.c
90
if (e->cur == e->end) {
drivers/gpu/drm/i915/i915_gpu_error.c
983
__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
drivers/gpu/drm/i915/i915_gpu_error.c
987
if (m.cur) {
drivers/gpu/drm/i915/i915_gpu_error.c
988
GEM_BUG_ON(m.end < m.cur);
drivers/gpu/drm/i915/i915_gpu_error.c
989
sg_mark_end(m.cur - 1);
drivers/gpu/drm/i915/i915_gpu_error.c
99
if (e->cur) {
drivers/gpu/drm/i915/i915_gpu_error.c
991
GEM_BUG_ON(m.sgl && !m.cur);
drivers/gpu/drm/i915/i915_gpu_error.h
234
struct scatterlist *sgl, *cur, *end;
drivers/gpu/drm/i915/i915_pmu.c
205
return pmu->sample[gt_id][sample].cur;
drivers/gpu/drm/i915/i915_pmu.c
211
pmu->sample[gt_id][sample].cur = val;
drivers/gpu/drm/i915/i915_pmu.c
217
pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
drivers/gpu/drm/i915/i915_pmu.c
345
sample->cur += val;
drivers/gpu/drm/i915/i915_pmu.c
682
val = engine->pmu.sample[sample].cur;
drivers/gpu/drm/i915/i915_pmu.h
55
u64 cur;
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
51
ring->cur = ring->next;
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
15
enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
18
return (cur == old);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
172
ring->cur = ring->next;
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
19
enum a6xx_preempt_state cur = atomic_cmpxchg(&a6xx_gpu->preempt_state,
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
22
return (cur == old);
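Both msm preempt files gate state transitions on an atomic compare-and-exchange: the swap succeeds only if the state is still `old`, and the result tells the caller whether it won the race. A C11 stand-in for the kernel's atomic_cmpxchg-based helper (enum values are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

enum preempt_state { PREEMPT_NONE, PREEMPT_START, PREEMPT_TRIGGERED };

/* Move state from `old` to `new`; false means another path raced us. */
static bool try_preempt_state(atomic_int *state,
                              enum preempt_state old, enum preempt_state new)
{
        int expected = old;

        return atomic_compare_exchange_strong(state, &expected, new);
}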
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
168
ring->cur = ring->next;
drivers/gpu/drm/msm/adreno/adreno_gpu.c
667
ring->cur = ring->start;
drivers/gpu/drm/msm/adreno/adreno_gpu.c
721
ring->cur = ring->next;
drivers/gpu/drm/msm/adreno/adreno_gpu.h
733
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
54
struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
63
if (new_state->hwmixer_to_crtc[cur->idx] &&
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
64
new_state->hwmixer_to_crtc[cur->idx] != crtc)
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
68
if (caps & ~cur->caps)
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
74
pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
97
if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
98
*mixer = cur;
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
31
struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
40
if (new_state->hwpipe_to_plane[cur->idx] ||
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
41
old_state->hwpipe_to_plane[cur->idx])
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
45
if (caps & ~cur->caps)
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
52
if (cur->caps & MDP_PIPE_CAP_CURSOR &&
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
59
if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
70
if (r_cur->caps != cur->caps)
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
74
if (cur->pipe > r_cur->pipe)
drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
84
*hwpipe = cur;
drivers/gpu/drm/msm/msm_ringbuffer.c
106
ring->cur = ring->start;
drivers/gpu/drm/msm/msm_ringbuffer.h
51
uint32_t *start, *end, *cur, *next;
drivers/gpu/drm/nouveau/dispnv04/overlay.c
125
struct nouveau_bo *cur = nv_plane->cur;
drivers/gpu/drm/nouveau/dispnv04/overlay.c
149
nv_plane->cur = nvbo;
drivers/gpu/drm/nouveau/dispnv04/overlay.c
185
if (cur)
drivers/gpu/drm/nouveau/dispnv04/overlay.c
186
nouveau_bo_unpin(cur);
drivers/gpu/drm/nouveau/dispnv04/overlay.c
200
if (nv_plane->cur) {
drivers/gpu/drm/nouveau/dispnv04/overlay.c
201
nouveau_bo_unpin(nv_plane->cur);
drivers/gpu/drm/nouveau/dispnv04/overlay.c
202
nv_plane->cur = NULL;
drivers/gpu/drm/nouveau/dispnv04/overlay.c
231
if (plane->cur) {
drivers/gpu/drm/nouveau/dispnv04/overlay.c
374
struct nouveau_bo *cur = nv_plane->cur;
drivers/gpu/drm/nouveau/dispnv04/overlay.c
395
nv_plane->cur = nvbo;
drivers/gpu/drm/nouveau/dispnv04/overlay.c
41
struct nouveau_bo *cur;
drivers/gpu/drm/nouveau/dispnv04/overlay.c
436
if (cur)
drivers/gpu/drm/nouveau/dispnv04/overlay.c
437
nouveau_bo_unpin(cur);
drivers/gpu/drm/nouveau/dispnv04/overlay.c
454
if (nv_plane->cur) {
drivers/gpu/drm/nouveau/dispnv04/overlay.c
455
nouveau_bo_unpin(nv_plane->cur);
drivers/gpu/drm/nouveau/dispnv04/overlay.c
456
nv_plane->cur = NULL;
drivers/gpu/drm/nouveau/dispnv50/disp.c
138
dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
139
if (dmac->put != dmac->cur) {
drivers/gpu/drm/nouveau/dispnv50/disp.c
152
NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
drivers/gpu/drm/nouveau/dispnv50/disp.c
153
dmac->put = dmac->cur;
drivers/gpu/drm/nouveau/dispnv50/disp.c
156
push->bgn = push->cur;
drivers/gpu/drm/nouveau/dispnv50/disp.c
163
if (get > dmac->cur) /* NVIDIA stay 5 away from GET, do the same. */
drivers/gpu/drm/nouveau/dispnv50/disp.c
164
return get - dmac->cur - 5;
drivers/gpu/drm/nouveau/dispnv50/disp.c
165
return dmac->max - dmac->cur;
drivers/gpu/drm/nouveau/dispnv50/disp.c
188
dmac->cur = 0;
drivers/gpu/drm/nouveau/dispnv50/disp.c
201
dmac->cur = push->cur - (u32 __iomem *)dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
202
if (dmac->cur + size >= dmac->max) {
drivers/gpu/drm/nouveau/dispnv50/disp.c
207
push->cur = dmac->push.mem.object.map.ptr;
drivers/gpu/drm/nouveau/dispnv50/disp.c
208
push->cur = push->cur + dmac->cur;
drivers/gpu/drm/nouveau/dispnv50/disp.c
221
push->bgn = push->bgn + dmac->cur;
drivers/gpu/drm/nouveau/dispnv50/disp.c
222
push->cur = push->bgn;
drivers/gpu/drm/nouveau/dispnv50/disp.c
223
push->end = push->cur + free;
drivers/gpu/drm/nouveau/dispnv50/disp.c
262
dmac->push.cur = dmac->push.bgn;
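The dispnv50 entries around `dmac->cur` and `dmac->put` implement the usual GPU pushbuffer accounting: free space runs up to GET minus a safety gap when the consumer is ahead of the writer, otherwise to the end of the buffer, with a wrap on the next kick; the source's comment notes NVIDIA stays 5 slots away from GET. Restated standalone (hypothetical names, positions in dwords):

#include <stdint.h>

struct pushbuf { uint32_t cur, max; };

/* Dwords that may be written before catching up with GET. */
static uint32_t push_free(const struct pushbuf *pb, uint32_t get)
{
        if (get > pb->cur)                      /* stay 5 away from GET */
                return get - pb->cur - 5;
        return pb->max - pb->cur;               /* run to the end, then wrap */
}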
drivers/gpu/drm/nouveau/dispnv50/disp.h
70
u32 cur;
drivers/gpu/drm/nouveau/include/nvif/chan.h
35
u32 cur;
drivers/gpu/drm/nouveau/include/nvif/push.h
129
PUSH_ASSERT(_p->cur + _s <= _p->seg, "segment overrun"); \
drivers/gpu/drm/nouveau/include/nvif/push.h
130
PUSH_ASSERT(_p->cur + _s <= _p->end, "pushbuf overrun"); \
drivers/gpu/drm/nouveau/include/nvif/push.h
131
memcpy(_p->cur, (d), _s << 2); \
drivers/gpu/drm/nouveau/include/nvif/push.h
132
_p->cur += _s; \
drivers/gpu/drm/nouveau/include/nvif/push.h
278
__p->cur--; \
drivers/gpu/drm/nouveau/include/nvif/push.h
280
__p->cur++; \
drivers/gpu/drm/nouveau/include/nvif/push.h
42
u32 *cur;
drivers/gpu/drm/nouveau/include/nvif/push.h
50
if (push->cur + size > push->end) {
drivers/gpu/drm/nouveau/include/nvif/push.h
56
push->seg = push->cur + size;
drivers/gpu/drm/nouveau/include/nvif/push.h
64
if (push->cur != push->bgn) {
drivers/gpu/drm/nouveau/include/nvif/push.h
66
push->bgn = push->cur;
drivers/gpu/drm/nouveau/include/nvif/push.h
75
u32 __o = _ppp->cur - (u32 *)_ppp->mem.object.map.ptr; \
drivers/gpu/drm/nouveau/include/nvif/push.h
95
PUSH_ASSERT(_p->cur < _p->seg, "segment overrun"); \
drivers/gpu/drm/nouveau/include/nvif/push.h
96
PUSH_ASSERT(_p->cur < _p->end, "pushbuf overrun"); \
drivers/gpu/drm/nouveau/include/nvif/push.h
98
*_p->cur++ = _d; \
drivers/gpu/drm/nouveau/nouveau_chan.c
120
chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
drivers/gpu/drm/nouveau/nouveau_chan.c
122
chan->chan.push.bgn = chan->chan.push.cur;
drivers/gpu/drm/nouveau/nouveau_chan.c
130
chan->dma.cur = chan->dma.cur + (chan->chan.push.cur - chan->chan.push.bgn);
drivers/gpu/drm/nouveau/nouveau_chan.c
134
chan->chan.push.bgn = chan->chan.push.bgn + chan->dma.cur;
drivers/gpu/drm/nouveau/nouveau_chan.c
135
chan->chan.push.cur = chan->chan.push.bgn;
drivers/gpu/drm/nouveau/nouveau_chan.c
463
chan->dma.cur = chan->dma.put;
drivers/gpu/drm/nouveau/nouveau_chan.c
464
chan->dma.free = chan->dma.max - chan->dma.cur;
drivers/gpu/drm/nouveau/nouveau_chan.h
38
int cur;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
149
char buf[32] = {}, *tmp, *cur = buf;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
164
if (!strncasecmp(cur, "dc:", 3)) {
drivers/gpu/drm/nouveau/nouveau_debugfs.c
166
cur += 3;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
168
if (!strncasecmp(cur, "ac:", 3)) {
drivers/gpu/drm/nouveau/nouveau_debugfs.c
170
cur += 3;
drivers/gpu/drm/nouveau/nouveau_debugfs.c
173
if (!strcasecmp(cur, "none"))
drivers/gpu/drm/nouveau/nouveau_debugfs.c
176
if (!strcasecmp(cur, "auto"))
drivers/gpu/drm/nouveau/nouveau_debugfs.c
179
ret = kstrtol(cur, 16, &value);
drivers/gpu/drm/nouveau/nouveau_dma.c
106
chan->dma.free = chan->dma.max - chan->dma.cur;
drivers/gpu/drm/nouveau/nouveau_dma.c
133
chan->dma.cur =
drivers/gpu/drm/nouveau/nouveau_dma.c
143
chan->dma.free = get - chan->dma.cur - 1;
drivers/gpu/drm/nouveau/nouveau_dma.c
92
if (get <= chan->dma.cur) {
drivers/gpu/drm/nouveau/nouveau_dma.h
102
chan->dma.cur = chan->dma.put;
drivers/gpu/drm/nouveau/nouveau_dma.h
79
nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
drivers/gpu/drm/nouveau/nouveau_dma.h
91
if (chan->dma.cur == chan->dma.put)
drivers/gpu/drm/nouveau/nouveau_dma.h
94
WRITE_PUT(chan->dma.cur);
drivers/gpu/drm/nouveau/nouveau_dma.h
96
chan->dma.put = chan->dma.cur;
drivers/gpu/drm/nouveau/nouveau_gem.c
898
cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
drivers/gpu/drm/nouveau/nouveau_gem.c
972
(chan->push.addr + ((chan->dma.cur + 2) << 2));
drivers/gpu/drm/nouveau/nvif/chan.c
112
chan->push.bgn = chan->push.cur = chan->push.end = push;
drivers/gpu/drm/nouveau/nvif/chan.c
119
u32 cur = push->cur - (u32 *)push->mem.object.map.ptr;
drivers/gpu/drm/nouveau/nvif/chan.c
127
if (get <= cur) {
drivers/gpu/drm/nouveau/nvif/chan.c
128
free = push->hw.max - cur;
drivers/gpu/drm/nouveau/nvif/chan.c
143
cur = 0;
drivers/gpu/drm/nouveau/nvif/chan.c
146
free = get - cur - 1;
drivers/gpu/drm/nouveau/nvif/chan.c
15
if (push->end - push->cur < chan->func->gpfifo.post_size)
drivers/gpu/drm/nouveau/nvif/chan.c
155
push->bgn = (u32 *)push->mem.object.map.ptr + cur;
drivers/gpu/drm/nouveau/nvif/chan.c
156
push->cur = push->bgn;
drivers/gpu/drm/nouveau/nvif/chan.c
16
push->end = push->cur + chan->func->gpfifo.post_size;
drivers/gpu/drm/nouveau/nvif/chan.c
21
cnt = push->cur - push->bgn;
drivers/gpu/drm/nouveau/nvif/chan.c
39
const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
drivers/gpu/drm/nouveau/nvif/chan.c
40
const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
drivers/gpu/drm/nouveau/nvif/chan.c
73
if (push->cur + push_nr > push->end) {
drivers/gpu/drm/nouveau/nvif/chan.c
81
chan->gpfifo.free = chan->func->gpfifo.read_get(chan) - chan->gpfifo.cur - 1;
drivers/gpu/drm/nouveau/nvif/chan506f.c
11
nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
drivers/gpu/drm/nouveau/nvif/chan506f.c
17
u32 gpptr = chan->gpfifo.cur << 3;
drivers/gpu/drm/nouveau/nvif/chan506f.c
28
chan->gpfifo.cur = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
drivers/gpu/drm/nouveau/nvif/chan506f.c
31
chan->push.end = chan->push.cur;
drivers/gpu/drm/nouveau/nvif/chanc36f.c
16
nvif_wr32(&chan->userd, 0x8c, chan->gpfifo.cur);
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
129
u32 cycles, cur, prev;
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
149
cur = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
151
if (prev != cur) {
drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
155
prev = cur;
drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
103
int temp, cur;
drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
118
cur = therm->func->temp_get(therm);
drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
119
if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
122
cur < thrs->temp - thrs->hysteresis)
drivers/gpu/drm/omapdrm/omap_overlay.c
37
struct omap_hw_overlay *cur = priv->overlays[i];
drivers/gpu/drm/omapdrm/omap_overlay.c
40
cur->idx, cur->id, cur->caps);
drivers/gpu/drm/omapdrm/omap_overlay.c
43
if (hwoverlay_to_plane[cur->idx])
drivers/gpu/drm/omapdrm/omap_overlay.c
47
if (caps & ~cur->caps)
drivers/gpu/drm/omapdrm/omap_overlay.c
52
cur->id, fourcc))
drivers/gpu/drm/omapdrm/omap_overlay.c
55
return cur;
drivers/gpu/drm/qxl/qxl_release.c
63
unsigned long cur, end = jiffies + timeout;
drivers/gpu/drm/qxl/qxl_release.c
73
cur = jiffies;
drivers/gpu/drm/qxl/qxl_release.c
74
if (time_after(cur, end))
drivers/gpu/drm/qxl/qxl_release.c
76
return end - cur;
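qxl_release's wait loop is the standard jiffies deadline pattern: capture `end = jiffies + timeout` once, re-sample `cur` on each pass, bail when time_after(cur, end), and otherwise report the remaining budget as `end - cur`. A standalone restatement with a generic tick counter in place of jiffies:

/* Signed-difference comparison stays correct across counter wrap. */
static int tick_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

/* Remaining ticks before `end`, or 0 if the deadline already passed. */
static long ticks_left(unsigned long cur, unsigned long end)
{
        if (tick_after(cur, end))
                return 0;
        return (long)(end - cur);
}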
drivers/gpu/drm/radeon/rv6xx_dpm.c
194
struct rv6xx_sclk_stepping *cur,
drivers/gpu/drm/radeon/rv6xx_dpm.c
199
next.post_divider = cur->post_divider;
drivers/gpu/drm/radeon/rv6xx_dpm.c
202
next.vco_frequency = (cur->vco_frequency * (100 + step_size)) / 100;
drivers/gpu/drm/radeon/rv6xx_dpm.c
204
next.vco_frequency = (cur->vco_frequency * 100 + 99 + step_size) / (100 + step_size);
drivers/gpu/drm/radeon/rv6xx_dpm.c
210
struct rv6xx_sclk_stepping *cur,
drivers/gpu/drm/radeon/rv6xx_dpm.c
213
return (cur->post_divider > target->post_divider) &&
drivers/gpu/drm/radeon/rv6xx_dpm.c
214
((cur->vco_frequency * target->post_divider) <=
drivers/gpu/drm/radeon/rv6xx_dpm.c
215
(target->vco_frequency * (cur->post_divider - 1)));
drivers/gpu/drm/radeon/rv6xx_dpm.c
219
struct rv6xx_sclk_stepping *cur,
drivers/gpu/drm/radeon/rv6xx_dpm.c
222
struct rv6xx_sclk_stepping next = *cur;
drivers/gpu/drm/radeon/rv6xx_dpm.c
231
struct rv6xx_sclk_stepping *cur,
drivers/gpu/drm/radeon/rv6xx_dpm.c
235
return (increasing_vco && (cur->vco_frequency >= target->vco_frequency)) ||
drivers/gpu/drm/radeon/rv6xx_dpm.c
236
(!increasing_vco && (cur->vco_frequency <= target->vco_frequency));
drivers/gpu/drm/radeon/rv6xx_dpm.c
243
struct rv6xx_sclk_stepping cur;
drivers/gpu/drm/radeon/rv6xx_dpm.c
248
rv6xx_convert_clock_to_stepping(rdev, low, &cur);
drivers/gpu/drm/radeon/rv6xx_dpm.c
251
rv6xx_output_stepping(rdev, step_index++, &cur);
drivers/gpu/drm/radeon/rv6xx_dpm.c
253
increasing_vco = (target.vco_frequency >= cur.vco_frequency);
drivers/gpu/drm/radeon/rv6xx_dpm.c
255
if (target.post_divider > cur.post_divider)
drivers/gpu/drm/radeon/rv6xx_dpm.c
256
cur.post_divider = target.post_divider;
drivers/gpu/drm/radeon/rv6xx_dpm.c
261
if (rv6xx_can_step_post_div(rdev, &cur, &target))
drivers/gpu/drm/radeon/rv6xx_dpm.c
262
next = rv6xx_next_post_div_step(rdev, &cur, &target);
drivers/gpu/drm/radeon/rv6xx_dpm.c
264
next = rv6xx_next_vco_step(rdev, &cur, increasing_vco, R600_VCOSTEPPCT_DFLT);
drivers/gpu/drm/radeon/rv6xx_dpm.c
271
if (!rv6xx_reached_stepping_target(rdev, &tiny, &cur, !increasing_vco))
drivers/gpu/drm/radeon/rv6xx_dpm.c
289
cur = next;
drivers/gpu/drm/ttm/ttm_resource.c
190
static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
drivers/gpu/drm/ttm/ttm_resource.c
192
struct ttm_lru_item *lru = &cur->lru;
drivers/gpu/drm/ttm/ttm_resource.c
202
static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
drivers/gpu/drm/ttm/ttm_resource.c
204
struct ttm_lru_item *lru = &cur->lru;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1007
struct vmw_cmdbuf_header *cur;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1013
cur = man->cur;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1014
if (cur && (size + man->cur_pos > cur->size ||
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1015
((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1016
ctx_id != cur->cb_header->dxContext)))
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1019
if (!man->cur) {
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1022
interruptible, &man->cur);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1028
cur = man->cur;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1032
cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1033
cur->cb_header->dxContext = ctx_id;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1036
cur->reserved = size;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1038
return (void *) (man->cur->cmd + man->cur_pos);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1051
struct vmw_cmdbuf_header *cur = man->cur;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1055
WARN_ON(size > cur->reserved);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1058
cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
1116
man->cur = header;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
122
struct vmw_cmdbuf_header *cur;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
666
struct vmw_cmdbuf_header *cur = man->cur;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
670
if (!cur)
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
675
__vmw_cmdbuf_header_free(cur);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
679
man->cur->cb_header->length = man->cur_pos;
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
680
vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
683
man->cur = NULL;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
103
struct vmw_ctx_binding_state *cur;
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
163
vmw_binding_state_commit(entry->cur, entry->staged);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
229
node->cur = vmw_context_binding_state(res);
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
730
ret = vmw_binding_rebind_all(val->cur);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
302
pgoff_t start, cur, end;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
313
cur = max(res_start, dirty->start);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
315
while (cur < res_end) {
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
318
start = find_next_bit(&dirty->bitmap[0], res_end, cur);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
323
cur = end + 1;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
338
pgoff_t start, cur, end;
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
349
cur = max(res_start, dirty->start);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
351
while (cur < res_end) {
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
354
start = find_next_bit(&dirty->bitmap[0], res_end, cur);
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
359
cur = end + 1;
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1103
struct rb_node *cur = vbo->res_tree.rb_node;
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1114
while (cur) {
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1116
container_of(cur, struct vmw_resource, mob_node);
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1119
cur = cur->rb_left;
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1122
cur = cur->rb_right;
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1125
cur = cur->rb_left;
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1139
cur = rb_next(&found->mob_node);
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1140
if (!cur)
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1143
found = container_of(cur, struct vmw_resource, mob_node);
drivers/gpu/drm/xe/xe_bo.c
2987
struct xe_res_cursor cur;
drivers/gpu/drm/xe/xe_bo.c
2998
page_size, &cur);
drivers/gpu/drm/xe/xe_bo.c
2999
return xe_res_dma(&cur) + offset;
drivers/gpu/drm/xe/xe_bo.c
3001
struct xe_res_cursor cur;
drivers/gpu/drm/xe/xe_bo.c
3004
page_size, &cur);
drivers/gpu/drm/xe/xe_bo.c
3005
return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
drivers/gpu/drm/xe/xe_ggtt.c
766
struct xe_res_cursor cur;
drivers/gpu/drm/xe/xe_ggtt.c
777
for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur);
drivers/gpu/drm/xe/xe_ggtt.c
778
cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
drivers/gpu/drm/xe/xe_ggtt.c
779
ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
drivers/gpu/drm/xe/xe_ggtt.c
780
pte | xe_res_dma(&cur));
drivers/gpu/drm/xe/xe_ggtt.c
785
for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
drivers/gpu/drm/xe/xe_ggtt.c
786
cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
drivers/gpu/drm/xe/xe_ggtt.c
787
ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
drivers/gpu/drm/xe/xe_ggtt.c
788
pte + cur.start);
drivers/gpu/drm/xe/xe_lmtt.c
470
struct xe_res_cursor cur;
drivers/gpu/drm/xe/xe_lmtt.c
479
xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur);
drivers/gpu/drm/xe/xe_lmtt.c
480
while (cur.remaining) {
drivers/gpu/drm/xe/xe_lmtt.c
481
addr = xe_res_dma(&cur);
drivers/gpu/drm/xe/xe_lmtt.c
489
xe_res_next(&cur, page_size);
drivers/gpu/drm/xe/xe_migrate.c
527
static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_migrate.c
530
u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);
drivers/gpu/drm/xe/xe_migrate.c
532
if (mem_type_is_vram(cur->mem_type)) {
drivers/gpu/drm/xe/xe_migrate.c
542
u64 chunk = max_t(u64, cur->size, m->min_chunk_size);
drivers/gpu/drm/xe/xe_migrate.c
552
static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_migrate.c
555
return cur->size >= size;
drivers/gpu/drm/xe/xe_migrate.c
564
struct xe_res_cursor *cur,
drivers/gpu/drm/xe/xe_migrate.c
573
if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
drivers/gpu/drm/xe/xe_migrate.c
576
cur->start + vram_region_gpu_offset(res),
drivers/gpu/drm/xe/xe_migrate.c
603
struct xe_res_cursor *cur,
drivers/gpu/drm/xe/xe_migrate.c
637
addr = xe_res_dma(cur) & PAGE_MASK;
drivers/gpu/drm/xe/xe_migrate.c
658
xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
drivers/gpu/drm/xe/xe_mmio.c
285
ktime_t cur = ktime_get_raw();
drivers/gpu/drm/xe/xe_mmio.c
286
const ktime_t end = ktime_add_us(cur, timeout_us);
drivers/gpu/drm/xe/xe_mmio.c
304
cur = ktime_get_raw();
drivers/gpu/drm/xe/xe_mmio.c
305
if (!ktime_before(cur, end))
drivers/gpu/drm/xe/xe_mmio.c
308
if (ktime_after(ktime_add_us(cur, wait), end))
drivers/gpu/drm/xe/xe_mmio.c
309
wait = ktime_us_delta(end, cur);
drivers/gpu/drm/xe/xe_res_cursor.h
101
cur->mem_type = res->mem_type;
drivers/gpu/drm/xe/xe_res_cursor.h
103
switch (cur->mem_type) {
drivers/gpu/drm/xe/xe_res_cursor.h
128
cur->mm = mm;
drivers/gpu/drm/xe/xe_res_cursor.h
129
cur->start = drm_buddy_block_offset(block) + start;
drivers/gpu/drm/xe/xe_res_cursor.h
130
cur->size = min(drm_buddy_block_size(mm, block) - start,
drivers/gpu/drm/xe/xe_res_cursor.h
132
cur->remaining = size;
drivers/gpu/drm/xe/xe_res_cursor.h
133
cur->node = block;
drivers/gpu/drm/xe/xe_res_cursor.h
143
cur->start = start;
drivers/gpu/drm/xe/xe_res_cursor.h
144
cur->size = size;
drivers/gpu/drm/xe/xe_res_cursor.h
145
cur->remaining = size;
drivers/gpu/drm/xe/xe_res_cursor.h
146
cur->node = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
147
cur->mem_type = XE_PL_TT;
drivers/gpu/drm/xe/xe_res_cursor.h
151
static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_res_cursor.h
153
struct scatterlist *sgl = cur->sgl;
drivers/gpu/drm/xe/xe_res_cursor.h
154
u64 start = cur->start;
drivers/gpu/drm/xe/xe_res_cursor.h
162
cur->start = start;
drivers/gpu/drm/xe/xe_res_cursor.h
163
cur->size = sg_dma_len(sgl) - start;
drivers/gpu/drm/xe/xe_res_cursor.h
164
cur->sgl = sgl;
drivers/gpu/drm/xe/xe_res_cursor.h
171
static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_res_cursor.h
173
const struct drm_pagemap_addr *addr = cur->dma_addr;
drivers/gpu/drm/xe/xe_res_cursor.h
174
u64 start = cur->start;
drivers/gpu/drm/xe/xe_res_cursor.h
176
while (start >= cur->dma_seg_size) {
drivers/gpu/drm/xe/xe_res_cursor.h
177
start -= cur->dma_seg_size;
drivers/gpu/drm/xe/xe_res_cursor.h
179
cur->dma_seg_size = PAGE_SIZE << addr->order;
drivers/gpu/drm/xe/xe_res_cursor.h
181
cur->dma_start = addr->addr;
drivers/gpu/drm/xe/xe_res_cursor.h
184
while (cur->dma_seg_size - start < cur->remaining) {
drivers/gpu/drm/xe/xe_res_cursor.h
185
if (cur->dma_start + cur->dma_seg_size != addr[1].addr ||
drivers/gpu/drm/xe/xe_res_cursor.h
189
cur->dma_seg_size += PAGE_SIZE << addr->order;
drivers/gpu/drm/xe/xe_res_cursor.h
192
cur->dma_addr = addr;
drivers/gpu/drm/xe/xe_res_cursor.h
193
cur->start = start;
drivers/gpu/drm/xe/xe_res_cursor.h
194
cur->size = cur->dma_seg_size - start;
drivers/gpu/drm/xe/xe_res_cursor.h
209
struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_res_cursor.h
212
cur->node = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
213
cur->start = start;
drivers/gpu/drm/xe/xe_res_cursor.h
214
cur->remaining = size;
drivers/gpu/drm/xe/xe_res_cursor.h
215
cur->size = 0;
drivers/gpu/drm/xe/xe_res_cursor.h
216
cur->dma_addr = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
217
cur->sgl = sg->sgl;
drivers/gpu/drm/xe/xe_res_cursor.h
218
cur->mem_type = XE_PL_TT;
drivers/gpu/drm/xe/xe_res_cursor.h
219
__xe_res_sg_next(cur);
drivers/gpu/drm/xe/xe_res_cursor.h
234
struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_res_cursor.h
240
cur->node = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
241
cur->start = start;
drivers/gpu/drm/xe/xe_res_cursor.h
242
cur->remaining = size;
drivers/gpu/drm/xe/xe_res_cursor.h
243
cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
drivers/gpu/drm/xe/xe_res_cursor.h
244
cur->dma_start = 0;
drivers/gpu/drm/xe/xe_res_cursor.h
245
cur->size = 0;
drivers/gpu/drm/xe/xe_res_cursor.h
246
cur->dma_addr = dma_addr;
drivers/gpu/drm/xe/xe_res_cursor.h
247
__xe_res_dma_next(cur);
drivers/gpu/drm/xe/xe_res_cursor.h
248
cur->sgl = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
249
cur->mem_type = XE_PL_TT;
drivers/gpu/drm/xe/xe_res_cursor.h
260
static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
drivers/gpu/drm/xe/xe_res_cursor.h
266
XE_WARN_ON(size > cur->remaining);
drivers/gpu/drm/xe/xe_res_cursor.h
268
cur->remaining -= size;
drivers/gpu/drm/xe/xe_res_cursor.h
269
if (!cur->remaining)
drivers/gpu/drm/xe/xe_res_cursor.h
272
if (cur->size > size) {
drivers/gpu/drm/xe/xe_res_cursor.h
273
cur->size -= size;
drivers/gpu/drm/xe/xe_res_cursor.h
274
cur->start += size;
drivers/gpu/drm/xe/xe_res_cursor.h
278
if (cur->dma_addr) {
drivers/gpu/drm/xe/xe_res_cursor.h
279
cur->start += size;
drivers/gpu/drm/xe/xe_res_cursor.h
280
__xe_res_dma_next(cur);
drivers/gpu/drm/xe/xe_res_cursor.h
284
if (cur->sgl) {
drivers/gpu/drm/xe/xe_res_cursor.h
285
cur->start += size;
drivers/gpu/drm/xe/xe_res_cursor.h
286
__xe_res_sg_next(cur);
drivers/gpu/drm/xe/xe_res_cursor.h
290
switch (cur->mem_type) {
drivers/gpu/drm/xe/xe_res_cursor.h
294
start = size - cur->size;
drivers/gpu/drm/xe/xe_res_cursor.h
295
block = cur->node;
drivers/gpu/drm/xe/xe_res_cursor.h
301
while (start >= drm_buddy_block_size(cur->mm, block)) {
drivers/gpu/drm/xe/xe_res_cursor.h
302
start -= drm_buddy_block_size(cur->mm, block);
drivers/gpu/drm/xe/xe_res_cursor.h
308
cur->start = drm_buddy_block_offset(block) + start;
drivers/gpu/drm/xe/xe_res_cursor.h
309
cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
drivers/gpu/drm/xe/xe_res_cursor.h
310
cur->remaining);
drivers/gpu/drm/xe/xe_res_cursor.h
311
cur->node = block;
drivers/gpu/drm/xe/xe_res_cursor.h
323
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_res_cursor.h
325
if (cur->dma_addr)
drivers/gpu/drm/xe/xe_res_cursor.h
326
return cur->dma_start + cur->start;
drivers/gpu/drm/xe/xe_res_cursor.h
327
else if (cur->sgl)
drivers/gpu/drm/xe/xe_res_cursor.h
328
return sg_dma_address(cur->sgl) + cur->start;
drivers/gpu/drm/xe/xe_res_cursor.h
330
return cur->start;
drivers/gpu/drm/xe/xe_res_cursor.h
340
static inline bool xe_res_is_vram(const struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_res_cursor.h
342
if (cur->dma_addr)
drivers/gpu/drm/xe/xe_res_cursor.h
343
return cur->dma_addr->proto == XE_INTERCONNECT_VRAM;
drivers/gpu/drm/xe/xe_res_cursor.h
345
switch (cur->mem_type) {
drivers/gpu/drm/xe/xe_res_cursor.h
92
struct xe_res_cursor *cur)
drivers/gpu/drm/xe/xe_res_cursor.h
94
cur->sgl = NULL;
drivers/gpu/drm/xe/xe_res_cursor.h
95
cur->dma_addr = NULL;
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
268
struct xe_res_cursor cur;
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
275
xe_res_first(bo->ttm.resource, offset, 4096, &cur);
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
276
return mgr->io_base + cur.start;
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
283
struct xe_res_cursor cur;
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
288
xe_res_first(mem, 0, 4096, &cur);
drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
289
mem->bus.offset = cur.start;
drivers/gpu/ipu-v3/ipu-pre.c
109
} cur;
drivers/gpu/ipu-v3/ipu-pre.c
146
if (pre->cur.in_use)
drivers/gpu/ipu-v3/ipu-pre.c
159
pre->cur.in_use = true;
drivers/gpu/ipu-v3/ipu-pre.c
167
pre->cur.in_use = false;
drivers/gpu/ipu-v3/ipu-pre.c
173
if (pre->cur.modifier == DRM_FORMAT_MOD_LINEAR)
drivers/gpu/ipu-v3/ipu-pre.c
174
pre->cur.safe_window_end = pre->cur.height - 2;
drivers/gpu/ipu-v3/ipu-pre.c
176
pre->cur.safe_window_end = DIV_ROUND_UP(pre->cur.height, 4) - 1;
drivers/gpu/ipu-v3/ipu-pre.c
191
if (pre->cur.cpp == 2)
drivers/gpu/ipu-v3/ipu-pre.c
197
pre->cur.ctrl &= ~IPU_PRE_CTRL_BLOCK_EN;
drivers/gpu/ipu-v3/ipu-pre.c
199
pre->cur.ctrl |= IPU_PRE_CTRL_BLOCK_EN;
drivers/gpu/ipu-v3/ipu-pre.c
201
pre->cur.modifier = modifier;
drivers/gpu/ipu-v3/ipu-pre.c
212
pre->cur.bufaddr = bufaddr;
drivers/gpu/ipu-v3/ipu-pre.c
213
pre->cur.height = height;
drivers/gpu/ipu-v3/ipu-pre.c
214
pre->cur.cpp = info->cpp[0];
drivers/gpu/ipu-v3/ipu-pre.c
215
pre->cur.ctrl = readl(pre->regs + IPU_PRE_CTRL);
drivers/gpu/ipu-v3/ipu-pre.c
253
pre->cur.ctrl |= IPU_PRE_CTRL_EN_REPEAT | IPU_PRE_CTRL_ENABLE;
drivers/gpu/ipu-v3/ipu-pre.c
254
writel(pre->cur.ctrl | IPU_PRE_CTRL_SDW_UPDATE,
drivers/gpu/ipu-v3/ipu-pre.c
260
if (bufaddr == pre->cur.bufaddr &&
drivers/gpu/ipu-v3/ipu-pre.c
261
modifier == pre->cur.modifier)
drivers/gpu/ipu-v3/ipu-pre.c
265
pre->cur.bufaddr = bufaddr;
drivers/gpu/ipu-v3/ipu-pre.c
267
if (modifier != pre->cur.modifier)
drivers/gpu/ipu-v3/ipu-pre.c
285
current_yblock < pre->cur.safe_window_end)
drivers/gpu/ipu-v3/ipu-pre.c
292
writel(pre->cur.ctrl | IPU_PRE_CTRL_SDW_UPDATE,
drivers/hid/bpf/hid_bpf_struct_ops.c
102
if (write_range->struct_name != cur) {
drivers/hid/bpf/hid_bpf_struct_ops.c
113
cur = write_range->struct_name;
drivers/hid/bpf/hid_bpf_struct_ops.c
118
if (cur != write_range->struct_name &&
drivers/hid/bpf/hid_bpf_struct_ops.c
129
cur = write_range->struct_name;
drivers/hid/bpf/hid_bpf_struct_ops.c
138
off, size, cur);
drivers/hid/bpf/hid_bpf_struct_ops.c
86
const char *cur = NULL;
drivers/hid/bpf/hid_bpf_struct_ops.c
98
if (t == state && write_range->struct_name != cur)
drivers/hid/wacom.h
248
struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur);
drivers/hid/wacom_sys.c
1528
struct wacom_led *wacom_led_next(struct wacom *wacom, struct wacom_led *cur)
drivers/hid/wacom_sys.c
1533
if (!wacom || !cur)
drivers/hid/wacom_sys.c
1536
group = cur->group;
drivers/hid/wacom_sys.c
1537
next = cur->id;
drivers/hid/wacom_sys.c
1541
if (!next_led || next_led == cur)
drivers/hid/wacom_wac.c
4169
int cur;
drivers/hid/wacom_wac.c
4176
cur = wacom->led.groups[group].select;
drivers/hid/wacom_wac.c
4178
led = wacom_led_find(wacom, group, cur);
drivers/hid/wacom_wac.c
4181
cur, group);
drivers/hv/vmbus_drv.c
499
struct list_head *cur;
drivers/hv/vmbus_drv.c
510
list_for_each(cur, &channel->sc_list) {
drivers/hv/vmbus_drv.c
512
cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
drivers/hwmon/lm77.c
243
int i, cur, conf, hyst, crit, min, max;
drivers/hwmon/lm77.c
264
cur = i2c_smbus_read_word_data(client, 0);
drivers/hwmon/lm77.c
280
if (((cur & 0x00f0) != 0xf0 && (cur & 0x00f0) != 0x0)
drivers/hwmon/lm77.c
292
cur = i2c_smbus_read_word_data(client, 0);
drivers/hwmon/lm77.c
293
if (i2c_smbus_read_word_data(client, 6) != cur
drivers/hwmon/lm77.c
294
|| i2c_smbus_read_word_data(client, 7) != cur)
drivers/hwmon/lm80.c
552
int i, cur, man_id, dev_id;
drivers/hwmon/lm80.c
579
cur = i2c_smbus_read_byte_data(client, i);
drivers/hwmon/lm80.c
580
if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur)
drivers/hwmon/lm80.c
581
|| (i2c_smbus_read_byte_data(client, i + 0x80) != cur)
drivers/hwmon/lm80.c
582
|| (i2c_smbus_read_byte_data(client, i + 0xc0) != cur))
drivers/hwmon/lm93.c
1057
data->block4[i].cur =
drivers/hwmon/lm93.c
2114
return sprintf(buf, "%d\n", data->block4[nr].cur);
drivers/hwmon/lm93.c
221
u8 cur;
drivers/hwtracing/coresight/coresight-etb10.c
417
buf->cur = head / PAGE_SIZE;
drivers/hwtracing/coresight/coresight-etb10.c
432
int i, cur;
drivers/hwtracing/coresight/coresight-etb10.c
529
cur = buf->cur;
drivers/hwtracing/coresight/coresight-etb10.c
534
buf_ptr = buf->data_pages[cur] + offset;
drivers/hwtracing/coresight/coresight-etb10.c
548
cur++;
drivers/hwtracing/coresight/coresight-etb10.c
550
cur &= buf->nr_pages - 1;
drivers/hwtracing/coresight/coresight-priv.h
104
unsigned int cur;
drivers/hwtracing/coresight/coresight-tmc-etf.c
464
buf->cur = head / PAGE_SIZE;
drivers/hwtracing/coresight/coresight-tmc-etf.c
479
int i, cur;
drivers/hwtracing/coresight/coresight-tmc-etf.c
555
cur = buf->cur;
drivers/hwtracing/coresight/coresight-tmc-etf.c
561
buf_ptr = buf->data_pages[cur] + offset;
drivers/hwtracing/coresight/coresight-tmc-etf.c
572
cur++;
drivers/hwtracing/coresight/coresight-tmc-etf.c
574
cur &= buf->nr_pages - 1;
drivers/i2c/busses/i2c-qcom-geni.c
106
struct i2c_msg *cur;
drivers/i2c/busses/i2c-qcom-geni.c
248
if (gi2c->cur)
drivers/i2c/busses/i2c-qcom-geni.c
250
gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags);
drivers/i2c/busses/i2c-qcom-geni.c
278
struct i2c_msg *cur;
drivers/i2c/busses/i2c-qcom-geni.c
286
cur = gi2c->cur;
drivers/i2c/busses/i2c-qcom-geni.c
288
if (!cur ||
drivers/i2c/busses/i2c-qcom-geni.c
312
} else if (cur->flags & I2C_M_RD &&
drivers/i2c/busses/i2c-qcom-geni.c
319
while (gi2c->cur_rd < cur->len && p < sizeof(val)) {
drivers/i2c/busses/i2c-qcom-geni.c
320
cur->buf[gi2c->cur_rd++] = val & 0xff;
drivers/i2c/busses/i2c-qcom-geni.c
324
if (gi2c->cur_rd == cur->len)
drivers/i2c/busses/i2c-qcom-geni.c
327
} else if (!(cur->flags & I2C_M_RD) &&
drivers/i2c/busses/i2c-qcom-geni.c
334
while (gi2c->cur_wr < cur->len && p < sizeof(val)) {
drivers/i2c/busses/i2c-qcom-geni.c
335
temp = cur->buf[gi2c->cur_wr++];
drivers/i2c/busses/i2c-qcom-geni.c
341
if (gi2c->cur_wr == cur->len) {
drivers/i2c/busses/i2c-qcom-geni.c
374
gi2c->cur = NULL;
drivers/i2c/busses/i2c-qcom-geni.c
418
struct i2c_msg *cur)
drivers/i2c/busses/i2c-qcom-geni.c
425
i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
drivers/i2c/busses/i2c-qcom-geni.c
430
struct i2c_msg *cur)
drivers/i2c/busses/i2c-qcom-geni.c
437
i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
drivers/i2c/busses/i2c-qcom-geni.c
449
struct i2c_msg *cur;
drivers/i2c/busses/i2c-qcom-geni.c
470
cur = gi2c->cur;
drivers/i2c/busses/i2c-qcom-geni.c
475
geni_i2c_rx_msg_cleanup(gi2c, cur);
drivers/i2c/busses/i2c-qcom-geni.c
488
struct i2c_msg *cur;
drivers/i2c/busses/i2c-qcom-geni.c
512
cur = gi2c->cur;
drivers/i2c/busses/i2c-qcom-geni.c
517
geni_i2c_tx_msg_cleanup(gi2c, cur);
drivers/i2c/busses/i2c-qcom-geni.c
810
gi2c->cur = &msgs[i];
drivers/i2c/busses/i2c-qcom-geni.c
812
dev_dbg(gi2c->se.dev, "msg[%d].len:%d\n", i, gi2c->cur->len);
drivers/i2c/busses/i2c-qcom-geni.c
880
gi2c->cur = &msgs[i];
drivers/i2c/busses/i2c-qcom-geni.c
919
gi2c->cur = NULL;
drivers/i2c/muxes/i2c-demux-pinctrl.c
132
int ret, cur = priv->cur_chan;
drivers/i2c/muxes/i2c-demux-pinctrl.c
134
if (cur < 0)
drivers/i2c/muxes/i2c-demux-pinctrl.c
138
i2c_put_adapter(priv->chan[cur].parent_adap);
drivers/i2c/muxes/i2c-demux-pinctrl.c
140
ret = of_changeset_revert(&priv->chan[cur].chgset);
drivers/i2c/muxes/i2c-demux-pinctrl.c
142
priv->chan[cur].parent_adap = NULL;
drivers/i3c/master/adi-i3c-master.c
114
struct adi_i3c_xfer *cur;
drivers/i3c/master/adi-i3c-master.c
203
struct adi_i3c_xfer *xfer = master->xferqueue.cur;
drivers/i3c/master/adi-i3c-master.c
233
struct adi_i3c_xfer *xfer = master->xferqueue.cur;
drivers/i3c/master/adi-i3c-master.c
283
master->xferqueue.cur = xfer;
drivers/i3c/master/adi-i3c-master.c
292
if (master->xferqueue.cur) {
drivers/i3c/master/adi-i3c-master.c
295
master->xferqueue.cur = xfer;
drivers/i3c/master/adi-i3c-master.c
304
if (master->xferqueue.cur == xfer)
drivers/i3c/master/adi-i3c-master.c
305
master->xferqueue.cur = NULL;
drivers/i3c/master/dw-i3c-master.c
403
struct dw_i3c_xfer *xfer = master->xferqueue.cur;
drivers/i3c/master/dw-i3c-master.c
434
if (master->xferqueue.cur) {
drivers/i3c/master/dw-i3c-master.c
437
master->xferqueue.cur = xfer;
drivers/i3c/master/dw-i3c-master.c
445
if (master->xferqueue.cur == xfer) {
drivers/i3c/master/dw-i3c-master.c
448
master->xferqueue.cur = NULL;
drivers/i3c/master/dw-i3c-master.c
470
struct dw_i3c_xfer *xfer = master->xferqueue.cur;
drivers/i3c/master/dw-i3c-master.c
531
master->xferqueue.cur = xfer;
drivers/i3c/master/dw-i3c-master.h
34
struct dw_i3c_xfer *cur;
drivers/i3c/master/i3c-master-cdns.c
412
struct cdns_i3c_xfer *cur;
drivers/i3c/master/i3c-master-cdns.c
519
struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
drivers/i3c/master/i3c-master-cdns.c
549
struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
drivers/i3c/master/i3c-master-cdns.c
616
master->xferqueue.cur = xfer;
drivers/i3c/master/i3c-master-cdns.c
627
if (master->xferqueue.cur) {
drivers/i3c/master/i3c-master-cdns.c
630
master->xferqueue.cur = xfer;
drivers/i3c/master/i3c-master-cdns.c
642
if (master->xferqueue.cur == xfer) {
drivers/i3c/master/i3c-master-cdns.c
650
master->xferqueue.cur = NULL;
drivers/i3c/master/renesas-i3c.c
1019
xfer = i3c->xferqueue.cur;
drivers/i3c/master/renesas-i3c.c
1059
xfer = i3c->xferqueue.cur;
drivers/i3c/master/renesas-i3c.c
1130
i3c->xferqueue.cur = xfer;
drivers/i3c/master/renesas-i3c.c
1143
xfer = i3c->xferqueue.cur;
drivers/i3c/master/renesas-i3c.c
1189
xfer = i3c->xferqueue.cur;
drivers/i3c/master/renesas-i3c.c
1240
xfer = i3c->xferqueue.cur;
drivers/i3c/master/renesas-i3c.c
1264
xfer = i3c->xferqueue.cur;
drivers/i3c/master/renesas-i3c.c
249
struct renesas_i3c_xfer *cur;
drivers/i3c/master/renesas-i3c.c
361
struct renesas_i3c_xfer *xfer = i3c->xferqueue.cur;
drivers/i3c/master/renesas-i3c.c
409
if (i3c->xferqueue.cur == xfer)
drivers/i3c/master/renesas-i3c.c
410
i3c->xferqueue.cur = NULL;
drivers/i3c/master/renesas-i3c.c
425
if (i3c->xferqueue.cur) {
drivers/i3c/master/renesas-i3c.c
428
i3c->xferqueue.cur = xfer;
drivers/i3c/master/svc-i3c-master.c
1526
if (master->xferqueue.cur == xfer)
drivers/i3c/master/svc-i3c-master.c
1527
master->xferqueue.cur = NULL;
drivers/i3c/master/svc-i3c-master.c
1549
struct svc_i3c_xfer *xfer = master->xferqueue.cur;
drivers/i3c/master/svc-i3c-master.c
1585
master->xferqueue.cur = xfer;
drivers/i3c/master/svc-i3c-master.c
1603
if (master->xferqueue.cur) {
drivers/i3c/master/svc-i3c-master.c
1606
master->xferqueue.cur = xfer;
drivers/i3c/master/svc-i3c-master.c
245
struct svc_i3c_xfer *cur;
drivers/iio/adc/ti-tsc2046.c
347
struct tsc2046_adc_group_layout *cur;
drivers/iio/adc/ti-tsc2046.c
357
cur = &priv->l[group];
drivers/iio/adc/ti-tsc2046.c
358
cur->offset = offset;
drivers/iio/adc/ti-tsc2046.c
359
cur->count = max_count;
drivers/iio/adc/ti-tsc2046.c
360
cur->skip = count_skip;
drivers/iio/trigger/stm32-lptimer-trigger.c
104
cur++;
drivers/iio/trigger/stm32-lptimer-trigger.c
87
const char * const *cur = priv->triggers;
drivers/iio/trigger/stm32-lptimer-trigger.c
90
while (cur && *cur) {
drivers/iio/trigger/stm32-lptimer-trigger.c
93
trig = devm_iio_trigger_alloc(priv->dev, "%s", *cur);
drivers/iio/trigger/stm32-timer-trigger.c
408
const char * const *cur = priv->triggers;
drivers/iio/trigger/stm32-timer-trigger.c
412
while (cur && *cur) {
drivers/iio/trigger/stm32-timer-trigger.c
414
bool cur_is_trgo = stm32_timer_is_trgo_name(*cur);
drivers/iio/trigger/stm32-timer-trigger.c
415
bool cur_is_trgo2 = stm32_timer_is_trgo2_name(*cur);
drivers/iio/trigger/stm32-timer-trigger.c
418
cur++;
drivers/iio/trigger/stm32-timer-trigger.c
422
trig = devm_iio_trigger_alloc(priv->dev, "%s", *cur);
drivers/iio/trigger/stm32-timer-trigger.c
445
cur++;
drivers/iio/trigger/stm32-timer-trigger.c
530
const char * const *cur = priv->valids;
drivers/iio/trigger/stm32-timer-trigger.c
536
while (cur && *cur) {
drivers/iio/trigger/stm32-timer-trigger.c
537
if (!strncmp(trig->name, *cur, strlen(trig->name))) {
drivers/iio/trigger/stm32-timer-trigger.c
543
cur++;
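The stm32 trigger code walks `cur` over a NULL-terminated array of trigger-name strings; `while (cur && *cur)` covers both a missing table and its terminator. Minimal form:

#include <stdio.h>

static void list_triggers(const char *const *cur)
{
        while (cur && *cur) {           /* stop at the NULL terminator */
                printf("%s\n", *cur);
                cur++;
        }
}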
drivers/infiniband/core/device.c
2347
struct ib_port_data *cur;
drivers/infiniband/core/device.c
2350
hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
drivers/infiniband/core/device.c
2352
if (rcu_access_pointer(cur->netdev) == ndev &&
drivers/infiniband/core/device.c
2354
cur->ib_dev->ops.driver_id == driver_id) &&
drivers/infiniband/core/device.c
2355
ib_device_try_get(cur->ib_dev)) {
drivers/infiniband/core/device.c
2356
res = cur->ib_dev;
drivers/infiniband/core/umem_dmabuf.c
19
unsigned long start, end, cur = 0;
drivers/infiniband/core/umem_dmabuf.c
43
if (start < cur + sg_dma_len(sg) && cur < end)
drivers/infiniband/core/umem_dmabuf.c
45
if (cur <= start && start < cur + sg_dma_len(sg)) {
drivers/infiniband/core/umem_dmabuf.c
46
unsigned long offset = start - cur;
drivers/infiniband/core/umem_dmabuf.c
52
cur += offset;
drivers/infiniband/core/umem_dmabuf.c
54
if (cur < end && end <= cur + sg_dma_len(sg)) {
drivers/infiniband/core/umem_dmabuf.c
55
unsigned long trim = cur + sg_dma_len(sg) - end;
drivers/infiniband/core/umem_dmabuf.c
62
cur += sg_dma_len(sg);
drivers/infiniband/core/uverbs_cmd.c
127
const void __user *cur;
drivers/infiniband/core/uverbs_cmd.c
142
iter->cur = attrs->ucore.inbuf + req_len;
drivers/infiniband/core/uverbs_cmd.c
150
if (iter->cur + len > iter->end)
drivers/infiniband/core/uverbs_cmd.c
153
if (copy_from_user(val, iter->cur, len))
drivers/infiniband/core/uverbs_cmd.c
156
iter->cur += len;
drivers/infiniband/core/uverbs_cmd.c
163
const void __user *res = iter->cur;
drivers/infiniband/core/uverbs_cmd.c
165
if (len > iter->end - iter->cur)
drivers/infiniband/core/uverbs_cmd.c
167
iter->cur += len;
drivers/infiniband/core/uverbs_cmd.c
173
if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
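uverbs_cmd.c treats `cur` as a read cursor into a user-supplied buffer, validating every fetch against `end` before copying and advancing. A userspace sketch of the same invariant (memcpy stands in for copy_from_user; the length check is phrased against the remaining span, assuming cur <= end, to avoid pointer overflow):

#include <errno.h>
#include <string.h>

struct in_iter { const char *cur, *end; };

static int iter_fetch(struct in_iter *it, void *val, size_t len)
{
        if (len > (size_t)(it->end - it->cur))
                return -EINVAL;                 /* would overrun the buffer */
        memcpy(val, it->cur, len);
        it->cur += len;
        return 0;
}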
drivers/infiniband/hw/cxgb4/device.c
476
dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
drivers/infiniband/hw/cxgb4/device.c
479
dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
drivers/infiniband/hw/cxgb4/device.c
482
dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
drivers/infiniband/hw/cxgb4/device.c
485
dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
drivers/infiniband/hw/cxgb4/device.c
488
dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
drivers/infiniband/hw/cxgb4/device.c
491
dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
drivers/infiniband/hw/cxgb4/device.c
494
dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
drivers/infiniband/hw/cxgb4/device.c
761
rdev->stats.qid.cur -= rdev->qpmask + 1;
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
122
u64 cur;
drivers/infiniband/hw/cxgb4/mem.c
302
rdev->stats.stag.cur += 32;
drivers/infiniband/hw/cxgb4/mem.c
303
if (rdev->stats.stag.cur > rdev->stats.stag.max)
drivers/infiniband/hw/cxgb4/mem.c
304
rdev->stats.stag.max = rdev->stats.stag.cur;
drivers/infiniband/hw/cxgb4/mem.c
339
rdev->stats.stag.cur -= 32;
drivers/infiniband/hw/cxgb4/provider.c
203
rhp->rdev.stats.pd.cur--;
drivers/infiniband/hw/cxgb4/provider.c
232
rhp->rdev.stats.pd.cur++;
drivers/infiniband/hw/cxgb4/provider.c
233
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
drivers/infiniband/hw/cxgb4/provider.c
234
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
drivers/infiniband/hw/cxgb4/resource.c
126
rdev->stats.qid.cur += rdev->qpmask + 1;
drivers/infiniband/hw/cxgb4/resource.c
157
if (rdev->stats.qid.cur > rdev->stats.qid.max)
drivers/infiniband/hw/cxgb4/resource.c
158
rdev->stats.qid.max = rdev->stats.qid.cur;
drivers/infiniband/hw/cxgb4/resource.c
200
rdev->stats.qid.cur += rdev->qpmask + 1;
drivers/infiniband/hw/cxgb4/resource.c
231
if (rdev->stats.qid.cur > rdev->stats.qid.max)
drivers/infiniband/hw/cxgb4/resource.c
232
rdev->stats.qid.max = rdev->stats.qid.cur;
drivers/infiniband/hw/cxgb4/resource.c
271
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
drivers/infiniband/hw/cxgb4/resource.c
272
if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
drivers/infiniband/hw/cxgb4/resource.c
273
rdev->stats.pbl.max = rdev->stats.pbl.cur;
drivers/infiniband/hw/cxgb4/resource.c
294
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
drivers/infiniband/hw/cxgb4/resource.c
353
rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
drivers/infiniband/hw/cxgb4/resource.c
354
if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
drivers/infiniband/hw/cxgb4/resource.c
355
rdev->stats.rqt.max = rdev->stats.rqt.cur;
drivers/infiniband/hw/cxgb4/resource.c
376
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
drivers/infiniband/hw/cxgb4/resource.c
438
rdev->stats.srqt.cur++;
drivers/infiniband/hw/cxgb4/resource.c
439
if (rdev->stats.srqt.cur > rdev->stats.srqt.max)
drivers/infiniband/hw/cxgb4/resource.c
440
rdev->stats.srqt.max = rdev->stats.srqt.cur;
drivers/infiniband/hw/cxgb4/resource.c
449
rdev->stats.srqt.cur--;
drivers/infiniband/hw/cxgb4/resource.c
464
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
drivers/infiniband/hw/cxgb4/resource.c
465
if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
drivers/infiniband/hw/cxgb4/resource.c
466
rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
drivers/infiniband/hw/cxgb4/resource.c
476
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
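Nearly every cxgb4 line above is one idiom: adjust a resource's current usage and record its high-water mark (iw_cxgb4.h keeps cur alongside total and max). A hypothetical pair of helpers capturing the pattern:

	struct res_stat {
		unsigned long long total, cur, max;
	};

	static void res_stat_get(struct res_stat *s, unsigned long long n)
	{
		s->cur += n;
		if (s->cur > s->max)
			s->max = s->cur;	/* track the high-water mark */
	}

	static void res_stat_put(struct res_stat *s, unsigned long long n)
	{
		s->cur -= n;
	}
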
drivers/infiniband/hw/hns/hns_roce_hem.c
1101
struct hns_roce_hem_item *cur, *pre;
drivers/infiniband/hw/hns/hns_roce_hem.c
1133
cur = hem_list_search_item(&mid_bt[level], offset);
drivers/infiniband/hw/hns/hns_roce_hem.c
1134
if (cur) {
drivers/infiniband/hw/hns/hns_roce_hem.c
1135
hem_ptrs[level] = cur;
drivers/infiniband/hw/hns/hns_roce_hem.c
1148
cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
drivers/infiniband/hw/hns/hns_roce_hem.c
1150
if (!cur) {
drivers/infiniband/hw/hns/hns_roce_hem.c
1154
hem_ptrs[level] = cur;
drivers/infiniband/hw/hns/hns_roce_hem.c
1155
list_add(&cur->list, &temp_list[level]);
drivers/infiniband/hw/hns/hns_roce_hem.c
1157
list_add(&cur->sibling, &temp_list[0]);
drivers/infiniband/hw/hns/hns_roce_hem.c
1162
step = (cur->start - pre->start) / step * BA_BYTE_LEN;
drivers/infiniband/hw/hns/hns_roce_hem.c
1163
hem_list_link_bt(pre->addr + step, cur->dma_addr);
drivers/infiniband/hw/hns/hns_roce_qp.c
1585
u32 cur;
drivers/infiniband/hw/hns/hns_roce_qp.c
1587
cur = hr_wq->head - hr_wq->tail;
drivers/infiniband/hw/hns/hns_roce_qp.c
1588
if (likely(cur + nreq < hr_wq->wqe_cnt))
drivers/infiniband/hw/hns/hns_roce_qp.c
1593
cur = hr_wq->head - hr_wq->tail;
drivers/infiniband/hw/hns/hns_roce_qp.c
1596
return cur + nreq >= hr_wq->wqe_cnt;
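hns_roce_qp.c above, and the mlx4/mlx5/mthca clusters that follow, all test work-queue overflow the same way: head and tail are free-running unsigned counters, so head - tail is the occupancy even across wraparound. A minimal sketch with hypothetical types:

	#include <stdbool.h>

	struct wq {
		unsigned int head;	/* bumped by the producer */
		unsigned int tail;	/* bumped by the consumer */
		unsigned int max_post;
	};

	static bool wq_overflow(const struct wq *wq, unsigned int nreq)
	{
		unsigned int cur = wq->head - wq->tail;	/* wrap-safe occupancy */

		return cur + nreq >= wq->max_post;
	}
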
drivers/infiniband/hw/mlx4/cq.c
616
unsigned cur;
drivers/infiniband/hw/mlx4/cq.c
620
cur = wq->head - wq->tail;
drivers/infiniband/hw/mlx4/cq.c
622
if (cur == 0)
drivers/infiniband/hw/mlx4/cq.c
625
for (i = 0; i < cur && *npolled < num_entries; i++) {
drivers/infiniband/hw/mlx4/qp.c
3287
unsigned cur;
drivers/infiniband/hw/mlx4/qp.c
3290
cur = wq->head - wq->tail;
drivers/infiniband/hw/mlx4/qp.c
3291
if (likely(cur + nreq < wq->max_post))
drivers/infiniband/hw/mlx4/qp.c
3296
cur = wq->head - wq->tail;
drivers/infiniband/hw/mlx4/qp.c
3299
return cur + nreq >= wq->max_post;
drivers/infiniband/hw/mlx5/cq.c
401
unsigned int cur;
drivers/infiniband/hw/mlx5/cq.c
406
cur = wq->head - wq->tail;
drivers/infiniband/hw/mlx5/cq.c
409
if (cur == 0)
drivers/infiniband/hw/mlx5/cq.c
412
for (i = 0; i < cur && np < num_entries; i++) {
drivers/infiniband/hw/mlx5/mr.c
672
struct mlx5_cache_ent *cur;
drivers/infiniband/hw/mlx5/mr.c
677
cur = rb_entry(*new, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
679
cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key);
drivers/infiniband/hw/mlx5/mr.c
700
struct mlx5_cache_ent *cur, *smallest = NULL;
drivers/infiniband/hw/mlx5/mr.c
708
cur = rb_entry(node, struct mlx5_cache_ent, node);
drivers/infiniband/hw/mlx5/mr.c
709
cmp = cache_ent_key_cmp(cur->rb_key, rb_key);
drivers/infiniband/hw/mlx5/mr.c
711
smallest = cur;
drivers/infiniband/hw/mlx5/mr.c
717
return cur;
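mlx5/mr.c above searches an rbtree for the smallest cache entry whose key is at least the request, keeping the best candidate in smallest while descending. The same lower-bound walk on a plain binary search tree (hypothetical node type, not the mlx5 one):

	struct node { int key; struct node *left, *right; };

	static struct node *lower_bound(struct node *root, int key)
	{
		struct node *smallest = NULL;	/* best candidate so far */

		while (root) {
			if (root->key >= key) {
				smallest = root;	/* candidate; look for a smaller one */
				root = root->left;
			} else {
				root = root->right;
			}
		}
		return smallest;
	}
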
drivers/infiniband/hw/mlx5/wr.c
32
unsigned int cur;
drivers/infiniband/hw/mlx5/wr.c
34
cur = wq->head - wq->tail;
drivers/infiniband/hw/mlx5/wr.c
35
if (likely(cur + nreq < wq->max_post))
drivers/infiniband/hw/mlx5/wr.c
40
cur = wq->head - wq->tail;
drivers/infiniband/hw/mlx5/wr.c
43
return cur + nreq >= wq->max_post;
drivers/infiniband/hw/mthca/mthca_cmd.c
1726
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
drivers/infiniband/hw/mthca/mthca_cmd.c
1774
if (op[cur][next] == CMD_ERR2RST_QPEE) {
drivers/infiniband/hw/mthca/mthca_cmd.c
1789
op[cur][next], CMD_TIME_CLASS_C);
drivers/infiniband/hw/mthca/mthca_cmd.c
1823
op_mod, op[cur][next], CMD_TIME_CLASS_C);
drivers/infiniband/hw/mthca/mthca_cmd.h
308
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
drivers/infiniband/hw/mthca/mthca_qp.c
1571
unsigned cur;
drivers/infiniband/hw/mthca/mthca_qp.c
1574
cur = wq->head - wq->tail;
drivers/infiniband/hw/mthca/mthca_qp.c
1575
if (likely(cur + nreq < wq->max))
drivers/infiniband/hw/mthca/mthca_qp.c
1580
cur = wq->head - wq->tail;
drivers/infiniband/hw/mthca/mthca_qp.c
1583
return cur + nreq >= wq->max;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
906
struct list_head *cur;
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
910
list_for_each(cur, head) {
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
912
qp = list_entry(cur, struct ocrdma_qp, sq_entry);
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
914
qp = list_entry(cur, struct ocrdma_qp, rq_entry);
drivers/infiniband/hw/qedr/verbs.c
3323
u32 cur;
drivers/infiniband/hw/qedr/verbs.c
3334
cur = min_t(u32, len, seg_siz);
drivers/infiniband/hw/qedr/verbs.c
3335
memcpy(seg_prt, src, cur);
drivers/infiniband/hw/qedr/verbs.c
3338
seg_prt += cur;
drivers/infiniband/hw/qedr/verbs.c
3339
seg_siz -= cur;
drivers/infiniband/hw/qedr/verbs.c
3342
src += cur;
drivers/infiniband/hw/qedr/verbs.c
3343
len -= cur;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
360
struct arm_vsmmu_invalidation_cmd *cur;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
367
cur = cmds;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
378
while (cur != end) {
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
379
ret = arm_vsmmu_convert_user_cmd(vsmmu, cur);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
384
cur++;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
385
if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1)
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
390
cur - last, true);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
392
cur--;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
395
last = cur;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
398
array->entry_num = cur - cmds;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
146
struct kunit *test, const struct arm_smmu_ste *cur,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
150
struct arm_smmu_ste cur_copy = *cur;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
156
.init_entry = cur->data,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
167
arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
182
struct kunit *test, const struct arm_smmu_ste *cur,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
185
arm_smmu_v3_test_ste_expect_transition(test, cur, target,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
190
struct kunit *test, const struct arm_smmu_ste *cur,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
193
arm_smmu_v3_test_ste_expect_transition(test, cur, target,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
442
struct kunit *test, const struct arm_smmu_cd *cur,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
446
struct arm_smmu_cd cur_copy = *cur;
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
452
.init_entry = cur->data,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
463
arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
478
struct kunit *test, const struct arm_smmu_cd *cur,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
481
arm_smmu_v3_test_cd_expect_transition(test, cur, target,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
486
struct kunit *test, const struct arm_smmu_cd *cur,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
489
arm_smmu_v3_test_cd_expect_transition(test, cur, target,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1097
void arm_smmu_get_ste_update_safe(const __le64 *cur, const __le64 *target,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1122
if (!((cur[1] | target[1]) & cpu_to_le64(eats_s1chk)) &&
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
1123
!((cur[2] | target[2]) & cpu_to_le64(STRTAB_STE_2_S2S)))
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
901
void (*get_update_safe)(const __le64 *cur, const __le64 *target,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
914
void arm_smmu_get_ste_update_safe(const __le64 *cur, const __le64 *target,
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
916
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
drivers/iommu/arm/arm-smmu/arm-smmu.c
181
it.cur = &pci_sid;
drivers/iommu/dma-iommu.c
1264
struct scatterlist *s, *cur = sg;
drivers/iommu/dma-iommu.c
1281
cur = sg_next(cur);
drivers/iommu/dma-iommu.c
1284
sg_dma_address(cur) = s_dma_addr;
drivers/iommu/dma-iommu.c
1285
sg_dma_len(cur) = s_length;
drivers/iommu/dma-iommu.c
1286
sg_dma_mark_bus_address(cur);
drivers/iommu/dma-iommu.c
1309
cur = sg_next(cur);
drivers/iommu/dma-iommu.c
1313
sg_dma_address(cur) = dma_addr + s_iova_off;
drivers/iommu/dma-iommu.c
1316
sg_dma_len(cur) = cur_len;
drivers/iommu/iommu-pages.c
192
struct ioptdesc *cur;
drivers/iommu/iommu-pages.c
195
list_for_each_entry(cur, &list->pages, iopt_freelist_elm) {
drivers/iommu/iommu-pages.c
196
if (WARN_ON(cur->incoherent))
drivers/iommu/iommu-pages.c
200
folio_address(ioptdesc_folio(cur)), dma_dev);
drivers/iommu/iommu-pages.c
220
struct ioptdesc *cur;
drivers/iommu/iommu-pages.c
222
list_for_each_entry(cur, &list->pages, iopt_freelist_elm) {
drivers/iommu/iommu-pages.c
223
struct folio *folio = ioptdesc_folio(cur);
drivers/iommu/iommu-pages.c
225
if (!cur->incoherent)
drivers/iommu/iommu-pages.c
228
ioptdesc_mem_size(cur), DMA_TO_DEVICE);
drivers/iommu/iommu-pages.c
229
cur->incoherent = 0;
drivers/iommu/iommufd/device.c
374
struct iommufd_sw_msi_map *cur;
drivers/iommu/iommufd/device.c
383
list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
drivers/iommu/iommufd/device.c
386
if (cur->sw_msi_start != igroup->sw_msi_start ||
drivers/iommu/iommufd/device.c
387
!test_bit(cur->id, igroup->required_sw_msi.bitmap))
drivers/iommu/iommufd/device.c
390
rc = iommufd_sw_msi_install(ictx, hwpt_paging, cur);
drivers/iommu/iommufd/device.c
737
struct iommufd_device *cur;
drivers/iommu/iommufd/device.c
743
xa_for_each(&attach->device_array, index, cur)
drivers/iommu/iommufd/device.c
744
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, cur->dev);
drivers/iommu/iommufd/device.c
753
struct iommufd_device *cur;
drivers/iommu/iommufd/device.c
762
xa_for_each(&attach->device_array, index, cur) {
drivers/iommu/iommufd/device.c
764
&hwpt_paging->ioas->iopt, cur->dev, NULL);
drivers/iommu/iommufd/driver.c
188
struct iommufd_sw_msi_map *cur;
drivers/iommu/iommufd/driver.c
193
list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
drivers/iommu/iommufd/driver.c
194
if (cur->sw_msi_start != sw_msi_start)
drivers/iommu/iommufd/driver.c
196
max_pgoff = max(max_pgoff, cur->pgoff + 1);
drivers/iommu/iommufd/driver.c
197
if (cur->msi_addr == msi_addr)
drivers/iommu/iommufd/driver.c
198
return cur;
drivers/iommu/iommufd/driver.c
205
cur = kzalloc_obj(*cur);
drivers/iommu/iommufd/driver.c
206
if (!cur)
drivers/iommu/iommufd/driver.c
209
cur->sw_msi_start = sw_msi_start;
drivers/iommu/iommufd/driver.c
210
cur->msi_addr = msi_addr;
drivers/iommu/iommufd/driver.c
211
cur->pgoff = max_pgoff;
drivers/iommu/iommufd/driver.c
212
cur->id = ictx->sw_msi_id++;
drivers/iommu/iommufd/driver.c
213
list_add_tail(&cur->sw_msi_item, &ictx->sw_msi_list);
drivers/iommu/iommufd/driver.c
214
return cur;
drivers/iommu/iommufd/eventq.c
227
struct iommufd_vevent *cur, *next;
drivers/iommu/iommufd/eventq.c
231
list_for_each_entry_safe(cur, next, &eventq->deliver, node) {
drivers/iommu/iommufd/eventq.c
232
list_del(&cur->node);
drivers/iommu/iommufd/eventq.c
233
if (cur != &veventq->lost_events_header)
drivers/iommu/iommufd/eventq.c
234
kfree(cur);
drivers/iommu/iommufd/eventq.c
306
struct iommufd_vevent *cur;
drivers/iommu/iommufd/eventq.c
313
while ((cur = iommufd_veventq_deliver_fetch(veventq))) {
drivers/iommu/iommufd/eventq.c
316
iommufd_veventq_deliver_restore(veventq, cur);
drivers/iommu/iommufd/eventq.c
319
hdr = &cur->header;
drivers/iommu/iommufd/eventq.c
322
if (!vevent_for_lost_events_header(cur) &&
drivers/iommu/iommufd/eventq.c
323
sizeof(hdr) + cur->data_len > count - done) {
drivers/iommu/iommufd/eventq.c
324
iommufd_veventq_deliver_restore(veventq, cur);
drivers/iommu/iommufd/eventq.c
329
iommufd_veventq_deliver_restore(veventq, cur);
drivers/iommu/iommufd/eventq.c
335
if (cur->data_len &&
drivers/iommu/iommufd/eventq.c
336
copy_to_user(buf + done, cur->event_data, cur->data_len)) {
drivers/iommu/iommufd/eventq.c
337
iommufd_veventq_deliver_restore(veventq, cur);
drivers/iommu/iommufd/eventq.c
342
if (!vevent_for_lost_events_header(cur))
drivers/iommu/iommufd/eventq.c
345
done += cur->data_len;
drivers/iommu/iommufd/eventq.c
346
kfree(cur);
drivers/iommu/iommufd/main.c
329
struct iommufd_sw_msi_map *cur;
drivers/iommu/iommufd/main.c
382
list_for_each_entry_safe(cur, next, &ictx->sw_msi_list, sw_msi_item)
drivers/iommu/iommufd/main.c
383
kfree(cur);
drivers/iommu/iommufd/pages.c
516
unsigned int cur = 0;
drivers/iommu/iommufd/pages.c
530
while (cur < batch->end) {
drivers/iommu/iommufd/pages.c
532
next_iova + batch->npfns[cur] * PAGE_SIZE -
drivers/iommu/iommufd/pages.c
537
PFN_PHYS(batch->pfns[cur]) + page_offset,
drivers/iommu/iommufd/pages.c
541
PFN_PHYS(batch->pfns[cur]) + page_offset,
drivers/iommu/iommufd/pages.c
548
cur++;
drivers/iommu/iommufd/pages.c
701
unsigned int cur = 0;
drivers/iommu/iommufd/pages.c
704
if (batch->npfns[cur] > first_page_off)
drivers/iommu/iommufd/pages.c
706
first_page_off -= batch->npfns[cur];
drivers/iommu/iommufd/pages.c
707
cur++;
drivers/iommu/iommufd/pages.c
712
batch->npfns[cur] - first_page_off);
drivers/iommu/iommufd/pages.c
715
pfn_to_page(batch->pfns[cur] + first_page_off),
drivers/iommu/iommufd/pages.c
718
cur++;
drivers/iommu/iommufd/pages.c
745
unsigned int cur = 0;
drivers/iommu/iommufd/pages.c
747
while (cur < batch->end) {
drivers/iommu/iommufd/pages.c
750
copy_data_page(pfn_to_page(batch->pfns[cur] + npage), data,
drivers/iommu/iommufd/pages.c
757
if (npage == batch->npfns[cur]) {
drivers/iommu/iommufd/pages.c
759
cur++;
drivers/iommu/iommufd/selftest.c
666
struct iommu_viommu_invalidate_selftest *cur;
drivers/iommu/iommufd/selftest.c
680
cur = cmds;
drivers/iommu/iommufd/selftest.c
690
while (cur != end) {
drivers/iommu/iommufd/selftest.c
695
if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
drivers/iommu/iommufd/selftest.c
700
if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) {
drivers/iommu/iommufd/selftest.c
707
(unsigned long)cur->vdev_id);
drivers/iommu/iommufd/selftest.c
715
if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
drivers/iommu/iommufd/selftest.c
720
mdev->cache[cur->cache_id] = 0;
drivers/iommu/iommufd/selftest.c
724
cur++;
drivers/iommu/iommufd/selftest.c
727
array->entry_num = cur - cmds;
drivers/iommu/iommufd/vfio_compat.c
373
struct vfio_info_cap_header __user *cur,
drivers/iommu/iommufd/vfio_compat.c
377
container_of(cur,
drivers/iommu/iommufd/vfio_compat.c
410
struct vfio_info_cap_header __user *cur,
drivers/iommu/iommufd/vfio_compat.c
429
copy_to_user(cur, &cap_dma, sizeof(cap_dma)))
drivers/iommu/iommufd/vfio_compat.c
438
struct vfio_info_cap_header __user *cur,
drivers/iommu/virtio-iommu.c
535
size_t cur = 0;
drivers/iommu/virtio-iommu.c
566
cur < viommu->probe_size) {
drivers/iommu/virtio-iommu.c
580
cur += len;
drivers/iommu/virtio-iommu.c
581
if (cur >= viommu->probe_size)
drivers/iommu/virtio-iommu.c
584
prop = (void *)probe->properties + cur;
drivers/leds/leds-powernv.c
246
const char *cur = NULL;
drivers/leds/leds-powernv.c
255
while ((cur = of_prop_next_string(p, cur)) != NULL) {
drivers/leds/leds-powernv.c
264
rc = powernv_led_create(dev, powernv_led, cur);
drivers/md/bcache/journal.c
621
j->cur = (j->cur == j->w)
drivers/md/bcache/journal.c
632
j->cur->data->seq = ++j->seq;
drivers/md/bcache/journal.c
633
j->cur->dirty = false;
drivers/md/bcache/journal.c
634
j->cur->need_write = false;
drivers/md/bcache/journal.c
635
j->cur->data->keys = 0;
drivers/md/bcache/journal.c
654
struct journal_write *w = (j->cur == j->w)
drivers/md/bcache/journal.c
676
struct journal_write *w = c->journal.cur;
drivers/md/bcache/journal.c
761
struct journal_write *w = c->journal.cur;
drivers/md/bcache/journal.c
787
struct journal_write *w = c->journal.cur;
drivers/md/bcache/journal.c
835
if (c->journal.cur->dirty)
drivers/md/bcache/journal.h
122
struct journal_write w[2], *cur;
drivers/md/bcache/super.c
1751
if (c->journal.cur) {
drivers/md/dm-ioctl.c
1505
static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
drivers/md/dm-ioctl.c
1507
if (cur == new ||
drivers/md/dm-ioctl.c
1508
(cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
drivers/media/common/cx2341x.c
1318
return ctrl && ctrl->val != ctrl->cur.val;
drivers/media/i2c/adp1653.c
150
ctrl->cur.val = 0;
drivers/media/i2c/adp1653.c
153
ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
drivers/media/i2c/adp1653.c
155
ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
drivers/media/i2c/adp1653.c
157
ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
drivers/media/i2c/adp1653.c
159
ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
drivers/media/i2c/imx274.c
1299
v_pos = imx274->ctrls.vflip->cur.val ?
drivers/media/i2c/imx296.c
331
vmax = format->height + sensor->vblank->cur.val;
drivers/media/i2c/imx296.c
560
format->height + sensor->vblank->cur.val, &ret);
drivers/media/i2c/imx415.c
732
vmax = format->height + sensor->vblank->cur.val;
drivers/media/i2c/lm3560.c
179
ctrl->cur.val = fault;
drivers/media/i2c/max2175.c
1012
max2175_tune_rf_freq(ctx, ctx->freq, ctx->hsls->cur.val);
drivers/media/i2c/max2175.c
1016
ctx->hsls->cur.val);
drivers/media/i2c/max2175.c
1088
ctx->rx_mode->cur.val = rx_mode;
drivers/media/i2c/max2175.c
1091
return max2175_tune_rf_freq(ctx, freq, ctx->hsls->cur.val);
drivers/media/i2c/max2175.c
1112
max2175_freq_rx_mode_valid(ctx, ctx->rx_mode->cur.val, freq))
drivers/media/i2c/max2175.c
1113
ret = max2175_tune_rf_freq(ctx, freq, ctx->hsls->cur.val);
drivers/media/i2c/max2175.c
1119
ret, ctx->freq, ctx->mode_resolved, ctx->rx_mode->cur.val);
drivers/media/i2c/max2175.c
791
max2175_set_hsls(ctx, ctx->hsls->cur.val);
drivers/media/i2c/max2175.c
794
max2175_i2s_enable(ctx, ctx->i2s_en->cur.val);
drivers/media/i2c/msp3400-driver.c
392
state->volume->val = state->volume->cur.val;
drivers/media/i2c/msp3400-driver.c
393
state->muted->val = state->muted->cur.val;
drivers/media/i2c/mt9m114.c
1155
sensor->pa.exposure->cur.val = sensor->pa.exposure->val;
drivers/media/i2c/mt9m114.c
1159
sensor->pa.gain->cur.val = sensor->pa.gain->val;
drivers/media/i2c/mt9p031.c
739
if (mt9p031->blc_auto->cur.val != 0) {
drivers/media/i2c/mt9p031.c
746
if (mt9p031->blc_offset->cur.val != 0) {
drivers/media/i2c/mt9p031.c
748
mt9p031->blc_offset->cur.val);
drivers/media/i2c/ov64a40.c
2903
timings = ov64a40_get_timings(ov64a40, ov64a40->link_freq->cur.val);
drivers/media/i2c/ov64a40.c
2981
timings = ov64a40_get_timings(ov64a40, ov64a40->link_freq->cur.val);
drivers/media/i2c/ov64a40.c
2986
delay += DIV_ROUND_UP(timings->ppl * 4 * ov64a40->exposure->cur.val,
drivers/media/i2c/ov64a40.c
3170
ov64a40->link_freq->cur.val);
drivers/media/i2c/ov64a40.c
3274
int exp_val = min(ov64a40->exposure->cur.val, exp_max);
drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
188
if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_EXPOSURE) {
drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
195
if (((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_WHITE_BALANCE)
drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
203
if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
drivers/media/i2c/vd55g1.c
1472
if (ctrl->val != ctrl->cur.val) {
drivers/media/i2c/vd55g1.c
887
sensor->expo_ctrl->cur.val = exposure;
drivers/media/i2c/vd55g1.c
888
sensor->again_ctrl->cur.val = again;
drivers/media/i2c/vd55g1.c
889
sensor->dgain_ctrl->cur.val = dgain;
drivers/media/i2c/vd56g3.c
370
sensor->expo_ctrl->cur.val = exposure;
drivers/media/i2c/vd56g3.c
371
sensor->again_ctrl->cur.val = again;
drivers/media/i2c/vd56g3.c
372
sensor->dgain_ctrl->cur.val = dgain;
drivers/media/pci/cx18/cx18-av-core.c
251
state->volume->cur.val = state->volume->default_value = default_volume;
drivers/media/pci/cx18/cx18-driver.c
744
cx->temporal_strength = cx->cxhdl.video_temporal_filter->cur.val;
drivers/media/pci/cx18/cx18-driver.c
745
cx->spatial_strength = cx->cxhdl.video_spatial_filter->cur.val;
drivers/media/pci/cx18/cx18-driver.c
746
cx->filter_mode = cx->cxhdl.video_spatial_filter_mode->cur.val |
drivers/media/pci/cx18/cx18-driver.c
747
(cx->cxhdl.video_temporal_filter_mode->cur.val << 1) |
drivers/media/pci/cx18/cx18-driver.c
748
(cx->cxhdl.video_median_filter_type->cur.val << 2);
drivers/media/pci/mantis/mantis_ioc.c
69
u32 cur;
drivers/media/pci/mantis/mantis_ioc.c
72
cur = mmread(MANTIS_GPIF_ADDR);
drivers/media/pci/mantis/mantis_ioc.c
74
mantis->gpio_status = cur | (1 << bitpos);
drivers/media/pci/mantis/mantis_ioc.c
76
mantis->gpio_status = cur & (~(1 << bitpos));
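mantis_ioc.c above is a read-modify-write of a single GPIO bit in a shadow register. The generic form of that idiom, as a hypothetical helper:

	#include <stdbool.h>

	static unsigned int rmw_bit(unsigned int cur, unsigned int bitpos, bool set)
	{
		if (set)
			return cur | (1U << bitpos);
		return cur & ~(1U << bitpos);
	}
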
drivers/media/platform/qcom/camss/camss-csid.c
1264
csid->testgen_mode->cur.val != 0)
drivers/media/platform/qcom/camss/camss-csid.c
845
csid->testgen_mode->cur.val == 0) {
drivers/media/platform/qcom/camss/camss-csid.c
896
csid->testgen_mode->cur.val == 0) {
drivers/media/platform/renesas/vsp1/vsp1_hgo.c
157
hgo->max_rgb = hgo->ctrls.max_rgb->cur.val;
drivers/media/platform/renesas/vsp1/vsp1_hgo.c
159
hgo->num_bins = hgo_num_bins[hgo->ctrls.num_bins->cur.val];
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
164
struct rkvdec_vp9_frame_info cur;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
419
seg = vp9_ctx->last.valid ? &vp9_ctx->last.seg : &vp9_ctx->cur.seg;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
461
vp9_ctx->cur.valid = true;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
462
vp9_ctx->cur.reference_mode = dec_params->reference_mode;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
463
vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
464
vp9_ctx->cur.flags = dec_params->flags;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
465
vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
466
vp9_ctx->cur.seg = dec_params->seg;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
467
vp9_ctx->cur.lf = dec_params->lf;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
472
vp9_ctx->last = vp9_ctx->cur;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
538
vp9_ctx->cur.segmapid = vp9_ctx->last.segmapid;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
543
vp9_ctx->cur.segmapid++;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
551
regs->vp9.reg28.tx_mode = vp9_ctx->cur.tx_mode;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
561
lf = &vp9_ctx->cur.lf;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
640
(RKVDEC_VP9_MAX_SEGMAP_SIZE * (!vp9_ctx->cur.segmapid));
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
644
(RKVDEC_VP9_MAX_SEGMAP_SIZE * vp9_ctx->cur.segmapid);
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
724
vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
745
vp9_ctx->cur.frame_context_idx = fctx_idx;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
837
if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
840
fctx_idx = vp9_ctx->cur.frame_context_idx;
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
842
if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
845
bool frame_is_intra = vp9_ctx->cur.flags &
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
883
vp9_ctx->cur.reference_mode,
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
884
vp9_ctx->cur.interpolation_filter,
drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c
885
vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
642
fimc_set_color_effect(ctx, ctrls->colorfx->cur.val);
drivers/media/platform/samsung/exynos4-is/fimc-core.c
667
if (ctrl->cur.val > ctrl->maximum)
drivers/media/platform/samsung/exynos4-is/fimc-core.c
668
ctrl->cur.val = ctrl->maximum;
drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c
19
char *cur = str;
drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c
25
snprintf(cur, left, "[MJPEG header]\n"
drivers/media/platform/ti/vpe/vpe.c
829
const struct vpe_dei_regs *cur = &dei_regs;
drivers/media/platform/ti/vpe/vpe.c
831
dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
drivers/media/platform/ti/vpe/vpe.c
832
dei_mmr[3] = cur->edi_config_reg;
drivers/media/platform/ti/vpe/vpe.c
833
dei_mmr[4] = cur->edi_lut_reg0;
drivers/media/platform/ti/vpe/vpe.c
834
dei_mmr[5] = cur->edi_lut_reg1;
drivers/media/platform/ti/vpe/vpe.c
835
dei_mmr[6] = cur->edi_lut_reg2;
drivers/media/platform/ti/vpe/vpe.c
836
dei_mmr[7] = cur->edi_lut_reg3;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
121
vp9_ctx->cur.valid = true;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
122
vp9_ctx->cur.reference_mode = dec_params->reference_mode;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
123
vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
124
vp9_ctx->cur.flags = dec_params->flags;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
125
vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
46
vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
553
hantro_reg_write(ctx->dev, &vp9_transform_mode, vp9_ctx->cur.tx_mode);
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
67
vp9_ctx->cur.frame_context_idx = fctx_idx;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
927
if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
930
fctx_idx = vp9_ctx->cur.frame_context_idx;
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
932
if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
935
bool frame_is_intra = vp9_ctx->cur.flags &
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
986
vp9_ctx->cur.reference_mode,
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
987
vp9_ctx->cur.interpolation_filter,
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
988
vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
995
vp9_ctx->last = vp9_ctx->cur;
drivers/media/platform/verisilicon/hantro_hw.h
246
struct hantro_vp9_frame_info cur;
drivers/media/platform/xilinx/xilinx-tpg.c
220
XTPG_PATTERN_MASK, xtpg->pattern->cur.val);
drivers/media/platform/xilinx/xilinx-tpg.c
226
passthrough = xtpg->pattern->cur.val == 0;
drivers/media/radio/radio-isa.c
281
ops->s_mute_volume(isa, true, isa->volume ? isa->volume->cur.val : 0);
drivers/media/radio/radio-sf16fmr2.c
158
balance = fmr2->balance->cur.val;
drivers/media/radio/radio-sf16fmr2.c
162
volume = fmr2->volume->cur.val;
drivers/media/test-drivers/vicodec/codec-fwht.c
616
decide_blocktype(const u8 *cur, const u8 *reference, s16 *deltablock,
drivers/media/test-drivers/vicodec/codec-fwht.c
626
fill_encoder_block(cur, tmp, stride, input_step);
drivers/media/test-drivers/vivid/vivid-core.c
1776
skip_mask & ~(1ULL << c->cur.val),
drivers/media/test-drivers/vivid/vivid-core.c
1794
skip_mask & ~(1ULL << c->cur.val),
drivers/media/test-drivers/vivid/vivid-ctrls.c
661
output_inst = vivid_ctrl_hdmi_to_output_instance[ctrl->cur.val];
drivers/media/test-drivers/vivid/vivid-ctrls.c
662
index = vivid_ctrl_hdmi_to_output_index[ctrl->cur.val];
drivers/media/test-drivers/vivid/vivid-ctrls.c
679
hdmi_to_output_menu_skip_mask &= ~(1ULL << ctrl->cur.val);
drivers/media/test-drivers/vivid/vivid-ctrls.c
686
if (ctrl->val < FIXED_MENU_ITEMS && ctrl->cur.val < FIXED_MENU_ITEMS)
drivers/media/test-drivers/vivid/vivid-ctrls.c
695
output_inst = vivid_ctrl_svid_to_output_instance[ctrl->cur.val];
drivers/media/test-drivers/vivid/vivid-ctrls.c
696
index = vivid_ctrl_svid_to_output_index[ctrl->cur.val];
drivers/media/test-drivers/vivid/vivid-ctrls.c
710
svid_to_output_menu_skip_mask &= ~(1ULL << ctrl->cur.val);
drivers/media/test-drivers/vivid/vivid-ctrls.c
716
if (ctrl->val < FIXED_MENU_ITEMS && ctrl->cur.val < FIXED_MENU_ITEMS)
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
506
dev->brightness->cur.val,
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
507
dev->contrast->cur.val,
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
508
dev->saturation->cur.val,
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
509
dev->hue->cur.val);
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
513
dev->autogain->cur.val, gain, dev->alpha->cur.val);
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
519
dev->volume->cur.val, dev->mute->cur.val);
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
524
dev->int32->cur.val,
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
525
dev->ro_int32->cur.val,
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
527
dev->bitmask->cur.val);
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
530
dev->boolean->cur.val,
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
531
dev->menu->qmenu[dev->menu->cur.val],
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
535
dev->int_menu->qmenu_int[dev->int_menu->cur.val],
drivers/media/test-drivers/vivid/vivid-kthread-cap.c
536
dev->int_menu->cur.val);
drivers/media/test-drivers/vivid/vivid-radio-common.c
70
rds->picode = dev->radio_tx_rds_pi->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
71
rds->pty = dev->radio_tx_rds_pty->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
72
rds->mono_stereo = dev->radio_tx_rds_mono_stereo->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
73
rds->art_head = dev->radio_tx_rds_art_head->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
74
rds->compressed = dev->radio_tx_rds_compressed->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
75
rds->dyn_pty = dev->radio_tx_rds_dyn_pty->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
76
rds->ta = dev->radio_tx_rds_ta->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
77
rds->tp = dev->radio_tx_rds_tp->cur.val;
drivers/media/test-drivers/vivid/vivid-radio-common.c
78
rds->ms = dev->radio_tx_rds_ms->cur.val;
drivers/media/tuners/e4000.c
382
dev->lna_gain_auto->cur.val, dev->lna_gain_auto->val,
drivers/media/tuners/e4000.c
383
dev->lna_gain->cur.val, dev->lna_gain->val);
drivers/media/tuners/e4000.c
385
if (dev->lna_gain_auto->val && dev->if_gain_auto->cur.val)
drivers/media/tuners/e4000.c
389
else if (dev->if_gain_auto->cur.val)
drivers/media/tuners/e4000.c
418
dev->mixer_gain_auto->cur.val, dev->mixer_gain_auto->val,
drivers/media/tuners/e4000.c
419
dev->mixer_gain->cur.val, dev->mixer_gain->val);
drivers/media/tuners/e4000.c
451
dev->if_gain_auto->cur.val, dev->if_gain_auto->val,
drivers/media/tuners/e4000.c
452
dev->if_gain->cur.val, dev->if_gain->val);
drivers/media/tuners/e4000.c
454
if (dev->if_gain_auto->val && dev->lna_gain_auto->cur.val)
drivers/media/tuners/e4000.c
456
else if (dev->lna_gain_auto->cur.val)
drivers/media/tuners/fc2580.c
474
ctrl->id, ctrl->name, ctrl->cur.val, ctrl->val);
drivers/media/tuners/msi001.c
267
ret = msi001_set_gain(dev, dev->lna_gain->cur.val,
drivers/media/tuners/msi001.c
268
dev->mixer_gain->cur.val, dev->if_gain->cur.val);
drivers/media/tuners/msi001.c
397
dev->mixer_gain->cur.val,
drivers/media/tuners/msi001.c
398
dev->if_gain->cur.val);
drivers/media/tuners/msi001.c
401
ret = msi001_set_gain(dev, dev->lna_gain->cur.val,
drivers/media/tuners/msi001.c
403
dev->if_gain->cur.val);
drivers/media/tuners/msi001.c
406
ret = msi001_set_gain(dev, dev->lna_gain->cur.val,
drivers/media/tuners/msi001.c
407
dev->mixer_gain->cur.val,
drivers/media/usb/airspy/airspy.c
868
s->lna_gain_auto->cur.val, s->lna_gain_auto->val,
drivers/media/usb/airspy/airspy.c
869
s->lna_gain->cur.val, s->lna_gain->val);
drivers/media/usb/airspy/airspy.c
895
s->mixer_gain_auto->cur.val, s->mixer_gain_auto->val,
drivers/media/usb/airspy/airspy.c
896
s->mixer_gain->cur.val, s->mixer_gain->val);
drivers/media/usb/airspy/airspy.c
921
dev_dbg(s->dev, "val=%d->%d\n", s->if_gain->cur.val, s->if_gain->val);
drivers/media/usb/gspca/conex.c
878
setbrightness(gspca_dev, ctrl->val, sd->sat->cur.val);
drivers/media/usb/gspca/conex.c
881
setcontrast(gspca_dev, ctrl->val, sd->sat->cur.val);
drivers/media/usb/gspca/conex.c
884
setbrightness(gspca_dev, sd->brightness->cur.val, ctrl->val);
drivers/media/usb/gspca/conex.c
885
setcontrast(gspca_dev, sd->contrast->cur.val, ctrl->val);
drivers/media/usb/gspca/sn9c20x.c
2233
s32 curqual = sd->jpegqual->cur.val;
drivers/media/usb/gspca/sn9c20x.c
2241
sd->jpegqual->cur.val = new_qual;
drivers/media/usb/gspca/topro.c
3970
s32 old = gspca_dev->gain->cur.val ?
drivers/media/usb/gspca/topro.c
3971
gspca_dev->gain->cur.val : 1;
drivers/media/usb/hackrf/hackrf.c
311
dev->rx_bandwidth->cur.val = uitmp;
drivers/media/usb/hackrf/hackrf.c
327
dev->tx_bandwidth->cur.val = uitmp;
drivers/media/usb/hackrf/hackrf.c
376
dev->rx_rf_gain->cur.val, dev->rx_rf_gain->val);
drivers/media/usb/hackrf/hackrf.c
388
dev->tx_rf_gain->cur.val, dev->tx_rf_gain->val);
drivers/media/usb/hackrf/hackrf.c
400
dev->rx_lna_gain->cur.val, dev->rx_lna_gain->val);
drivers/media/usb/hackrf/hackrf.c
411
dev->rx_if_gain->cur.val, dev->rx_if_gain->val);
drivers/media/usb/hackrf/hackrf.c
422
dev->tx_lna_gain->cur.val, dev->tx_lna_gain->val);
drivers/media/v4l2-core/v4l2-ctrls-core.c
2174
ctrl->cur.val = ctrl->val = def;
drivers/media/v4l2-core/v4l2-ctrls-core.c
2192
ctrl->p_cur.p = &ctrl->cur.val;
drivers/media/v4l2-core/v4l2-ctrls-priv.h
33
return master->is_auto && master->cur.val == master->manual_mode_value;
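The long run of media-driver lines above leans on the v4l2 control convention that ctrl->val is the value being set while ctrl->cur.val is the value currently applied, so ctrl->val != ctrl->cur.val means the hardware needs reprogramming. A hedged sketch of the commit step (hypothetical types, not the v4l2 framework's):

	struct ctrl {
		int val;			/* value requested by the caller */
		struct { int val; } cur;	/* value the hardware currently has */
	};

	static int apply_ctrl(struct ctrl *c)
	{
		if (c->val == c->cur.val)
			return 0;	/* nothing changed */
		/* ... program the hardware with c->val here ... */
		c->cur.val = c->val;	/* commit the new current value */
		return 1;
	}
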
drivers/misc/ibmvmc.c
183
queue->cur = 0;
drivers/misc/ibmvmc.c
211
crq = &queue->msgs[queue->cur];
drivers/misc/ibmvmc.c
213
if (++queue->cur == queue->size)
drivers/misc/ibmvmc.c
214
queue->cur = 0;
drivers/misc/ibmvmc.c
2150
queue->cur = 0;
drivers/misc/ibmvmc.h
154
int size, cur;
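ibmvmc above (and ibmvnic further down) advance a circular message cursor with a compare-and-reset rather than a mask, so the queue size need not be a power of two. A sketch with hypothetical types:

	struct msg;

	struct crq_queue {
		struct msg *msgs;
		int size, cur;
	};

	static struct msg *crq_next(struct crq_queue *q)
	{
		struct msg *m = &q->msgs[q->cur];

		if (++q->cur == q->size)
			q->cur = 0;	/* wrap without requiring size == 2^n */
		return m;
	}
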
drivers/misc/lkdtm/core.c
448
struct crashpoint *cur = &crashpoints[i];
drivers/misc/lkdtm/core.c
450
debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, cur,
drivers/misc/lkdtm/core.c
451
&cur->fops);
drivers/misc/ocxl/pasid.c
15
struct id_range *cur;
drivers/misc/ocxl/pasid.c
18
list_for_each_entry(cur, head, list) {
drivers/misc/ocxl/pasid.c
19
pr_debug("Range %d->%d\n", cur->start, cur->end);
drivers/misc/ocxl/pasid.c
28
struct id_range *cur, *new;
drivers/misc/ocxl/pasid.c
37
list_for_each_entry(cur, head, list) {
drivers/misc/ocxl/pasid.c
38
if ((cur->start - last_end) > size)
drivers/misc/ocxl/pasid.c
40
last_end = cur->end;
drivers/misc/ocxl/pasid.c
41
pos = &cur->list;
drivers/misc/ocxl/pasid.c
65
struct id_range *cur, *tmp;
drivers/misc/ocxl/pasid.c
67
list_for_each_entry_safe(cur, tmp, head, list) {
drivers/misc/ocxl/pasid.c
68
if (cur->start == start && cur->end == (start + size - 1)) {
drivers/misc/ocxl/pasid.c
70
list_del(&cur->list);
drivers/misc/ocxl/pasid.c
71
kfree(cur);
drivers/misc/vmw_vmci/vmci_event.c
50
struct vmci_subscription *cur, *p2;
drivers/misc/vmw_vmci/vmci_event.c
51
list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
drivers/misc/vmw_vmci/vmci_event.c
59
list_del(&cur->node);
drivers/misc/vmw_vmci/vmci_event.c
60
kfree(cur);
drivers/misc/vmw_vmci/vmci_event.c
73
struct vmci_subscription *cur;
drivers/misc/vmw_vmci/vmci_event.c
74
list_for_each_entry(cur, &subscriber_array[e], node) {
drivers/misc/vmw_vmci/vmci_event.c
75
if (cur->id == sub_id)
drivers/misc/vmw_vmci/vmci_event.c
76
return cur;
drivers/misc/vmw_vmci/vmci_event.c
88
struct vmci_subscription *cur;
drivers/misc/vmw_vmci/vmci_event.c
96
list_for_each_entry_rcu(cur, subscriber_list, node) {
drivers/misc/vmw_vmci/vmci_event.c
97
cur->callback(cur->id, &event_msg->event_data,
drivers/misc/vmw_vmci/vmci_event.c
98
cur->callback_data);
drivers/mmc/host/mmci.c
1005
dmae->cur = NULL;
drivers/mmc/host/mmci.c
1098
if (dmae->cur && dmae->desc_current)
drivers/mmc/host/mmci.c
1102
return _mmci_dmae_prep_data(host, data, &dmae->cur,
drivers/mmc/host/mmci.c
1117
dma_async_issue_pending(dmae->cur);
drivers/mmc/host/mmci.c
1135
dmae->cur = next->chan;
drivers/mmc/host/mmci.c
1163
if (dmae->cur == next->chan) {
drivers/mmc/host/mmci.c
1165
dmae->cur = NULL;
drivers/mmc/host/mmci.c
837
struct dma_chan *cur;
drivers/mmc/host/mmci.c
955
dmaengine_terminate_all(dmae->cur);
drivers/mmc/host/mmci.c
957
dmae->cur = NULL;
drivers/mtd/mtdswap.c
206
struct swap_eb *cur;
drivers/mtd/mtdswap.c
211
cur = rb_entry(parent, struct swap_eb, rb);
drivers/mtd/mtdswap.c
212
if (eb->erase_count > cur->erase_count)
drivers/mtd/nand/ecc-sw-hamming.c
121
u32 cur;
drivers/mtd/nand/ecc-sw-hamming.c
152
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
153
tmppar = cur;
drivers/mtd/nand/ecc-sw-hamming.c
154
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
155
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
156
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
158
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
159
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
160
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
161
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
162
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
165
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
166
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
167
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
168
rp6 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
169
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
170
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
171
rp6 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
172
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
173
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
174
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
175
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
176
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
179
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
180
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
181
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
182
rp6 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
183
rp8 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
184
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
185
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
186
rp6 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
187
rp8 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
188
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
189
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
190
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
191
rp8 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
192
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
193
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
194
rp8 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
196
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
197
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
198
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
199
rp6 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
200
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
201
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
202
rp6 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
203
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
204
tmppar ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
205
rp4 ^= cur;
drivers/mtd/nand/ecc-sw-hamming.c
206
cur = *bp++;
drivers/mtd/nand/ecc-sw-hamming.c
207
tmppar ^= cur;
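The long ecc-sw-hamming.c run above is a manually unrolled parity accumulation: each rpN collects the XOR of the words selected by one bit of the word index, and tmppar collects them all. A compact, non-unrolled sketch of the idea (loop form only; the kernel unrolls it for speed, and its exact rpN-to-index-bit mapping may differ):

	static void hamming_row_parity(const unsigned int *bp, unsigned int nwords,
				       unsigned int rp[4], unsigned int *par)
	{
		unsigned int i, k, cur;

		for (i = 0; i < nwords; i++) {
			cur = bp[i];
			*par ^= cur;	/* overall parity */
			for (k = 0; k < 4; k++)
				if (i & (1U << k))
					rp[k] ^= cur;	/* parity keyed to one index bit */
		}
	}
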
drivers/net/bonding/bond_main.c
4465
int cur = 0, max = 0;
drivers/net/bonding/bond_main.c
4479
dev_stack[cur] = now;
drivers/net/bonding/bond_main.c
4480
iter_stack[cur++] = iter;
drivers/net/bonding/bond_main.c
4481
if (max <= cur)
drivers/net/bonding/bond_main.c
4482
max = cur;
drivers/net/bonding/bond_main.c
4487
if (!cur)
drivers/net/bonding/bond_main.c
4489
next = dev_stack[--cur];
drivers/net/bonding/bond_main.c
4490
niter = iter_stack[cur];
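bond_main.c above measures nesting depth by flattening a recursive lower-device walk into two explicit stacks indexed by cur. The same pattern on a hypothetical tree of devices:

	#define MAX_DEPTH 8

	struct dev {
		struct dev *children[4];
		int n_children;
	};

	static int max_nest_depth(struct dev *root)
	{
		struct dev *dev_stack[MAX_DEPTH], *now = root;
		int iter_stack[MAX_DEPTH];
		int cur = 0, max = 0, i = 0;

		for (;;) {
			if (i < now->n_children) {
				if (cur == MAX_DEPTH)
					return -1;	/* too deep for the fixed stacks */
				dev_stack[cur] = now;	/* descend; remember resume point */
				iter_stack[cur++] = i + 1;
				if (max < cur)
					max = cur;
				now = now->children[i];
				i = 0;
			} else {
				if (!cur)
					return max;	/* back at the root */
				now = dev_stack[--cur];	/* pop, resume parent's loop */
				i = iter_stack[cur];
			}
		}
	}
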
drivers/net/dsa/hirschmann/hellcreek.c
1582
const struct tc_taprio_sched_entry *cur, *initial, *next;
drivers/net/dsa/hirschmann/hellcreek.c
1585
cur = initial = &schedule->entries[0];
drivers/net/dsa/hirschmann/hellcreek.c
1586
next = cur + 1;
drivers/net/dsa/hirschmann/hellcreek.c
1594
cur->gate_mask;
drivers/net/dsa/hirschmann/hellcreek.c
1597
cur->gate_mask;
drivers/net/dsa/hirschmann/hellcreek.c
1609
cur->interval & 0x0000ffff,
drivers/net/dsa/hirschmann/hellcreek.c
1612
(cur->interval & 0xffff0000) >> 16,
drivers/net/dsa/hirschmann/hellcreek.c
1621
cur++;
drivers/net/dsa/microchip/ksz8.c
379
u32 cur;
drivers/net/dsa/microchip/ksz8.c
398
cur = last[addr];
drivers/net/dsa/microchip/ksz8.c
399
if (data != cur) {
drivers/net/dsa/microchip/ksz8.c
401
if (data < cur)
drivers/net/dsa/microchip/ksz8.c
403
data -= cur;
drivers/net/dsa/mv88e6xxx/devlink.c
578
u16 *pvt, *cur;
drivers/net/dsa/mv88e6xxx/devlink.c
586
cur = pvt;
drivers/net/dsa/mv88e6xxx/devlink.c
589
err = mv88e6xxx_g2_pvt_read(chip, dev, port, cur);
drivers/net/dsa/mv88e6xxx/devlink.c
593
cur++;
drivers/net/ethernet/actions/owl-emac.c
146
unsigned int cur)
drivers/net/ethernet/actions/owl-emac.c
148
return (cur + 1) & (ring->size - 1);
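owl-emac's ring helper above relies on the ring size being a power of two, so the advance is a mask instead of a branch; contrast with the ibmvmc compare-and-reset sketch earlier. Standalone form:

	/* requires size to be a power of two */
	static unsigned int ring_next(unsigned int cur, unsigned int size)
	{
		return (cur + 1) & (size - 1);
	}
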
drivers/net/ethernet/airoha/airoha_eth.c
711
int cur, done = 0;
drivers/net/ethernet/airoha/airoha_eth.c
714
cur = airoha_qdma_rx_process(q, budget - done);
drivers/net/ethernet/airoha/airoha_eth.c
715
done += cur;
drivers/net/ethernet/airoha/airoha_eth.c
716
} while (cur && done < budget);
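airoha_eth.c above is the standard NAPI-style budget loop: keep calling the worker until it returns nothing or the budget is spent. A skeleton with a stand-in worker (hypothetical; the real worker processes RX descriptors):

	static int process_some(int quota)
	{
		return quota > 4 ? 4 : quota;	/* stand-in: handle up to 4 items */
	}

	static int poll(int budget)
	{
		int cur, done = 0;

		do {
			cur = process_some(budget - done);
			done += cur;
		} while (cur && done < budget);	/* stop when idle or out of budget */

		return done;
	}
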
drivers/net/ethernet/amd/pds_core/core.c
164
struct pdsc_q_info *cur;
drivers/net/ethernet/amd/pds_core/core.c
170
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
drivers/net/ethernet/amd/pds_core/core.c
171
cur->desc = base + (i * q->desc_size);
drivers/net/ethernet/amd/pds_core/core.c
172
init_completion(&cur->completion);
drivers/net/ethernet/amd/pds_core/core.c
178
struct pdsc_cq_info *cur;
drivers/net/ethernet/amd/pds_core/core.c
184
for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
drivers/net/ethernet/amd/pds_core/core.c
185
cur->comp = base + (i * cq->desc_size);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
313
ring->cur = 0;
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
357
ring->cur = 0;
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
431
DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
434
start_index = ring->cur;
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
435
cur_index = ring->cur;
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1454
int start_index = ring->cur;
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1532
unsigned int start_index = ring->cur;
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1574
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1599
int start_index = ring->cur;
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1600
int cur_index = ring->cur;
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1811
ring->cur = cur_index + 1;
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1821
(ring->cur - 1) & (ring->rdesc_count - 1));
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1836
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1838
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1849
xgbe_dump_rx_desc(pdata, ring, ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
1981
ring->cur & (ring->rdesc_count - 1), ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
156
return (ring->rdesc_count - (ring->cur - ring->dirty));
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
161
return (ring->cur - ring->dirty);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2058
while (ring->dirty != ring->cur) {
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2158
unsigned int cur;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2166
cur = ring->cur;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2174
(ring->dirty != cur)) {
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2244
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2247
DBGPR(" cur = %d\n", ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2262
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2271
ring->cur++;
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2398
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/amd/xgbe/xgbe.h
398
unsigned int cur;
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1206
if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1211
if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1212
!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
166
self->aq_nic_cfg.fc.cur = fc;
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
38
enum aq_fc_mode cur;
drivers/net/ethernet/atheros/alx/main.c
124
cur = next;
drivers/net/ethernet/atheros/alx/main.c
127
cur_buf = &rxq->bufs[cur];
drivers/net/ethernet/atheros/alx/main.c
134
rxq->write_idx = cur;
drivers/net/ethernet/atheros/alx/main.c
135
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
drivers/net/ethernet/atheros/alx/main.c
77
u16 cur, next, count = 0;
drivers/net/ethernet/atheros/alx/main.c
79
next = cur = rxq->write_idx;
drivers/net/ethernet/atheros/alx/main.c
82
cur_buf = &rxq->bufs[cur];
drivers/net/ethernet/atheros/alx/main.c
85
struct alx_rfd *rfd = &rxq->rfd[cur];
drivers/net/ethernet/broadcom/asp2/bcmasp.c
601
struct ethtool_rx_flow_spec *cur;
drivers/net/ethernet/broadcom/asp2/bcmasp.c
610
cur = &priv->net_filters[i].fs;
drivers/net/ethernet/broadcom/asp2/bcmasp.c
612
if (cur->flow_type != fs->flow_type ||
drivers/net/ethernet/broadcom/asp2/bcmasp.c
613
cur->ring_cookie != fs->ring_cookie)
drivers/net/ethernet/broadcom/asp2/bcmasp.c
635
if (memcmp(&cur->h_u, &fs->h_u, fs_size) ||
drivers/net/ethernet/broadcom/asp2/bcmasp.c
636
memcmp(&cur->m_u, &fs->m_u, fs_size))
drivers/net/ethernet/broadcom/asp2/bcmasp.c
639
if (cur->flow_type & FLOW_EXT) {
drivers/net/ethernet/broadcom/asp2/bcmasp.c
640
if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype ||
drivers/net/ethernet/broadcom/asp2/bcmasp.c
641
cur->m_ext.vlan_etype != fs->m_ext.vlan_etype ||
drivers/net/ethernet/broadcom/asp2/bcmasp.c
642
cur->h_ext.vlan_tci != fs->h_ext.vlan_tci ||
drivers/net/ethernet/broadcom/asp2/bcmasp.c
643
cur->m_ext.vlan_tci != fs->m_ext.vlan_tci ||
drivers/net/ethernet/broadcom/asp2/bcmasp.c
644
cur->h_ext.data[0] != fs->h_ext.data[0])
drivers/net/ethernet/broadcom/asp2/bcmasp.c
647
if (cur->flow_type & FLOW_MAC_EXT) {
drivers/net/ethernet/broadcom/asp2/bcmasp.c
648
if (memcmp(&cur->h_ext.h_dest,
drivers/net/ethernet/broadcom/asp2/bcmasp.c
650
memcmp(&cur->m_ext.h_dest,
drivers/net/ethernet/broadcom/b44.c
594
u32 cur, cons;
drivers/net/ethernet/broadcom/b44.c
597
cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
drivers/net/ethernet/broadcom/b44.c
598
cur /= sizeof(struct dma_desc);
drivers/net/ethernet/broadcom/b44.c
601
for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2526
u32 cur;
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2531
cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
2533
cur, state);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
3019
u64 cur[BNX2X_MCAST_VEC_SZ], req[BNX2X_MCAST_VEC_SZ];
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
3026
memcpy(cur, o->registry.aprox_match.vec,
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
3051
bool b_current = !!BIT_VEC64_TEST_BIT(cur, i);
drivers/net/ethernet/engleder/tsnep_rxnfc.c
56
struct tsnep_rxnfc_rule *pred, *cur;
drivers/net/ethernet/engleder/tsnep_rxnfc.c
61
list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
drivers/net/ethernet/engleder/tsnep_rxnfc.c
62
if (cur->location >= rule->location)
drivers/net/ethernet/engleder/tsnep_rxnfc.c
64
pred = cur;
drivers/net/ethernet/fealnx.c
1375
struct fealnx_desc *cur;
drivers/net/ethernet/fealnx.c
1385
cur = &np->tx_ring[i];
drivers/net/ethernet/fealnx.c
1386
if (cur->skbuff) {
drivers/net/ethernet/fealnx.c
1387
dma_unmap_single(&np->pci_dev->dev, cur->buffer,
drivers/net/ethernet/fealnx.c
1388
cur->skbuff->len, DMA_TO_DEVICE);
drivers/net/ethernet/fealnx.c
1389
dev_kfree_skb_any(cur->skbuff);
drivers/net/ethernet/fealnx.c
1390
cur->skbuff = NULL;
drivers/net/ethernet/fealnx.c
1392
cur->status = 0;
drivers/net/ethernet/fealnx.c
1393
cur->control = 0; /* needed? */
drivers/net/ethernet/fealnx.c
1395
cur->next_desc = np->tx_ring_dma +
drivers/net/ethernet/fealnx.c
1397
cur->next_desc_logical = &np->tx_ring[i + 1];
drivers/net/ethernet/fealnx.c
1409
struct fealnx_desc *cur = np->cur_rx;
drivers/net/ethernet/fealnx.c
1415
if (cur->skbuff)
drivers/net/ethernet/fealnx.c
1416
cur->status = RXOWN;
drivers/net/ethernet/fealnx.c
1417
cur = cur->next_desc_logical;
drivers/net/ethernet/fealnx.c
1643
struct fealnx_desc *cur;
drivers/net/ethernet/fealnx.c
1646
cur = np->cur_rx;
drivers/net/ethernet/fealnx.c
1649
if ((!(cur->status & RXOWN)) &&
drivers/net/ethernet/fealnx.c
1650
(cur->status & RXLSD))
drivers/net/ethernet/fealnx.c
1653
cur = cur->next_desc_logical;
drivers/net/ethernet/freescale/fec.h
507
struct bufdesc *cur;
drivers/net/ethernet/freescale/fec_main.c
1008
rxq->bd.cur = rxq->bd.base;
drivers/net/ethernet/freescale/fec_main.c
1015
txq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
1510
bdp = txq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
1549
txq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
1582
while (bdp != READ_ONCE(txq->bd.cur)) {
drivers/net/ethernet/freescale/fec_main.c
1715
if (bdp != txq->bd.cur &&
drivers/net/ethernet/freescale/fec_main.c
1900
struct bufdesc *bdp = rxq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
1996
rxq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
2024
struct bufdesc *bdp = rxq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
2172
rxq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
2227
bdp = txq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
2262
txq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
2275
struct bufdesc *bdp = rxq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
2434
rxq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
342
(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
drivers/net/ethernet/freescale/fec_main.c
372
bdp == txq->bd.cur ? 'S' : ' ',
drivers/net/ethernet/freescale/fec_main.c
4692
bdp = txq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
4757
txq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
4974
rxq->bd.cur = cbd_base;
drivers/net/ethernet/freescale/fec_main.c
4990
txq->bd.cur = cbd_base;
drivers/net/ethernet/freescale/fec_main.c
513
struct bufdesc *bdp = txq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
587
bdp = txq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
625
bdp = txq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
713
txq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
841
struct bufdesc *bdp = txq->bd.cur;
drivers/net/ethernet/freescale/fec_main.c
904
txq->bd.cur = bdp;
drivers/net/ethernet/freescale/fec_main.c
913
tmp_bdp = txq->bd.cur;
drivers/net/ethernet/ibm/ibmvnic.c
2221
u8 *data, *cur;
drivers/net/ethernet/ibm/ibmvnic.c
2225
cur = hdr_data + len - tmp_len;
drivers/net/ethernet/ibm/ibmvnic.c
2245
memcpy(data, cur, tmp);
drivers/net/ethernet/ibm/ibmvnic.c
2511
int cur, i;
drivers/net/ethernet/ibm/ibmvnic.c
2515
cur = skb_headlen(skb);
drivers/net/ethernet/ibm/ibmvnic.c
2521
memcpy(dst + cur, skb_frag_address(frag),
drivers/net/ethernet/ibm/ibmvnic.c
2523
cur += skb_frag_size(frag);
drivers/net/ethernet/ibm/ibmvnic.c
3966
scrq->cur = 0;
drivers/net/ethernet/ibm/ibmvnic.c
4663
union sub_crq *entry = &scrq->msgs[scrq->cur];
drivers/net/ethernet/ibm/ibmvnic.c
4683
entry = &scrq->msgs[scrq->cur];
drivers/net/ethernet/ibm/ibmvnic.c
4685
if (++scrq->cur == scrq->size)
drivers/net/ethernet/ibm/ibmvnic.c
4686
scrq->cur = 0;
drivers/net/ethernet/ibm/ibmvnic.c
4705
crq = &queue->msgs[queue->cur];
drivers/net/ethernet/ibm/ibmvnic.c
4707
if (++queue->cur == queue->size)
drivers/net/ethernet/ibm/ibmvnic.c
4708
queue->cur = 0;
drivers/net/ethernet/ibm/ibmvnic.c
6221
crq->cur = 0;
drivers/net/ethernet/ibm/ibmvnic.c
6317
crq->cur = 0;
drivers/net/ethernet/ibm/ibmvnic.h
792
int size, cur;
drivers/net/ethernet/ibm/ibmvnic.h
819
int size, cur;
drivers/net/ethernet/intel/ice/ice.h
549
u32 cur;
drivers/net/ethernet/intel/igc/igc_main.c
4038
struct igc_nfc_rule *pred, *cur;
drivers/net/ethernet/intel/igc/igc_main.c
4046
list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
drivers/net/ethernet/intel/igc/igc_main.c
4047
if (cur->location >= rule->location)
drivers/net/ethernet/intel/igc/igc_main.c
4049
pred = cur;
drivers/net/ethernet/jme.c
1113
if (likely(atmp == dpi->cur)) {
drivers/net/ethernet/jme.c
1140
if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
drivers/net/ethernet/jme.c
1141
if (dpi->attempt < dpi->cur)
drivers/net/ethernet/jme.c
1144
dpi->cur = dpi->attempt;
drivers/net/ethernet/jme.c
2406
switch (jme->dpi.cur) {
drivers/net/ethernet/jme.c
2441
dpi->cur = PCC_P1;
drivers/net/ethernet/jme.c
3023
jme->dpi.cur = PCC_P1;
drivers/net/ethernet/jme.c
360
dpi->cur = PCC_P1;
drivers/net/ethernet/jme.h
146
unsigned char cur;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3825
u32 val, cur;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3835
cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3836
val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3838
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3839
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3856
u32 val, cur;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3866
cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3867
val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3869
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3870
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
drivers/net/ethernet/mediatek/mtk_ppe.c
558
struct mtk_flow_entry *cur;
drivers/net/ethernet/mediatek/mtk_ppe.c
564
hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
drivers/net/ethernet/mediatek/mtk_ppe.c
568
hwe = mtk_foe_get_entry(ppe, cur->hash);
drivers/net/ethernet/mediatek/mtk_ppe.c
572
cur->hash = 0xffff;
drivers/net/ethernet/mediatek/mtk_ppe.c
573
__mtk_foe_entry_clear(ppe, cur);
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
60
const struct reg_dump *cur;
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
63
for (cur = regs; cur < &regs[n_regs]; cur++) {
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
64
switch (cur->type) {
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
67
cur > regs ? "\n" : "",
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
68
cur->name);
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
72
val = wed_r32(dev, cur->offset);
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
75
val = wdma_r32(dev, cur->offset);
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
78
val = wpdma_tx_r32(dev, cur->base, cur->offset);
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
81
val = wpdma_txfree_r32(dev, cur->offset);
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
84
val = wpdma_rx_r32(dev, cur->base, cur->offset);
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
87
print_reg_val(s, cur->name, val);
drivers/net/ethernet/mellanox/mlx4/mcg.c
1000
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
drivers/net/ethernet/mellanox/mlx4/mcg.c
904
struct mlx4_spec_list *cur;
drivers/net/ethernet/mellanox/mlx4/mcg.c
913
list_for_each_entry(cur, &rule->list, list) {
drivers/net/ethernet/mellanox/mlx4/mcg.c
914
switch (cur->id) {
drivers/net/ethernet/mellanox/mlx4/mcg.c
917
"dmac = %pM ", &cur->eth.dst_mac);
drivers/net/ethernet/mellanox/mlx4/mcg.c
918
if (cur->eth.ether_type)
drivers/net/ethernet/mellanox/mlx4/mcg.c
921
be16_to_cpu(cur->eth.ether_type));
drivers/net/ethernet/mellanox/mlx4/mcg.c
922
if (cur->eth.vlan_id)
drivers/net/ethernet/mellanox/mlx4/mcg.c
925
be16_to_cpu(cur->eth.vlan_id));
drivers/net/ethernet/mellanox/mlx4/mcg.c
929
if (cur->ipv4.src_ip)
drivers/net/ethernet/mellanox/mlx4/mcg.c
932
&cur->ipv4.src_ip);
drivers/net/ethernet/mellanox/mlx4/mcg.c
933
if (cur->ipv4.dst_ip)
drivers/net/ethernet/mellanox/mlx4/mcg.c
936
&cur->ipv4.dst_ip);
drivers/net/ethernet/mellanox/mlx4/mcg.c
941
if (cur->tcp_udp.src_port)
drivers/net/ethernet/mellanox/mlx4/mcg.c
944
be16_to_cpu(cur->tcp_udp.src_port));
drivers/net/ethernet/mellanox/mlx4/mcg.c
945
if (cur->tcp_udp.dst_port)
drivers/net/ethernet/mellanox/mlx4/mcg.c
948
be16_to_cpu(cur->tcp_udp.dst_port));
drivers/net/ethernet/mellanox/mlx4/mcg.c
953
"dst-gid = %pI6\n", cur->ib.dst_gid);
drivers/net/ethernet/mellanox/mlx4/mcg.c
956
cur->ib.dst_gid_msk);
drivers/net/ethernet/mellanox/mlx4/mcg.c
961
"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
drivers/net/ethernet/mellanox/mlx4/mcg.c
981
struct mlx4_spec_list *cur;
drivers/net/ethernet/mellanox/mlx4/mcg.c
999
list_for_each_entry(cur, &rule->list, list) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
40
struct mlx5e_rep_bond_metadata *cur;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
42
list_for_each_entry(cur, &uplink_priv->bond->metadata_list, list) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
43
if (cur->lag_dev == lag_dev) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
44
found = cur;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
57
struct mlx5e_rep_bond_slave_entry *cur;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
59
list_for_each_entry(cur, &mdata->slaves_list, list) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
60
if (cur->netdev == netdev) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
61
found = cur;
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
163
static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
170
if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
173
if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
176
return cur;
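mlx5e_xdp_get_inline_state() above is a textbook hysteresis: the boolean flips only when outstanding work crosses a low or high watermark and keeps its old value in between, so the mode cannot thrash around a single threshold. A generic sketch under assumed watermark values:

#include <linux/types.h>

#define WATERMARK_LOW   16      /* illustrative, not the driver's */
#define WATERMARK_HIGH  64

static bool update_inline_state(unsigned int outstanding, bool cur)
{
        if (cur && outstanding <= WATERMARK_LOW)
                return false;   /* drained well below low mark: stop */
        if (!cur && outstanding >= WATERMARK_HIGH)
                return true;    /* backed up past high mark: start */
        return cur;             /* between watermarks: keep state */
}

The gap between the two watermarks is the design choice: the wider it is, the less often the state flips.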
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2920
((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
drivers/net/ethernet/mellanox/mlx5/core/main.c
398
memcpy(dev->caps.hca[cap_type]->cur, hca_caps,
drivers/net/ethernet/mellanox/mlx5/core/main.c
474
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
drivers/net/ethernet/mellanox/mlx5/core/main.c
569
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
drivers/net/ethernet/mellanox/mlx5/core/main.c
603
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur,
drivers/net/ethernet/mellanox/mlx5/core/main.c
726
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur,
drivers/net/ethernet/mellanox/mlx5/core/main.c
755
memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur,
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
1586
&cm->occ.cur, &cm->occ.max);
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
1600
&cm->occ.cur, &cm->occ.max);
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
1772
*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
1791
*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
24
u32 cur;
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
294
mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
drivers/net/ethernet/micrel/ksz884x.c
1755
u32 cur;
drivers/net/ethernet/micrel/ksz884x.c
1775
cur = *last;
drivers/net/ethernet/micrel/ksz884x.c
1776
if (data != cur) {
drivers/net/ethernet/micrel/ksz884x.c
1778
if (data < cur)
drivers/net/ethernet/micrel/ksz884x.c
1780
data -= cur;
drivers/net/ethernet/micrel/ksz884x.c
3461
struct ksz_desc *cur = desc_info->ring;
drivers/net/ethernet/micrel/ksz884x.c
3465
cur->phw = desc++;
drivers/net/ethernet/micrel/ksz884x.c
3467
previous = cur++;
drivers/net/ethernet/micrel/ksz884x.c
3477
desc_info->cur = desc_info->ring;
drivers/net/ethernet/micrel/ksz884x.c
3497
info->cur = info->ring;
drivers/net/ethernet/micrel/ksz884x.c
3607
get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
drivers/net/ethernet/micrel/ksz884x.c
3608
hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
drivers/net/ethernet/micrel/ksz884x.c
3632
struct ksz_desc *cur = hw->tx_desc_info.cur;
drivers/net/ethernet/micrel/ksz884x.c
3634
cur->sw.buf.tx.last_seg = 1;
drivers/net/ethernet/micrel/ksz884x.c
3638
cur->sw.buf.tx.intr = 1;
drivers/net/ethernet/micrel/ksz884x.c
3644
cur->sw.buf.tx.dest_port = hw->dst_ports;
drivers/net/ethernet/micrel/ksz884x.c
3646
release_desc(cur);
drivers/net/ethernet/micrel/ksz884x.c
4279
first = info->cur;
drivers/net/ethernet/micrel/ksz884x.c
4323
info->cur = desc;
drivers/net/ethernet/micrel/ksz884x.c
934
struct ksz_desc *cur;
drivers/net/ethernet/mscc/ocelot_io.c
93
u32 cur = ocelot_port_readl(port, reg);
drivers/net/ethernet/mscc/ocelot_io.c
95
ocelot_port_writel(port, (cur & (~mask)) | val, reg);
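The two ocelot_io.c lines are a read-modify-write: read the register, clear the bits covered by the mask, OR in the new value, write back. A minimal sketch of the same idiom over plain readl()/writel(); as in the quoted line, the caller is trusted to pass a val that already fits inside mask.

#include <linux/io.h>
#include <linux/types.h>

static void reg_rmw(void __iomem *reg, u32 val, u32 mask)
{
        u32 cur = readl(reg);           /* read */

        cur = (cur & ~mask) | val;      /* modify only the masked field */
        writel(cur, reg);               /* write back */
}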
drivers/net/ethernet/natsemi/natsemi.c
2732
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
drivers/net/ethernet/natsemi/natsemi.c
2745
*cur = 0;
drivers/net/ethernet/natsemi/natsemi.c
2749
*cur |= WAKE_PHY;
drivers/net/ethernet/natsemi/natsemi.c
2751
*cur |= WAKE_UCAST;
drivers/net/ethernet/natsemi/natsemi.c
2753
*cur |= WAKE_MCAST;
drivers/net/ethernet/natsemi/natsemi.c
2755
*cur |= WAKE_BCAST;
drivers/net/ethernet/natsemi/natsemi.c
2757
*cur |= WAKE_ARP;
drivers/net/ethernet/natsemi/natsemi.c
2759
*cur |= WAKE_MAGIC;
drivers/net/ethernet/natsemi/natsemi.c
2762
*cur |= WAKE_MAGICSECURE;
drivers/net/ethernet/natsemi/natsemi.c
640
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
625
nx_mac_list_t *cur;
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
629
cur = list_entry(head, nx_mac_list_t, list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
631
if (ether_addr_equal(addr, cur->mac_addr)) {
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
637
cur = kzalloc_obj(nx_mac_list_t, GFP_ATOMIC);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
638
if (cur == NULL)
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
641
memcpy(cur->mac_addr, addr, ETH_ALEN);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
642
list_add_tail(&cur->list, &adapter->mac_list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
644
cur->mac_addr, NETXEN_MAC_ADD);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
657
nx_mac_list_t *cur;
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
687
cur = list_entry(head->next, nx_mac_list_t, list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
690
cur->mac_addr, NETXEN_MAC_DEL);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
691
list_del(&cur->list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
692
kfree(cur);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
717
nx_mac_list_t *cur;
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
721
cur = list_entry(head->next, nx_mac_list_t, list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
723
cur->mac_addr, NETXEN_MAC_DEL);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
724
list_del(&cur->list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
725
kfree(cur);
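The netxen_nic_hw.c cluster implements "add MAC if not already present": scan the list for an equal address, otherwise allocate a node, copy the address in, and append; teardown walks the list freeing each node. A hedged sketch of the add path with an invented node type and standard etherdevice helpers:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

struct mac_node {                       /* illustrative, not nx_mac_list_t */
        struct list_head list;
        u8 mac_addr[ETH_ALEN];
};

static int mac_list_add_unique(struct list_head *head, const u8 *addr)
{
        struct mac_node *cur;

        list_for_each_entry(cur, head, list)
                if (ether_addr_equal(addr, cur->mac_addr))
                        return 0;       /* already programmed, nothing to do */

        cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
        if (!cur)
                return -ENOMEM;

        ether_addr_copy(cur->mac_addr, addr);
        list_add_tail(&cur->list, head);
        return 0;
}

The qlcnic_hw.c entries further down follow the same shape, extended with a VLAN id in the match.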
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
1647
struct list_head *cur;
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
1710
list_for_each(cur, &sds_ring->free_list[ring]) {
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
1711
rxbuf = list_entry(cur,
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3161
struct nx_ip_list *cur, *tmp_cur;
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3163
list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3165
if (cur->master) {
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3166
netxen_config_ipaddr(adapter, cur->ip_addr,
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3168
list_del(&cur->list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3169
kfree(cur);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3172
netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3173
list_del(&cur->list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3174
kfree(cur);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3184
struct nx_ip_list *cur, *tmp_cur;
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3195
cur = list_entry(head, struct nx_ip_list, list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3197
if (cur->ip_addr == ifa->ifa_address)
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3201
cur = kzalloc_obj(struct nx_ip_list, GFP_ATOMIC);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3202
if (cur == NULL)
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3206
cur->master = !!netif_is_bond_master(dev);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3207
cur->ip_addr = ifa->ifa_address;
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3208
list_add_tail(&cur->list, &adapter->ip_list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3213
list_for_each_entry_safe(cur, tmp_cur,
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3215
if (cur->ip_addr == ifa->ifa_address) {
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3216
list_del(&cur->list);
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
3217
kfree(cur);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
1003
__func__, cur, prev, next);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
1014
__func__, cur, prev, next);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
145
u32 cur, prev;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
146
cur = adapter->ahw->idc.curr_state;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
151
adapter->ahw->idc.name[cur],
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
983
u32 cur, prev, next;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
985
cur = adapter->ahw->idc.curr_state;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
993
__func__, cur, prev, state);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
997
if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
462
struct qlcnic_mac_vlan_list *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
466
list_for_each_entry(cur, &adapter->mac_list, list) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
467
if (ether_addr_equal(addr, cur->mac_addr)) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
468
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
472
list_del(&cur->list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
473
kfree(cur);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
483
struct qlcnic_mac_vlan_list *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
486
list_for_each_entry(cur, &adapter->mac_list, list) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
487
if (ether_addr_equal(addr, cur->mac_addr) &&
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
488
cur->vlan_id == vlan)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
492
cur = kzalloc_obj(*cur, GFP_ATOMIC);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
493
if (cur == NULL)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
496
memcpy(cur->mac_addr, addr, ETH_ALEN);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
499
cur->mac_addr, vlan, QLCNIC_MAC_ADD)) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
500
kfree(cur);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
504
cur->vlan_id = vlan;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
505
cur->mac_type = mac_type;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
507
list_add_tail(&cur->list, &adapter->mac_list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
513
struct qlcnic_mac_vlan_list *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
517
cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
518
if (cur->mac_type != QLCNIC_MULTICAST_MAC)
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
521
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
522
cur->vlan_id, QLCNIC_MAC_DEL);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
523
list_del(&cur->list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
524
kfree(cur);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
618
struct qlcnic_mac_vlan_list *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
621
cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
623
cur->mac_addr, 0, QLCNIC_MAC_DEL);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
624
list_del(&cur->list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
625
kfree(cur);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
1360
struct list_head *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
1415
list_for_each(cur, &sds_ring->free_list[ring]) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
1416
rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
1883
struct list_head *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
1929
list_for_each(cur, &sds_ring->free_list[ring]) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
1930
rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
320
struct qlcnic_mac_vlan_list *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
322
list_for_each_entry(cur, &adapter->mac_list, list) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
323
if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
324
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
326
list_del(&cur->list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
327
kfree(cur);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2106
struct qlcnic_mac_vlan_list *cur;
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2109
cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2110
qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2112
list_del(&cur->list);
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
2113
kfree(cur);
drivers/net/ethernet/realtek/rtase/rtase_main.c
401
u32 cur;
drivers/net/ethernet/realtek/rtase/rtase_main.c
403
for (cur = ring_start; ring_end - cur > 0; cur++) {
drivers/net/ethernet/realtek/rtase/rtase_main.c
404
u32 i = cur % RTASE_NUM_DESC;
drivers/net/ethernet/realtek/rtase/rtase_main.c
417
return cur - ring_start;
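The rtase loop walks free-running ring indices and maps each onto a descriptor slot with a modulo. Note the loop test: for u32 operands, ring_end - cur > 0 is effectively cur != ring_end, which stays correct when the counters wrap, unlike cur < ring_end. A sketch of the shape; alloc_one() is a hypothetical stand-in for the per-descriptor refill work.

#include <linux/types.h>

#define NUM_DESC 1024                   /* assumed ring size */

static int alloc_one(u32 slot)          /* hypothetical refill helper */
{
        return 0;                       /* 0 = success in this sketch */
}

static u32 ring_refill(u32 ring_start, u32 ring_end)
{
        u32 cur;

        for (cur = ring_start; ring_end - cur > 0; cur++) {
                u32 i = cur % NUM_DESC; /* index -> slot */

                if (alloc_one(i))
                        break;          /* stop on allocation failure */
        }

        return cur - ring_start;        /* how many slots were refilled */
}

The sis190.c loop a little further down is the same pattern with cur < end, which is fine there because its indices are bounded.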
drivers/net/ethernet/renesas/rswitch.h
972
unsigned int cur;
drivers/net/ethernet/renesas/rswitch_main.c
1048
desc = &gq->ts_ring[gq->cur];
drivers/net/ethernet/renesas/rswitch_main.c
1075
gq->cur = rswitch_next_queue_index(gq, true, 1);
drivers/net/ethernet/renesas/rswitch_main.c
1076
desc = &gq->ts_ring[gq->cur];
drivers/net/ethernet/renesas/rswitch_main.c
1763
gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
drivers/net/ethernet/renesas/rswitch_main.c
1764
gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
drivers/net/ethernet/renesas/rswitch_main.c
1778
gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
drivers/net/ethernet/renesas/rswitch_main.c
1784
gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
drivers/net/ethernet/renesas/rswitch_main.c
243
bool cur, unsigned int num)
drivers/net/ethernet/renesas/rswitch_main.c
245
unsigned int index = cur ? gq->cur : gq->dirty;
drivers/net/ethernet/renesas/rswitch_main.c
257
if (gq->cur >= gq->dirty)
drivers/net/ethernet/renesas/rswitch_main.c
258
return gq->cur - gq->dirty;
drivers/net/ethernet/renesas/rswitch_main.c
260
return gq->ring_size - gq->dirty + gq->cur;
drivers/net/ethernet/renesas/rswitch_main.c
794
skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
drivers/net/ethernet/renesas/rswitch_main.c
808
virt_to_page(gq->rx_bufs[gq->cur]),
drivers/net/ethernet/renesas/rswitch_main.c
809
offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
drivers/net/ethernet/renesas/rswitch_main.c
841
desc = &gq->rx_ring[gq->cur];
drivers/net/ethernet/renesas/rswitch_main.c
865
gq->rx_bufs[gq->cur] = NULL;
drivers/net/ethernet/renesas/rswitch_main.c
866
gq->cur = rswitch_next_queue_index(gq, true, 1);
drivers/net/ethernet/renesas/rswitch_main.c
867
desc = &gq->rx_ring[gq->cur];
drivers/net/ethernet/sis/sis190.c
1255
struct sis190_phy *cur, *next;
drivers/net/ethernet/sis/sis190.c
1257
list_for_each_entry_safe(cur, next, first_phy, list) {
drivers/net/ethernet/sis/sis190.c
1258
kfree(cur);
drivers/net/ethernet/sis/sis190.c
515
u32 cur;
drivers/net/ethernet/sis/sis190.c
517
for (cur = start; cur < end; cur++) {
drivers/net/ethernet/sis/sis190.c
518
unsigned int i = cur % NUM_RX_DESC;
drivers/net/ethernet/sis/sis190.c
528
return cur - start;
drivers/net/ethernet/smsc/smc91x.c
170
int cur;
drivers/net/ethernet/smsc/smc91x.c
172
for (cur = 0; cur < 8; cur++) {
drivers/net/ethernet/sun/niu.c
8621
if (p->cur[type] >= NIU_MAX_PORTS) {
drivers/net/ethernet/sun/niu.c
8625
idx = p->cur[type];
drivers/net/ethernet/sun/niu.c
8628
p->cur[type] = idx + 1;
drivers/net/ethernet/sun/niu.c
8636
for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
drivers/net/ethernet/sun/niu.c
8640
for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
drivers/net/ethernet/sun/niu.c
8668
if (p->cur[PHY_TYPE_MII])
drivers/net/ethernet/sun/niu.c
8671
return p->cur[PHY_TYPE_MII];
drivers/net/ethernet/sun/niu.h
3044
u8 cur[PHY_TYPE_MAX];
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
446
ring->cur = 0;
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
486
ring->cur = 0;
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
508
start_index = ring->cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
509
cur_index = ring->cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
1007
(ring->cur - 1) & (ring->dma_desc_count - 1));
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
1060
int start_index = ring->cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
1143
unsigned int start_index = ring->cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
2648
desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
2662
xlgmac_dump_rx_desc(pdata, ring, ring->cur);
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
2811
ring->cur & (ring->dma_desc_count - 1), ring->cur);
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
688
desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
711
int start_index = ring->cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
712
int cur_index = ring->cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
997
ring->cur = cur_index + 1;
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
1046
unsigned int cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
1055
cur = ring->cur;
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
1063
(ring->dirty != cur)) {
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
1134
desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
1150
desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
1159
ring->cur++;
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
1268
desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
30
return (ring->dma_desc_count - (ring->cur - ring->dirty));
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
35
return (ring->cur - ring->dirty);
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
962
while (ring->dirty != ring->cur) {
drivers/net/ethernet/synopsys/dwc-xlgmac.h
340
unsigned int cur;
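dwc-xlgmac tracks a producer index cur and a consumer index dirty that both increase without bound: in-flight descriptors are cur - dirty and free slots are count - (cur - dirty), with unsigned subtraction keeping the math correct across wraparound. The rswitch_main.c lines above need the explicit two-case form instead because their indices wrap at ring_size. A sketch of the free-running variant:

#include <linux/types.h>

struct ring {
        unsigned int cur;               /* next slot producer writes */
        unsigned int dirty;             /* oldest unreclaimed slot */
        unsigned int dma_desc_count;    /* total descriptors */
};

static unsigned int ring_in_flight(const struct ring *r)
{
        return r->cur - r->dirty;       /* wrap-safe for unsigned */
}

static unsigned int ring_free(const struct ring *r)
{
        return r->dma_desc_count - ring_in_flight(r);
}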
drivers/net/ethernet/xilinx/ll_temac_main.c
96
ktime_t cur = ktime_get();
drivers/net/ethernet/xilinx/ll_temac_main.c
98
return hard_acs_rdy(lp) || ktime_after(cur, timeout);
drivers/net/mdio/mdio-mux.c
141
int (*switch_fn)(int cur, int desired, void *data),
drivers/net/netconsole.c
1948
char *cur = opt;
drivers/net/netconsole.c
1952
if (*cur != '@') {
drivers/net/netconsole.c
1953
delim = strchr(cur, '@');
drivers/net/netconsole.c
1957
if (kstrtou16(cur, 10, &np->local_port))
drivers/net/netconsole.c
1959
cur = delim;
drivers/net/netconsole.c
1961
cur++;
drivers/net/netconsole.c
1963
if (*cur != '/') {
drivers/net/netconsole.c
1965
delim = strchr(cur, '/');
drivers/net/netconsole.c
1969
ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
drivers/net/netconsole.c
1974
cur = delim;
drivers/net/netconsole.c
1976
cur++;
drivers/net/netconsole.c
1978
if (*cur != ',') {
drivers/net/netconsole.c
1980
delim = strchr(cur, ',');
drivers/net/netconsole.c
1987
if (!strchr(cur, ':'))
drivers/net/netconsole.c
1988
strscpy(np->dev_name, cur, sizeof(np->dev_name));
drivers/net/netconsole.c
1989
else if (!mac_pton(cur, np->dev_mac))
drivers/net/netconsole.c
1992
cur = delim;
drivers/net/netconsole.c
1994
cur++;
drivers/net/netconsole.c
1996
if (*cur != '@') {
drivers/net/netconsole.c
1998
delim = strchr(cur, '@');
drivers/net/netconsole.c
2002
if (*cur == ' ' || *cur == '\t')
drivers/net/netconsole.c
2004
if (kstrtou16(cur, 10, &np->remote_port))
drivers/net/netconsole.c
2006
cur = delim;
drivers/net/netconsole.c
2008
cur++;
drivers/net/netconsole.c
2011
delim = strchr(cur, '/');
drivers/net/netconsole.c
2015
ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
drivers/net/netconsole.c
2022
cur = delim + 1;
drivers/net/netconsole.c
2024
if (*cur != 0) {
drivers/net/netconsole.c
2026
if (!mac_pton(cur, np->remote_mac))
drivers/net/netconsole.c
2035
np_info(np, "couldn't parse config at '%s'!\n", cur);
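The netconsole.c cluster parses its port@ip/dev,port@ip/mac config string by repeatedly locating the next delimiter, splitting the field off, converting it, and advancing cur past the delimiter. A reduced sketch of one such step, assuming the same mutable-string convention as the quoted code:

#include <linux/errno.h>
#include <linux/kernel.h>       /* kstrtou16 */
#include <linux/string.h>

/* Consume "<port>@rest" (port optional) and advance *pcur past '@'. */
static int parse_port(char **pcur, u16 *port)
{
        char *cur = *pcur, *delim;

        if (*cur != '@') {              /* a port field is present */
                delim = strchr(cur, '@');
                if (!delim)
                        return -EINVAL; /* malformed: no terminator */
                *delim = 0;             /* split the field off in place */
                if (kstrtou16(cur, 10, port))
                        return -EINVAL;
                cur = delim;
        }
        *pcur = cur + 1;                /* step past the '@' */
        return 0;
}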
drivers/net/phy/motorcomm.c
951
u32 cur;
drivers/net/phy/motorcomm.c
955
{.vol = YT8531_LDO_VOL_1V8, .ds = 0, .cur = 1200},
drivers/net/phy/motorcomm.c
956
{.vol = YT8531_LDO_VOL_1V8, .ds = 1, .cur = 2100},
drivers/net/phy/motorcomm.c
957
{.vol = YT8531_LDO_VOL_1V8, .ds = 2, .cur = 2700},
drivers/net/phy/motorcomm.c
958
{.vol = YT8531_LDO_VOL_1V8, .ds = 3, .cur = 2910},
drivers/net/phy/motorcomm.c
959
{.vol = YT8531_LDO_VOL_1V8, .ds = 4, .cur = 3110},
drivers/net/phy/motorcomm.c
960
{.vol = YT8531_LDO_VOL_1V8, .ds = 5, .cur = 3600},
drivers/net/phy/motorcomm.c
961
{.vol = YT8531_LDO_VOL_1V8, .ds = 6, .cur = 3970},
drivers/net/phy/motorcomm.c
962
{.vol = YT8531_LDO_VOL_1V8, .ds = 7, .cur = 4350},
drivers/net/phy/motorcomm.c
963
{.vol = YT8531_LDO_VOL_3V3, .ds = 0, .cur = 3070},
drivers/net/phy/motorcomm.c
964
{.vol = YT8531_LDO_VOL_3V3, .ds = 1, .cur = 4080},
drivers/net/phy/motorcomm.c
965
{.vol = YT8531_LDO_VOL_3V3, .ds = 2, .cur = 4370},
drivers/net/phy/motorcomm.c
966
{.vol = YT8531_LDO_VOL_3V3, .ds = 3, .cur = 4680},
drivers/net/phy/motorcomm.c
967
{.vol = YT8531_LDO_VOL_3V3, .ds = 4, .cur = 5020},
drivers/net/phy/motorcomm.c
968
{.vol = YT8531_LDO_VOL_3V3, .ds = 5, .cur = 5450},
drivers/net/phy/motorcomm.c
969
{.vol = YT8531_LDO_VOL_3V3, .ds = 6, .cur = 5740},
drivers/net/phy/motorcomm.c
970
{.vol = YT8531_LDO_VOL_3V3, .ds = 7, .cur = 6140},
drivers/net/phy/motorcomm.c
983
static int yt8531_get_ds_map(struct phy_device *phydev, u32 cur)
drivers/net/phy/motorcomm.c
990
if (yt8531_ldo_vol[i].vol == vol && yt8531_ldo_vol[i].cur == cur)
drivers/net/team/team_core.c
828
struct team_port *cur)
drivers/net/team/team_core.c
830
if (port->priority < cur->priority)
drivers/net/team/team_core.c
832
if (port->priority > cur->priority)
drivers/net/team/team_core.c
834
if (port->index < cur->index)
drivers/net/team/team_core.c
842
struct team_port *cur;
drivers/net/team/team_core.c
850
list_for_each_entry(cur, qom_list, qom_list) {
drivers/net/team/team_core.c
851
if (team_queue_override_port_has_gt_prio_than(port, cur))
drivers/net/team/team_core.c
853
node = &cur->qom_list;
drivers/net/team/team_core.c
922
struct team_port *cur;
drivers/net/team/team_core.c
924
list_for_each_entry(cur, &team->port_list, list)
drivers/net/team/team_core.c
925
if (cur == port)
drivers/net/team/team_mode_broadcast.c
16
struct team_port *cur;
drivers/net/team/team_mode_broadcast.c
22
list_for_each_entry_rcu(cur, &team->port_list, list) {
drivers/net/team/team_mode_broadcast.c
23
if (team_port_txable(cur)) {
drivers/net/team/team_mode_broadcast.c
33
last = cur;
drivers/net/wireless/ath/ath12k/dp_rx.c
27
struct list_head *cur;
drivers/net/wireless/ath/ath12k/dp_rx.c
36
list_for_each(cur, head) {
drivers/net/wireless/ath/ath12k/dp_rx.c
40
rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
drivers/net/wireless/ath/ath12k/dp_rx.c
47
list_cut_before(list, head, cur);
drivers/net/wireless/ath/ath6kl/txrx.c
1113
u16 idx, st, cur, end;
drivers/net/wireless/ath/ath6kl/txrx.c
1136
cur = seq_no;
drivers/net/wireless/ath/ath6kl/txrx.c
1139
if (((st < end) && (cur < st || cur > end)) ||
drivers/net/wireless/ath/ath6kl/txrx.c
1140
((st > end) && (cur > end) && (cur < st))) {
drivers/net/wireless/ath/ath6kl/txrx.c
1145
(cur < end || cur > extended_end)) ||
drivers/net/wireless/ath/ath6kl/txrx.c
1146
((end > extended_end) && (cur > extended_end) &&
drivers/net/wireless/ath/ath6kl/txrx.c
1147
(cur < end))) {
drivers/net/wireless/ath/ath6kl/txrx.c
1150
if (cur >= rxtid->hold_q_sz - 1)
drivers/net/wireless/ath/ath6kl/txrx.c
1151
rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
drivers/net/wireless/ath/ath6kl/txrx.c
1154
(rxtid->hold_q_sz - 2 - cur);
drivers/net/wireless/ath/ath6kl/txrx.c
1161
if (cur >= rxtid->hold_q_sz - 1)
drivers/net/wireless/ath/ath6kl/txrx.c
1162
st = cur - (rxtid->hold_q_sz - 1);
drivers/net/wireless/ath/ath6kl/txrx.c
1165
(rxtid->hold_q_sz - 2 - cur);
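The ath6kl comparisons decide whether sequence number cur falls inside the receive window on a circular sequence space, which needs two cases depending on whether the window wraps past the maximum. A simplified sketch of the underlying test; ath6kl's actual boundary conventions (strict vs inclusive, and the extended-end handling) differ slightly, so treat this as the idea rather than the driver's exact predicate.

#include <linux/types.h>

/* Is 'cur' inside the circular window [st, end]? */
static bool seq_in_window(u16 st, u16 end, u16 cur)
{
        if (st <= end)                  /* window does not wrap */
                return cur >= st && cur <= end;
        /* wrapped window: [st, U16_MAX] plus [0, end] */
        return cur >= st || cur <= end;
}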
drivers/net/wireless/ath/ath9k/channel.c
331
struct ath_chanctx *prev, *cur;
drivers/net/wireless/ath/ath9k/channel.c
338
cur = sc->cur_chan;
drivers/net/wireless/ath/ath9k/channel.c
339
prev = ath_chanctx_get_next(sc, cur);
drivers/net/wireless/ath/ath9k/channel.c
345
cur_tsf = (u32) cur->tsf_val +
drivers/net/wireless/ath/ath9k/channel.c
346
ath9k_hw_get_tsf_offset(cur->tsf_ts, ts);
drivers/net/wireless/ath/ath9k/hw.c
1851
u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur)
drivers/net/wireless/ath/ath9k/hw.c
1853
if (cur == 0)
drivers/net/wireless/ath/ath9k/hw.c
1854
cur = ktime_get_raw();
drivers/net/wireless/ath/ath9k/hw.c
1855
return ktime_us_delta(cur, last);
drivers/net/wireless/ath/ath9k/hw.h
1069
u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur);
drivers/net/wireless/ath/wcn36xx/dxe.c
242
struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
drivers/net/wireless/ath/wcn36xx/dxe.c
248
cur->bd_phy_addr = bd_phy_addr;
drivers/net/wireless/ath/wcn36xx/dxe.c
249
cur->bd_cpu_addr = bd_cpu_addr;
drivers/net/wireless/ath/wcn36xx/dxe.c
253
cur->bd_phy_addr = 0;
drivers/net/wireless/ath/wcn36xx/dxe.c
254
cur->bd_cpu_addr = NULL;
drivers/net/wireless/ath/wcn36xx/dxe.c
256
cur = cur->next;
drivers/net/wireless/ath/wcn36xx/dxe.c
335
struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
drivers/net/wireless/ath/wcn36xx/dxe.c
339
kfree_skb(cur->skb);
drivers/net/wireless/ath/wcn36xx/dxe.c
340
cur = cur->next;
drivers/net/wireless/broadcom/b43/debugfs.c
745
struct b43_txstatus *cur;
drivers/net/wireless/broadcom/b43/debugfs.c
755
cur = &(log->log[i]);
drivers/net/wireless/broadcom/b43/debugfs.c
756
memcpy(cur, status, sizeof(*cur));
drivers/net/wireless/broadcom/b43legacy/debugfs.c
422
struct b43legacy_txstatus *cur;
drivers/net/wireless/broadcom/b43legacy/debugfs.c
434
cur = &(log->log[i]);
drivers/net/wireless/broadcom/b43legacy/debugfs.c
435
memcpy(cur, status, sizeof(*cur));
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
541
struct brcmf_fw_item *cur;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
551
cur = &fwctx->req->items[fwctx->curpos];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
564
else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
581
if (!nvram && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
585
cur->nv_data.data = nvram;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
586
cur->nv_data.len = nvram_length;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
596
struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
599
brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path, fw ? "" : "not ");
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
601
switch (cur->type) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
607
cur->binary = fw;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
613
brcmf_err("unknown fw type: %d\n", cur->type);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
618
return (cur->flags & BRCMF_FW_REQF_OPTIONAL) ? 0 : ret;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
651
struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
661
alt_path = brcm_alt_fw_path(cur->path,
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
673
return request_firmware(fw, cur->path, fwctx->dev);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1294
struct sk_buff *cur, *next;
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1296
skb_queue_walk_safe(&bus->glom, cur, next) {
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1297
skb_unlink(cur, &bus->glom);
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
1298
brcmu_pkt_buf_free_skb(cur);
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
962
uint cur;
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
963
cur =
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
970
di->rxin, di->rxout, cur);
drivers/net/wireless/intel/iwlwifi/dvm/rx.c
282
static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
drivers/net/wireless/intel/iwlwifi/dvm/rx.c
289
i++, prev++, cur++, delta++, max_delta++, accum++) {
drivers/net/wireless/intel/iwlwifi/dvm/rx.c
290
if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
drivers/net/wireless/intel/iwlwifi/dvm/rx.c
292
le32_to_cpu(*cur) - le32_to_cpu(*prev));
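accum_stats() above walks parallel arrays of little-endian firmware counters and, whenever the current snapshot exceeds the previous one, adds the delta into an accumulator while also tracking the largest single delta. A sketch of the per-element step:

#include <linux/types.h>
#include <asm/byteorder.h>

static void accum_one(const __le32 *prev, const __le32 *cur,
                      u32 *delta, u32 *max_delta, u32 *accum)
{
        if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
                *delta = le32_to_cpu(*cur) - le32_to_cpu(*prev);
                *accum += *delta;
                if (*delta > *max_delta)
                        *max_delta = *delta;    /* worst burst so far */
        }
        /* counters that went backwards (firmware reset) are skipped */
}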
drivers/net/wireless/intel/iwlwifi/mld/tx.c
819
!sta->cur->max_rc_amsdu_len)
drivers/net/wireless/intel/iwlwifi/mld/tx.c
836
max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
1973
u8 cur = lq_sta->lq.reduced_tpc;
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
1983
return cur != lq_sta->pers.dbg_fixed_txp_reduction;
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
1999
return cur != TPC_NO_REDUCTION;
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2002
rs_get_adjacent_txp(mvm, cur, &weak, &strong);
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2006
sr = window[cur].success_ratio;
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2007
current_tpt = window[cur].average_tpt;
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2015
cur, current_tpt, sr, weak, strong,
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
2948
mvmsta->max_amsdu_len = sta->cur->max_amsdu_len;
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
943
min_t(unsigned int, sta->cur->max_amsdu_len,
drivers/net/wireless/mediatek/mt76/dma.c
1055
int qid, done = 0, cur;
drivers/net/wireless/mediatek/mt76/dma.c
1063
cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
drivers/net/wireless/mediatek/mt76/dma.c
1065
done += cur;
drivers/net/wireless/mediatek/mt76/dma.c
1066
} while (cur && done < budget);
drivers/net/wireless/mediatek/mt76/eeprom.c
212
struct device_node *cur, *fallback = NULL;
drivers/net/wireless/mediatek/mt76/eeprom.c
222
for_each_child_of_node(np, cur) {
drivers/net/wireless/mediatek/mt76/eeprom.c
223
struct property *country = of_find_property(cur, "country", NULL);
drivers/net/wireless/mediatek/mt76/eeprom.c
224
struct property *regd = of_find_property(cur, "regdomain", NULL);
drivers/net/wireless/mediatek/mt76/eeprom.c
227
fallback = cur;
drivers/net/wireless/mediatek/mt76/eeprom.c
234
return cur;
drivers/net/wireless/mediatek/mt76/eeprom.c
272
struct device_node *cur;
drivers/net/wireless/mediatek/mt76/eeprom.c
276
for_each_child_of_node(np, cur) {
drivers/net/wireless/mediatek/mt76/eeprom.c
277
val = mt76_get_of_array(cur, "channels", &len, 2);
drivers/net/wireless/mediatek/mt76/eeprom.c
284
return cur;
drivers/net/wireless/mediatek/mt76/eeprom.c
329
int i, cur;
drivers/net/wireless/mediatek/mt76/eeprom.c
335
cur = data[0];
drivers/net/wireless/mediatek/mt76/eeprom.c
342
if (--cur > 0)
drivers/net/wireless/mediatek/mt76/eeprom.c
350
cur = data[0];
drivers/net/wireless/mediatek/mt76/mac80211.c
1209
int cur, diff;
drivers/net/wireless/mediatek/mt76/mac80211.c
1211
cur = *chain_signal;
drivers/net/wireless/mediatek/mt76/mac80211.c
1213
cur > 0)
drivers/net/wireless/mediatek/mt76/mac80211.c
1216
if (cur > signal)
drivers/net/wireless/mediatek/mt76/mac80211.c
1217
swap(cur, signal);
drivers/net/wireless/mediatek/mt76/mac80211.c
1219
diff = signal - cur;
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
2218
static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
2223
if (cur == freqs[i])
drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
283
u16 cur = tm_change_map[i];
drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
285
if (td->param_set[cur / 32] & BIT(cur % 32))
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
113
cur = (__le32 *)(fw->data + sizeof(*hdr));
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
117
mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
119
cur += len / sizeof(*cur);
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
128
mt76_wr_copy(dev, offset, cur, len);
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
21
__le32 *cur;
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
58
cur = (__le32 *)(fw->data + sizeof(*hdr));
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
60
mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
86
__le32 *cur;
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
3003
static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
3008
if (cur == freqs[i])
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
100
cur |= val;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
102
return mt76_wmac_spi_write(dev, adie, addr, cur);
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
1156
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
1165
read_poll_timeout(mt76_rr, cur, !(cur ^ MT_CONN_INFRA_CONN),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
150
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
152
read_poll_timeout(mt76_rr, cur,
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
153
FIELD_GET(MT_SEMA_RFSPI_STATUS_MASK, cur),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
235
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
240
ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_RX_EN_MASK),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
249
ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_TX_RDY_MASK),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
267
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
279
return read_poll_timeout(mt76_rr, cur, (cur == con_infra_version),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
45
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
47
ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
57
ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
72
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
74
ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
84
return read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
865
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
901
read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
909
read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
93
u32 cur, ret;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
935
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
945
return read_poll_timeout(mt76_rr, cur, (cur == 0x1d1e),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
95
ret = mt76_wmac_spi_read(dev, adie, addr, &cur);
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
953
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
958
return read_poll_timeout(mt76_rr, cur,
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
959
(FIELD_GET(MT_TOP_WFSYS_RESET_STATUS_MASK, cur) == enable),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
967
u32 cur;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
979
ret = read_poll_timeout(mt76_rr, cur,
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
980
!(cur & MT_CONN_INFRA_CONN_WF_MASK),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
986
ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_WFDMA2CONN_MASK),
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
99
cur &= ~mask;
drivers/net/wireless/mediatek/mt76/mt7915/soc.c
992
return read_poll_timeout(mt76_rr, cur, (cur == 0x02060000),
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
679
u16 cur = tm_change_map[i];
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
681
if (td->param_set[cur / 32] & BIT(cur % 32))
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
542
char *buf, *cur;
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
548
cur = buf;
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
553
ret = snprintf(cur, len, "0x%x,", le32_to_cpu(val[i]));
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
557
cur += ret;
drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
560
if (cur > buf)
drivers/net/wireless/mediatek/mt76/testmode.c
488
struct nlattr *cur;
drivers/net/wireless/mediatek/mt76/testmode.c
492
nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
drivers/net/wireless/mediatek/mt76/testmode.c
493
if (nla_len(cur) != 1 ||
drivers/net/wireless/mediatek/mt76/testmode.c
497
td->tx_power[idx++] = nla_get_u8(cur);
drivers/net/wireless/mediatek/mt76/testmode.c
502
struct nlattr *cur;
drivers/net/wireless/mediatek/mt76/testmode.c
506
nla_for_each_nested(cur, tb[MT76_TM_ATTR_MAC_ADDRS], rem) {
drivers/net/wireless/mediatek/mt76/testmode.c
507
if (nla_len(cur) != ETH_ALEN || idx >= 3)
drivers/net/wireless/mediatek/mt76/testmode.c
510
memcpy(td->addr[idx], nla_data(cur), ETH_ALEN);
drivers/net/wireless/mediatek/mt76/util.c
12
u32 cur;
drivers/net/wireless/mediatek/mt76/util.c
16
cur = __mt76_rr(dev, offset) & mask;
drivers/net/wireless/mediatek/mt76/util.c
17
if (cur == val)
drivers/net/wireless/mediatek/mt76/util.c
30
u32 cur;
drivers/net/wireless/mediatek/mt76/util.c
34
cur = __mt76_rr(dev, offset) & mask;
drivers/net/wireless/mediatek/mt76/util.c
35
if (cur == val)
drivers/net/wireless/mediatek/mt76/util.c
47
int i, idx = 0, cur;
drivers/net/wireless/mediatek/mt76/util.c
55
cur = i * 32 + idx;
drivers/net/wireless/mediatek/mt76/util.c
56
if (cur >= size)
drivers/net/wireless/mediatek/mt76/util.c
60
return cur;
drivers/net/wireless/mediatek/mt7601u/core.c
31
u32 cur;
drivers/net/wireless/mediatek/mt7601u/core.c
38
cur = mt7601u_rr(dev, offset) & mask;
drivers/net/wireless/mediatek/mt7601u/core.c
39
if (cur == val)
drivers/net/wireless/mediatek/mt7601u/core.c
53
u32 cur;
drivers/net/wireless/mediatek/mt7601u/core.c
60
cur = mt7601u_rr(dev, offset) & mask;
drivers/net/wireless/mediatek/mt7601u/core.c
61
if (cur == val)
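Both the mt76/util.c and mt7601u/core.c entries above are masked-register polls: read, mask, compare against the expected value, sleep, retry until a deadline. The mt7915/soc.c entries reach the same result through the generic read_poll_timeout() macro instead. A hedged sketch of the open-coded form; rr() is a hypothetical register-read accessor standing in for the drivers' own.

#include <linux/delay.h>
#include <linux/types.h>

static u32 rr(u32 offset)               /* hypothetical MMIO read */
{
        return 0;
}

static bool poll_msec(u32 offset, u32 mask, u32 val, int timeout_ms)
{
        u32 cur;

        do {
                cur = rr(offset) & mask;
                if (cur == val)
                        return true;    /* condition met */
                usleep_range(1000, 2000);   /* back off ~1 ms */
        } while (timeout_ms-- > 0);

        return false;                   /* timed out */
}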
drivers/net/wireless/realtek/rtw88/tx.c
233
struct sk_buff *cur, *tmp;
drivers/net/wireless/realtek/rtw88/tx.c
249
skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
drivers/net/wireless/realtek/rtw88/tx.c
250
n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
drivers/net/wireless/realtek/rtw88/tx.c
252
__skb_unlink(cur, &tx_report->queue);
drivers/net/wireless/realtek/rtw88/tx.c
253
rtw_tx_report_tx_status(rtwdev, cur, st == 0);
drivers/net/wireless/realtek/rtw89/chan.c
267
enum rtw89_chanctx_idx cur;
drivers/net/wireless/realtek/rtw89/chan.c
270
cur = atomic_cmpxchg(&hal->roc_chanctx_idx,
drivers/net/wireless/realtek/rtw89/chan.c
272
if (cur != RTW89_CHANCTX_IDLE) {
drivers/net/wireless/realtek/rtw89/chan.c
2761
struct rtw89_mcc_role *cur,
drivers/net/wireless/realtek/rtw89/chan.c
2766
u32 old = rtw89_mcc_role_fw_macid_bitmap_to_u32(cur);
drivers/net/wireless/realtek/rtw89/chan.c
281
cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx,
drivers/net/wireless/realtek/rtw89/chan.c
283
if (cur == idx)
drivers/net/wireless/realtek/rtw89/chan.c
286
if (cur == RTW89_CHANCTX_IDLE)
drivers/net/wireless/realtek/rtw89/chan.c
291
"ROC is processing on entity %d\n", cur);
drivers/net/wireless/realtek/rtw89/chan.c
3300
u8 cur;
drivers/net/wireless/realtek/rtw89/chan.c
3313
cur = atomic_read(&hal->roc_chanctx_idx);
drivers/net/wireless/realtek/rtw89/chan.c
3314
if (cur == idx1)
drivers/net/wireless/realtek/rtw89/chan.c
3316
else if (cur == idx2)
drivers/net/wireless/realtek/rtw89/chan.c
3412
enum rtw89_entity_mode cur;
drivers/net/wireless/realtek/rtw89/chan.c
3445
cur = rtw89_get_entity_mode(rtwdev);
drivers/net/wireless/realtek/rtw89/chan.c
3446
switch (cur) {
drivers/net/wireless/realtek/rtw89/chan.c
370
struct rtw89_vif_link *cur;
drivers/net/wireless/realtek/rtw89/chan.c
375
cur = rtw89_get_designated_link(rtwvif);
drivers/net/wireless/realtek/rtw89/chan.c
376
if (unlikely(!cur) || !cur->chanctx_assigned)
drivers/net/wireless/realtek/rtw89/chan.c
379
if (cur == rtwvif_link)
drivers/net/wireless/realtek/rtw89/chan.c
382
rtw89_swap_chanctx(rtwdev, rtwvif_link->chanctx_idx, cur->chanctx_idx);
drivers/net/wireless/realtek/rtw89/chan.h
38
#define RTW89_MCC_NEXT_GROUP(cur) (((cur) + 1) % 4)
drivers/net/wireless/realtek/rtw89/core.c
3646
const struct rtw89_chan *cur = rtw89_scan_chan_get(rtwdev);
drivers/net/wireless/realtek/rtw89/core.c
3647
u8 chan = cur->primary_channel;
drivers/net/wireless/realtek/rtw89/core.c
3648
u8 band = cur->band_type;
drivers/net/wireless/realtek/rtw89/core.c
5652
unsigned int cur;
drivers/net/wireless/realtek/rtw89/core.c
5656
cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
drivers/net/wireless/realtek/rtw89/core.c
5657
if (cur != RTW89_WAIT_COND_IDLE)
drivers/net/wireless/realtek/rtw89/core.c
5725
unsigned int cur;
drivers/net/wireless/realtek/rtw89/core.c
5733
cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
drivers/net/wireless/realtek/rtw89/core.c
5734
if (cur != cond)
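The rtw89/core.c pair claims and releases a shared wait slot with atomic_cmpxchg(): the waiter swaps IDLE for its condition and bails if another waiter got there first; the completer swaps the condition back to IDLE and ignores stale or mismatched completions. A sketch of that single-owner handoff; COND_IDLE is an assumed sentinel, not the driver's constant.

#include <linux/atomic.h>
#include <linux/errno.h>

#define COND_IDLE  0xffffffffU          /* assumed "slot free" sentinel */

static int wait_claim(atomic_t *slot, unsigned int cond)
{
        unsigned int cur;

        cur = atomic_cmpxchg(slot, COND_IDLE, cond);
        return cur != COND_IDLE ? -EBUSY : 0;   /* someone else owns it */
}

static int wait_complete(atomic_t *slot, unsigned int cond)
{
        unsigned int cur;

        cur = atomic_cmpxchg(slot, cond, COND_IDLE);
        return cur != cond ? -ENOENT : 0;       /* stale or mismatched */
}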
drivers/net/wireless/realtek/rtw89/debug.c
4346
u32 cur = hal->disabled_dm_bitmap;
drivers/net/wireless/realtek/rtw89/debug.c
4348
rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur | BIT(flag));
drivers/net/wireless/realtek/rtw89/debug.c
4354
u32 cur = hal->disabled_dm_bitmap;
drivers/net/wireless/realtek/rtw89/debug.c
4356
rtw89_debug_disable_dm_cfg_bmap(rtwdev, cur & ~BIT(flag));
drivers/net/wireless/realtek/rtw89/debug.c
768
const s8 *bufp, const unsigned int cur, unsigned int *ate)
drivers/net/wireless/realtek/rtw89/debug.c
778
cur + cnt, &eaten);
drivers/net/wireless/realtek/rtw89/debug.c
790
p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
drivers/net/wireless/realtek/rtw89/debug.c
791
bufp[cur + 1]);
drivers/net/wireless/realtek/rtw89/debug.c
796
p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
drivers/net/wireless/realtek/rtw89/debug.c
797
bufp[cur + 1],
drivers/net/wireless/realtek/rtw89/debug.c
798
bufp[cur + 2], bufp[cur + 3]);
drivers/net/wireless/realtek/rtw89/debug.c
803
p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
drivers/net/wireless/realtek/rtw89/debug.c
804
bufp[cur + 1],
drivers/net/wireless/realtek/rtw89/debug.c
805
bufp[cur + 2], bufp[cur + 3], bufp[cur + 4],
drivers/net/wireless/realtek/rtw89/debug.c
806
bufp[cur + 5], bufp[cur + 6], bufp[cur + 7]);
drivers/net/wireless/realtek/rtw89/debug.c
823
unsigned int cur, i;
drivers/net/wireless/realtek/rtw89/debug.c
847
cur = addr - map->addr_from;
drivers/net/wireless/realtek/rtw89/debug.c
851
bufp[cur + i] = tmp >> fct;
drivers/net/wireless/realtek/rtw89/debug.c
855
for (cur = 0, i = 0; i < map->size; i++, cur += eaten)
drivers/net/wireless/realtek/rtw89/debug.c
856
p += __print_txpwr_ent(p, end - p, &map->ent[i], bufp, cur, &eaten);
drivers/net/wireless/realtek/rtw89/mac80211.c
721
struct rtw89_vif_link *cur = rtw89_get_designated_link(rtwvif);
drivers/net/wireless/realtek/rtw89/mac80211.c
724
rtw89_chip_rfk_channel(rtwdev, cur);
drivers/net/wireless/realtek/rtw89/phy.c
3038
struct rtw89_rate_desc cur = {};
drivers/net/wireless/realtek/rtw89/phy.c
3054
for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
drivers/net/wireless/realtek/rtw89/phy.c
3056
if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]])
drivers/net/wireless/realtek/rtw89/phy.c
3059
cur.rs = rs[i];
drivers/net/wireless/realtek/rtw89/phy.c
3060
for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]];
drivers/net/wireless/realtek/rtw89/phy.c
3061
cur.idx++) {
drivers/net/wireless/realtek/rtw89/phy.c
3062
v[cur.idx % 4] =
drivers/net/wireless/realtek/rtw89/phy.c
3065
&cur);
drivers/net/wireless/realtek/rtw89/phy.c
3067
if ((cur.idx + 1) % 4)
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1933
static inline u32 __btc_ctrl_rst_all_time(u32 cur)
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1935
return cur & ~B_AX_FORCE_PWR_BY_RATE_EN;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1938
static inline u32 __btc_ctrl_gen_all_time(u32 cur, u32 val)
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1940
u32 hv = cur & ~B_AX_FORCE_PWR_BY_RATE_VALUE_MASK;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1951
static inline u32 __btc_ctrl_rst_gnt_bt(u32 cur)
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1953
return cur & ~B_AX_TXAGC_BT_EN;
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1956
static inline u32 __btc_ctrl_gen_gnt_bt(u32 cur, u32 val)
drivers/net/wireless/realtek/rtw89/rtw8852a.c
1958
u32 ov = cur & ~B_AX_TXAGC_BT_MASK;
drivers/net/wireless/silabs/wfx/bh.c
265
u32 cur, prev;
drivers/net/wireless/silabs/wfx/bh.c
267
wfx_control_reg_read(wdev, &cur);
drivers/net/wireless/silabs/wfx/bh.c
268
prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
drivers/net/wireless/silabs/wfx/bh.c
272
if (!(cur & CTRL_NEXT_LEN_MASK))
drivers/net/wireless/silabs/wfx/bh.c
274
cur);
drivers/net/wireless/silabs/wfx/bh.c
277
prev, cur);
drivers/net/wireless/silabs/wfx/wfx.h
113
static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev, struct wfx_vif *cur)
drivers/net/wireless/silabs/wfx/wfx.h
119
if (!cur)
drivers/net/wireless/silabs/wfx/wfx.h
125
if (tmp == cur)
drivers/net/wwan/t7xx/t7xx_port_wwan.c
107
while (cur) {
drivers/net/wwan/t7xx/t7xx_port_wwan.c
108
cloned = skb_clone(cur, GFP_KERNEL);
drivers/net/wwan/t7xx/t7xx_port_wwan.c
109
cloned->len = skb_headlen(cur);
drivers/net/wwan/t7xx/t7xx_port_wwan.c
117
cnt += cur->len;
drivers/net/wwan/t7xx/t7xx_port_wwan.c
118
if (cur == skb)
drivers/net/wwan/t7xx/t7xx_port_wwan.c
119
cur = skb_shinfo(skb)->frag_list;
drivers/net/wwan/t7xx/t7xx_port_wwan.c
121
cur = cur->next;
drivers/net/wwan/t7xx/t7xx_port_wwan.c
58
struct sk_buff *cur = skb, *tx_skb;
drivers/net/wwan/t7xx/t7xx_port_wwan.c
67
actual = cur->len;
drivers/net/wwan/t7xx/t7xx_port_wwan.c
74
skb_put_data(tx_skb, cur->data + offset, len);
drivers/net/wwan/t7xx/t7xx_port_wwan.c
93
struct sk_buff *cur = skb, *cloned;
drivers/nvme/host/core.c
1486
struct nvme_ns_id_desc *cur, bool *csi_seen)
drivers/nvme/host/core.c
1489
void *data = cur;
drivers/nvme/host/core.c
1491
switch (cur->nidt) {
drivers/nvme/host/core.c
1493
if (cur->nidl != NVME_NIDT_EUI64_LEN) {
drivers/nvme/host/core.c
1495
warn_str, cur->nidl);
drivers/nvme/host/core.c
1500
memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
drivers/nvme/host/core.c
1503
if (cur->nidl != NVME_NIDT_NGUID_LEN) {
drivers/nvme/host/core.c
1505
warn_str, cur->nidl);
drivers/nvme/host/core.c
1510
memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
drivers/nvme/host/core.c
1513
if (cur->nidl != NVME_NIDT_UUID_LEN) {
drivers/nvme/host/core.c
1515
warn_str, cur->nidl);
drivers/nvme/host/core.c
1520
uuid_copy(&ids->uuid, data + sizeof(*cur));
drivers/nvme/host/core.c
1523
if (cur->nidl != NVME_NIDT_CSI_LEN) {
drivers/nvme/host/core.c
1525
warn_str, cur->nidl);
drivers/nvme/host/core.c
1528
memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
drivers/nvme/host/core.c
1533
return cur->nidl;
drivers/nvme/host/core.c
1568
struct nvme_ns_id_desc *cur = data + pos;
drivers/nvme/host/core.c
1570
if (cur->nidl == 0)
drivers/nvme/host/core.c
1573
len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
drivers/nvme/host/core.c
1577
len += sizeof(*cur);
drivers/nvme/target/core.c
122
struct nvmet_ns *cur;
drivers/nvme/target/core.c
126
nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
drivers/nvme/target/core.c
127
nsid = cur->nsid;
drivers/nvme/target/passthru.c
54
struct nvme_ns_id_desc *cur = data + pos;
drivers/nvme/target/passthru.c
56
if (cur->nidl == 0)
drivers/nvme/target/passthru.c
58
if (cur->nidt == NVME_NIDT_CSI) {
drivers/nvme/target/passthru.c
59
memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
drivers/nvme/target/passthru.c
63
len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
drivers/nvme/target/passthru.c
68
struct nvme_ns_id_desc *cur = data;
drivers/nvme/target/passthru.c
70
cur->nidt = NVME_NIDT_CSI;
drivers/nvme/target/passthru.c
71
cur->nidl = NVME_NIDT_CSI_LEN;
drivers/nvme/target/passthru.c
72
memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
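The nvme host and target passthru entries above both walk a buffer of variable-length Namespace Identification Descriptors: each 4-byte header carries a type (nidt) and payload length (nidl), the payload follows immediately, and a zero nidl terminates the list. A sketch of the cursor arithmetic, using the header layout from the NVMe spec:

#include <linux/types.h>

struct ns_id_desc {                     /* NVMe NS ID descriptor header */
        u8 nidt;                        /* identifier type */
        u8 nidl;                        /* identifier length */
        u8 rsvd[2];
};

static void walk_ns_descs(void *data, size_t size)
{
        size_t pos = 0;

        while (pos + sizeof(struct ns_id_desc) <= size) {
                struct ns_id_desc *cur = data + pos;

                if (cur->nidl == 0)
                        break;          /* zero length ends the list */

                /* payload lives at data + pos + sizeof(*cur),
                 * cur->nidl bytes long; dispatch on cur->nidt here */
                pos += sizeof(*cur) + cur->nidl;
        }
}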
drivers/nvmem/core.c
738
unsigned int cur = 0;
drivers/nvmem/core.c
744
if (keepout->start < cur) {
drivers/nvmem/core.c
763
((keepout->start != cur) &&
drivers/nvmem/core.c
764
(keepout->start - cur < nvmem->word_size))) {
drivers/nvmem/core.c
782
cur = keepout->end;
drivers/of/base.c
1309
it->cur = list;
drivers/of/base.c
1324
if (!it->cur || it->phandle_end >= it->list_end)
drivers/of/base.c
1327
it->cur = it->phandle_end;
drivers/of/base.c
1330
it->phandle = be32_to_cpup(it->cur++);
drivers/of/base.c
1372
if (it->cur + count > it->list_end) {
drivers/of/base.c
1376
count, it->list_end - it->cur);
drivers/of/base.c
1380
count, it->list_end - it->cur);
drivers/of/base.c
1385
it->phandle_end = it->cur + count;
drivers/of/base.c
1412
args[i] = be32_to_cpup(it->cur++);
drivers/of/base.c
1524
struct device_node *cur, *new = NULL;
drivers/of/base.c
1545
cur = out_args->np;
drivers/of/base.c
1546
ret = of_property_read_u32(cur, cells_name, &list_size);
drivers/of/base.c
1555
while (cur) {
drivers/of/base.c
1557
map = of_get_property(cur, map_name, &map_len);
drivers/of/base.c
1564
mask = of_get_property(cur, mask_name, NULL);
drivers/of/base.c
1610
pass = of_get_property(cur, pass_name, NULL);
drivers/of/base.c
1633
of_node_put(cur);
drivers/of/base.c
1634
cur = new;
drivers/of/base.c
1638
of_node_put(cur);
drivers/of/fdt.c
102
int cur;
drivers/of/fdt.c
106
for (cur = fdt_first_property_offset(blob, offset);
drivers/of/fdt.c
107
cur >= 0;
drivers/of/fdt.c
108
cur = fdt_next_property_offset(blob, cur)) {
drivers/of/fdt.c
113
val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
drivers/of/fdt.c
115
pr_warn("Cannot locate property at 0x%x\n", cur);
drivers/of/fdt.c
120
pr_warn("Cannot find property name at 0x%x\n", cur);
drivers/of/property.c
626
const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur,
drivers/of/property.c
629
const void *curv = cur;
drivers/of/property.c
634
if (!cur) {
drivers/of/property.c
639
curv += sizeof(*cur);
drivers/of/property.c
649
const char *of_prop_next_string(const struct property *prop, const char *cur)
drivers/of/property.c
651
const void *curv = cur;
drivers/of/property.c
656
if (!cur)
drivers/of/property.c
659
curv += strlen(cur) + 1;
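of_prop_next_u32() and of_prop_next_string() above are cursor-style iterators: pass NULL to fetch the first element, then feed the returned cursor back in to advance; they are what the of_property_for_each_u32()/..._string() macros are built on. A usage sketch; the property name is invented for illustration.

#include <linux/of.h>
#include <linux/printk.h>

static void dump_u32_prop(struct device_node *np)
{
        const struct property *prop;
        const __be32 *cur = NULL;       /* NULL cursor = start */
        u32 val;

        prop = of_find_property(np, "example-u32-table", NULL);
        if (!prop)
                return;

        while ((cur = of_prop_next_u32(prop, cur, &val)))
                pr_info("entry: %u\n", val);
}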
drivers/of/resolver.c
79
char *cur, *end, *node_path, *prop_name, *s;
drivers/of/resolver.c
88
for (cur = value; cur < end; cur += len + 1) {
drivers/of/resolver.c
89
len = strlen(cur);
drivers/of/resolver.c
91
node_path = cur;
drivers/of/resolver.c
92
s = strchr(cur, ':');
drivers/parisc/eisa.c
435
char *cur = str;
drivers/parisc/eisa.c
439
while (cur != NULL) {
drivers/parisc/eisa.c
442
val = (int) simple_strtoul(cur, &pe, 0);
drivers/parisc/eisa.c
453
if ((cur = strchr(cur, ','))) {
drivers/parisc/eisa.c
454
cur++;
drivers/pci/controller/pci-mvebu.c
484
struct mvebu_pcie_window *cur)
drivers/pci/controller/pci-mvebu.c
488
if (desired->base == cur->base && desired->remap == cur->remap &&
drivers/pci/controller/pci-mvebu.c
489
desired->size == cur->size)
drivers/pci/controller/pci-mvebu.c
492
if (cur->size != 0) {
drivers/pci/controller/pci-mvebu.c
493
mvebu_pcie_del_windows(port, cur->base, cur->size);
drivers/pci/controller/pci-mvebu.c
494
cur->size = 0;
drivers/pci/controller/pci-mvebu.c
495
cur->base = 0;
drivers/pci/controller/pci-mvebu.c
510
cur->size = 0;
drivers/pci/controller/pci-mvebu.c
511
cur->base = 0;
drivers/pci/controller/pci-mvebu.c
515
*cur = *desired;
drivers/pci/hotplug/rpaphp_core.c
340
const __be32 *cur;
drivers/pci/hotplug/rpaphp_core.c
348
cur = of_prop_next_u32(info, NULL, &count);
drivers/pci/hotplug/rpaphp_core.c
349
if (cur)
drivers/pci/hotplug/rpaphp_core.c
350
cur++;
drivers/pci/hotplug/rpaphp_core.c
354
of_read_drc_info_cell(&info, &cur, &drc);
drivers/pinctrl/bcm/pinctrl-bcm2835.c
295
enum bcm2835_fsel cur;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
300
cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
drivers/pinctrl/bcm/pinctrl-bcm2835.c
303
bcm2835_functions[cur]);
drivers/pinctrl/bcm/pinctrl-bcm2835.c
305
if (cur == fsel)
drivers/pinctrl/bcm/pinctrl-bcm2835.c
308
if (cur != BCM2835_FSEL_GPIO_IN && fsel != BCM2835_FSEL_GPIO_IN) {
drivers/pinctrl/bcm/pinctrl-brcmstb.c
117
cur = (val >> BIT_TO_SHIFT(bit)) & fsel_mask;
drivers/pinctrl/bcm/pinctrl-brcmstb.c
121
pc->func_names[cur]);
drivers/pinctrl/bcm/pinctrl-brcmstb.c
123
if (cur != fsel) {
drivers/pinctrl/bcm/pinctrl-brcmstb.c
87
int cur;
drivers/pinctrl/pinctrl-single.c
122
int cur;
drivers/pinctrl/pinctrl-single.c
697
i = pcs->pins.cur;
drivers/pinctrl/pinctrl-single.c
718
pcs->pins.cur++;
drivers/pinctrl/sophgo/pinctrl-cv18xx.c
176
static int cv1800_dt_node_to_map_post(struct device_node *cur,
drivers/pinctrl/sophgo/pinctrl-cv18xx.c
185
ret = of_property_read_u32(cur, "power-source", &power);
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
66
static int sophgo_dt_node_to_map_post(struct device_node *cur,
drivers/pinctrl/sophgo/pinctrl-sophgo-common.c
72
return pctrl->data->cfg_ops->dt_node_to_map_post(cur, pctrl,
drivers/pinctrl/sophgo/pinctrl-sophgo.h
46
int (*dt_node_to_map_post)(struct device_node *cur,
drivers/platform/x86/eeepc-laptop.c
323
int cur;
drivers/platform/x86/eeepc-laptop.c
328
c->cur = get_acpi(eeepc, CM_ASL_CPUFV);
drivers/platform/x86/eeepc-laptop.c
329
if (c->cur < 0)
drivers/platform/x86/eeepc-laptop.c
332
c->num = (c->cur >> 8) & 0xff;
drivers/platform/x86/eeepc-laptop.c
333
c->cur &= 0xff;
drivers/platform/x86/eeepc-laptop.c
365
return sysfs_emit(buf, "%#x\n", (c.num << 8) | c.cur);
drivers/platform/x86/msi-wmi.c
188
ktime_t cur = ktime_get_real();
drivers/platform/x86/msi-wmi.c
189
ktime_t diff = ktime_sub(cur, last_pressed);
drivers/platform/x86/msi-wmi.c
198
last_pressed = cur;
drivers/pnp/core.c
60
struct pnp_protocol *cur = to_pnp_protocol(pos);
drivers/pnp/core.c
61
if (cur->number == nodenum) {
drivers/power/sequencing/core.c
302
const struct pwrseq_unit_data *tmp, **cur;
drivers/power/sequencing/core.c
310
for (cur = data->deps; cur && *cur; cur++) {
drivers/power/sequencing/core.c
311
tmp = radix_tree_lookup(visited_units, (unsigned long)*cur);
drivers/power/sequencing/core.c
317
ret = pwrseq_check_unit_deps(*cur, visited_units);
drivers/power/supply/apm_power.c
121
union power_supply_propval cur;
drivers/power/supply/apm_power.c
181
if (_MPSY_PROP(cur_avg_prop, &cur)) {
drivers/power/supply/apm_power.c
183
if (_MPSY_PROP(cur_now_prop, &cur))
drivers/power/supply/apm_power.c
188
return ((cur.intval - full.intval) * 60L) / I.intval;
drivers/power/supply/apm_power.c
190
return -((cur.intval - empty.intval) * 60L) / I.intval;
drivers/power/supply/apm_power.c
217
union power_supply_propval empty, full, cur;
drivers/power/supply/apm_power.c
256
if (_MPSY_PROP(avg_prop, &cur)) {
drivers/power/supply/apm_power.c
258
if (_MPSY_PROP(now_prop, &cur))
drivers/power/supply/apm_power.c
269
ret = ((cur.intval - empty.intval) * 100L) /
drivers/power/supply/sc2731_charger.c
110
static int sc2731_charger_set_current(struct sc2731_charger_info *info, u32 cur)
drivers/power/supply/sc2731_charger.c
115
if (cur > SC2731_CURRENT_LIMIT_2000)
drivers/power/supply/sc2731_charger.c
116
cur = SC2731_CURRENT_LIMIT_2000;
drivers/power/supply/sc2731_charger.c
117
else if (cur < SC2731_CURRENT_PRECHG)
drivers/power/supply/sc2731_charger.c
118
cur = SC2731_CURRENT_PRECHG;
drivers/power/supply/sc2731_charger.c
121
val = (cur - SC2731_CURRENT_PRECHG) / SC2731_CURRENT_STEP;
drivers/power/supply/sc2731_charger.c
150
u32 *cur)
drivers/power/supply/sc2731_charger.c
160
*cur = val * SC2731_CURRENT_STEP + SC2731_CURRENT_PRECHG;
drivers/power/supply/sc2731_charger.c
166
u32 *cur)
drivers/power/supply/sc2731_charger.c
179
*cur = SC2731_CURRENT_LIMIT_100;
drivers/power/supply/sc2731_charger.c
183
*cur = SC2731_CURRENT_LIMIT_2000;
drivers/power/supply/sc2731_charger.c
187
*cur = SC2731_CURRENT_LIMIT_900;
drivers/power/supply/sc2731_charger.c
191
*cur = SC2731_CURRENT_LIMIT_500;
drivers/power/supply/sc2731_charger.c
244
u32 cur;
drivers/power/supply/sc2731_charger.c
260
ret = sc2731_charger_get_current(info, &cur);
drivers/power/supply/sc2731_charger.c
264
val->intval = cur * 1000;
drivers/power/supply/sc2731_charger.c
272
ret = sc2731_charger_get_current_limit(info, &cur);
drivers/power/supply/sc2731_charger.c
276
val->intval = cur * 1000;
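The sc2731 entries above show a clamp-then-quantize step: the requested current is bounded to the hardware range and converted to register steps, and the getter inverts the mapping. A sketch under invented limit/step constants (the real SC2731_* values are not reproduced here):

    #include <stdint.h>
    #include <stdio.h>

    #define CUR_MIN_MA   300u   /* hypothetical precharge floor */
    #define CUR_MAX_MA  2000u   /* hypothetical hardware ceiling */
    #define CUR_STEP_MA   50u   /* hypothetical register step size */

    static uint32_t current_to_regval(uint32_t cur)
    {
        if (cur > CUR_MAX_MA)
            cur = CUR_MAX_MA;
        else if (cur < CUR_MIN_MA)
            cur = CUR_MIN_MA;
        return (cur - CUR_MIN_MA) / CUR_STEP_MA;
    }

    int main(void)
    {
        /* (1234 - 300) / 50 truncates to 18: requests land on a step. */
        printf("reg = %u\n", current_to_regval(1234));
        return 0;
    }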
drivers/power/supply/sc27xx_fuel_gauge.c
288
int volt, cur, oci, ocv, ret;
drivers/power/supply/sc27xx_fuel_gauge.c
309
&cur);
drivers/power/supply/sc27xx_fuel_gauge.c
313
cur <<= 1;
drivers/power/supply/sc27xx_fuel_gauge.c
314
oci = sc27xx_fgu_adc_to_current(data, cur - SC27XX_FGU_CUR_BASIC_ADC);
drivers/power/supply/sc27xx_fuel_gauge.c
407
u32 cur;
drivers/power/supply/sc27xx_fuel_gauge.c
410
&cur);
drivers/power/supply/sc27xx_fuel_gauge.c
418
*val = sc27xx_fgu_adc_to_current(data, cur - SC27XX_FGU_CUR_BASIC_ADC);
drivers/power/supply/sc27xx_fuel_gauge.c
473
int ret, cur;
drivers/power/supply/sc27xx_fuel_gauge.c
475
ret = regmap_read(data->regmap, data->base + SC27XX_FGU_CURRENT, &cur);
drivers/power/supply/sc27xx_fuel_gauge.c
483
*val = sc27xx_fgu_adc_to_current(data, cur - SC27XX_FGU_CUR_BASIC_ADC);
drivers/power/supply/sc27xx_fuel_gauge.c
490
int vol, cur, ret, temp, resistance;
drivers/power/supply/sc27xx_fuel_gauge.c
496
ret = sc27xx_fgu_get_current(data, &cur);
drivers/power/supply/sc27xx_fuel_gauge.c
512
*val = vol * 1000 - cur * resistance;
drivers/power/supply/twl4030_charger.c
242
int cur;
drivers/power/supply/twl4030_charger.c
253
cur = bci->ac_cur;
drivers/power/supply/twl4030_charger.c
256
cur = bci->usb_cur;
drivers/power/supply/twl4030_charger.c
258
if (cur > bci->usb_cur_target) {
drivers/power/supply/twl4030_charger.c
259
cur = bci->usb_cur_target;
drivers/power/supply/twl4030_charger.c
260
bci->usb_cur = cur;
drivers/power/supply/twl4030_charger.c
262
if (cur < bci->usb_cur_target)
drivers/power/supply/twl4030_charger.c
273
if (cur > 852000)
drivers/power/supply/twl4030_charger.c
354
reg = ua2regval(cur, cgain);
drivers/pwm/pwm-sti.c
161
struct pwm_device *cur = pc->cur;
drivers/pwm/pwm-sti.c
168
period_same = (period_ns == pwm_get_period(cur));
drivers/pwm/pwm-sti.c
183
((ncfg == 1) && (pwm->hwpwm == cur->hwpwm)) ||
drivers/pwm/pwm-sti.c
184
((ncfg == 1) && (pwm->hwpwm != cur->hwpwm) && period_same) ||
drivers/pwm/pwm-sti.c
228
pc->cur = pwm;
drivers/pwm/pwm-sti.c
92
struct pwm_device *cur;
drivers/s390/block/dasd_devmap.c
382
char *cur;
drivers/s390/block/dasd_devmap.c
386
cur = dasd[i];
drivers/s390/block/dasd_devmap.c
387
if (!cur)
drivers/s390/block/dasd_devmap.c
389
if (*cur == '\0')
drivers/s390/block/dasd_devmap.c
392
rc = dasd_parse_keyword(cur);
drivers/s390/block/dasd_devmap.c
394
rc = dasd_parse_range(cur);
drivers/scsi/ibmvscsi/ibmvfc.c
3728
crq = &async_crq->msgs.async[async_crq->cur];
drivers/scsi/ibmvscsi/ibmvfc.c
3730
if (++async_crq->cur == async_crq->size)
drivers/scsi/ibmvscsi/ibmvfc.c
3731
async_crq->cur = 0;
drivers/scsi/ibmvscsi/ibmvfc.c
3751
crq = &queue->msgs.crq[queue->cur];
drivers/scsi/ibmvscsi/ibmvfc.c
3753
if (++queue->cur == queue->size)
drivers/scsi/ibmvscsi/ibmvfc.c
3754
queue->cur = 0;
drivers/scsi/ibmvscsi/ibmvfc.c
3903
crq = &scrq->msgs.scrq[scrq->cur].crq;
drivers/scsi/ibmvscsi/ibmvfc.c
3905
if (++scrq->cur == scrq->size)
drivers/scsi/ibmvscsi/ibmvfc.c
3906
scrq->cur = 0;
drivers/scsi/ibmvscsi/ibmvfc.c
5850
queue->cur = 0;
drivers/scsi/ibmvscsi/ibmvfc.c
6013
scrq->cur = 0;
drivers/scsi/ibmvscsi/ibmvfc.c
716
vhost->async_crq.cur = 0;
drivers/scsi/ibmvscsi/ibmvfc.c
988
crq->cur = 0;
drivers/scsi/ibmvscsi/ibmvfc.h
798
int size, cur;
drivers/scsi/ibmvscsi/ibmvscsi.c
173
crq = &queue->msgs[queue->cur];
drivers/scsi/ibmvscsi/ibmvscsi.c
175
if (++queue->cur == queue->size)
drivers/scsi/ibmvscsi/ibmvscsi.c
176
queue->cur = 0;
drivers/scsi/ibmvscsi/ibmvscsi.c
299
queue->cur = 0;
drivers/scsi/ibmvscsi/ibmvscsi.c
367
queue->cur = 0;
drivers/scsi/ibmvscsi/ibmvscsi.h
45
int size, cur;
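The ibmvfc/ibmvscsi entries above all use the same wrap-around cursor: take the slot at cur, then advance and reset to zero at size rather than using a modulo. A runnable sketch of that ring-buffer step:

    #include <stdio.h>

    struct queue {
        int msgs[4];
        int size, cur;
    };

    static int *queue_next(struct queue *q)
    {
        int *slot = &q->msgs[q->cur];

        if (++q->cur == q->size)   /* wrap instead of '%': cheaper, clearer */
            q->cur = 0;
        return slot;
    }

    int main(void)
    {
        struct queue q = { {10, 11, 12, 13}, 4, 0 };

        for (int i = 0; i < 6; i++)            /* visits slots 0,1,2,3,0,1 */
            printf("%d\n", *queue_next(&q));
        return 0;
    }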
drivers/scsi/lpfc/lpfc_ct.c
3592
struct lpfc_vmid *vmp, *cur;
drivers/scsi/lpfc/lpfc_ct.c
3671
hash_for_each(vport->hash_table, bucket, cur, hnode)
drivers/scsi/lpfc/lpfc_ct.c
3672
hash_del(&cur->hnode);
drivers/scsi/lpfc/lpfc_scsi.c
5493
struct lpfc_vmid *cur;
drivers/scsi/lpfc/lpfc_scsi.c
5503
hash_for_each(vport->hash_table, bucket, cur, hnode)
drivers/scsi/lpfc/lpfc_scsi.c
5504
hash_del(&cur->hnode);
drivers/scsi/lpfc/lpfc_vmid.c
301
struct lpfc_vmid *cur;
drivers/scsi/lpfc/lpfc_vmid.c
322
hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
drivers/scsi/lpfc/lpfc_vmid.c
323
hash_del(&cur->hnode);
drivers/scsi/ncr53c8xx.c
642
char *cur = setup_token;
drivers/scsi/ncr53c8xx.c
646
while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
drivers/scsi/ncr53c8xx.c
649
if (!strncmp(p, cur, pc - cur))
drivers/scsi/ncr53c8xx.c
651
cur = pc;
drivers/scsi/ncr53c8xx.c
659
char *cur = str;
drivers/scsi/ncr53c8xx.c
664
while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
drivers/scsi/ncr53c8xx.c
678
switch (get_setup_token(cur)) {
drivers/scsi/ncr53c8xx.c
767
printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
drivers/scsi/ncr53c8xx.c
771
if ((cur = strchr(cur, ARG_SEP)) != NULL)
drivers/scsi/ncr53c8xx.c
772
++cur;
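The ncr53c8xx setup parser above scans "name:value" pairs with strchr(), advancing cur past each separator. A sketch of that scan; the option string and the ',' pair separator are assumptions for the example (the driver uses its own ARG_SEP):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *cur = "tags:4,sync:1";
        const char *pc;

        while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
            printf("token '%.*s', rest \"%s\"\n", (int)(pc - cur), cur, pc + 1);
            if ((cur = strchr(cur, ',')) != NULL)  /* next "key:value" pair */
                ++cur;
        }
        return 0;
    }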
drivers/scsi/snic/snic_disc.c
174
struct list_head *cur, *nxt;
drivers/scsi/snic/snic_disc.c
177
list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
drivers/scsi/snic/snic_disc.c
178
tgt = list_entry(cur, struct snic_tgt, list);
drivers/scsi/snic/snic_disc.c
534
struct list_head *cur, *nxt;
drivers/scsi/snic/snic_disc.c
542
list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
drivers/scsi/snic/snic_disc.c
543
tgt = list_entry(cur, struct snic_tgt, list);
drivers/scsi/snic/snic_io.c
387
struct list_head *cur, *nxt;
drivers/scsi/snic/snic_io.c
391
list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
drivers/scsi/snic/snic_io.c
392
rqi = list_entry(cur, struct snic_req_info, list);
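The snic loops above use the _safe list iterators because they delete the node they are standing on; the nxt pointer is captured before the current node becomes invalid. The same invariant, shown with a plain singly-linked list instead of the kernel's list.h macros:

    #include <stdlib.h>
    #include <stdio.h>

    struct node { struct node *next; int v; };

    static void free_all(struct node **head)
    {
        struct node *cur, *nxt;

        for (cur = *head; cur; cur = nxt) {
            nxt = cur->next;   /* save the successor before freeing 'cur' */
            free(cur);
        }
        *head = NULL;
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->v = i;
            n->next = head;
            head = n;
        }
        free_all(&head);
        printf("list freed\n");
        return 0;
    }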
drivers/scsi/sym53c8xx_2/sym_fw.c
351
u32 *end, *cur;
drivers/scsi/sym53c8xx_2/sym_fw.c
354
cur = start;
drivers/scsi/sym53c8xx_2/sym_fw.c
357
while (cur < end) {
drivers/scsi/sym53c8xx_2/sym_fw.c
359
opcode = *cur;
drivers/scsi/sym53c8xx_2/sym_fw.c
369
sym_name(np), (int) (cur-start));
drivers/scsi/sym53c8xx_2/sym_fw.c
370
++cur;
drivers/scsi/sym53c8xx_2/sym_fw.c
379
*cur++ = 0;
drivers/scsi/sym53c8xx_2/sym_fw.c
384
printf ("%d: <%x>\n", (int) (cur-start),
drivers/scsi/sym53c8xx_2/sym_fw.c
408
tmp1 = cur[1];
drivers/scsi/sym53c8xx_2/sym_fw.c
409
tmp2 = cur[2];
drivers/scsi/sym53c8xx_2/sym_fw.c
412
sym_name(np), (int) (cur-start));
drivers/scsi/sym53c8xx_2/sym_fw.c
485
*cur++ = cpu_to_scr(opcode);
drivers/scsi/sym53c8xx_2/sym_fw.c
492
*cur = cpu_to_scr(*cur);
drivers/scsi/sym53c8xx_2/sym_fw.c
493
++cur;
drivers/scsi/sym53c8xx_2/sym_fw.c
501
old = *cur;
drivers/scsi/sym53c8xx_2/sym_fw.c
534
*cur++ = cpu_to_scr(new);
drivers/soc/mediatek/mtk-mmsys.c
184
enum mtk_ddp_comp_id cur,
drivers/soc/mediatek/mtk-mmsys.c
192
if (cur == routes[i].from_comp && next == routes[i].to_comp)
drivers/soc/mediatek/mtk-mmsys.c
203
enum mtk_ddp_comp_id cur,
drivers/soc/mediatek/mtk-mmsys.c
211
if (cur == routes[i].from_comp && next == routes[i].to_comp)
drivers/spi/spi-bcm-qspi.c
601
static bool bcmspi_parms_did_change(const struct bcm_qspi_parms * const cur,
drivers/spi/spi-bcm-qspi.c
604
return (cur->speed_hz != prev->speed_hz) ||
drivers/spi/spi-bcm-qspi.c
605
(cur->mode != prev->mode) ||
drivers/spi/spi-bcm-qspi.c
606
(cur->bits_per_word != prev->bits_per_word);
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2450
struct ia_css_dvs_grid_info *cur =
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2455
if (!coefs || !cur)
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2462
if (sizeof(*cur) != sizeof(coefs->grid) ||
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2463
memcmp(&coefs->grid, cur, sizeof(coefs->grid))) {
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2483
css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2518
if (sizeof(*cur) != sizeof(dvs2_coefs.grid) ||
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2519
memcmp(&dvs2_coefs.grid, cur, sizeof(dvs2_coefs.grid))) {
drivers/staging/media/atomisp/pci/atomisp_cmd.c
2539
css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
2574
struct ia_css_dvs_grid_info *cur =
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
2577
if (!cur) {
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
2582
if (sizeof(*cur) != sizeof(*atomgrid)) {
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
2587
if (!cur->enable) {
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
2592
return memcmp(atomgrid, cur, sizeof(*cur));
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1147
union acpi_object *obj, *cur = NULL;
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1172
union acpi_object *cur = &obj->package.elements[i];
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1174
if (cur->type == ACPI_TYPE_INTEGER)
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1176
i, cur->type, cur->integer.value);
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1177
else if (cur->type == ACPI_TYPE_STRING)
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1179
i, cur->type, cur->string.pointer);
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1182
i, cur->type);
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1191
cur = &obj->package.elements[i + 1];
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1196
if (!cur) {
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1209
if (cur->type != ACPI_TYPE_STRING) {
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1216
cur->string.pointer);
drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
1217
strscpy(out, cur->string.pointer, *out_len);
drivers/staging/media/imx/imx-media-fim.c
102
fim->enabled = en->cur.val;
drivers/staging/media/imx/imx-media-fim.c
103
fim->icap_flags = icap_edge->cur.val;
drivers/staging/media/imx/imx-media-fim.c
104
fim->icap_channel = icap_chan->cur.val;
drivers/staging/media/imx/imx-media-fim.c
105
fim->num_avg = num->cur.val;
drivers/staging/media/imx/imx-media-fim.c
106
fim->num_skip = skip->cur.val;
drivers/staging/media/imx/imx-media-fim.c
107
fim->tolerance_min = tol_min->cur.val;
drivers/staging/media/imx/imx-media-fim.c
108
fim->tolerance_max = tol_max->cur.val;
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
1010
if (memcmp((void *)(p+2), cur->ssid.ssid, cur->ssid.ssid_length))
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
1013
if (ie_len != cur->ssid.ssid_length)
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
506
struct wlan_bssid_ex *cur = &pmlmeinfo->network;
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
528
if ((ielen != 0 && false == !memcmp((void *)(p+2), (void *)cur->ssid.ssid, cur->ssid.ssid_length))
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
945
struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
drivers/target/target_core_configfs.c
2825
char *cur = page;
drivers/target/target_core_configfs.c
2832
cur += scnprintf(cur, end - cur, "%s/%s\n",
drivers/target/target_core_configfs.c
2835
if (WARN_ON_ONCE(cur >= end))
drivers/target/target_core_configfs.c
2840
return cur - page;
drivers/tty/vt/keyboard.c
1985
static char *vt_kdskbsent(char *kbs, unsigned char cur)
drivers/tty/vt/keyboard.c
1988
char *cur_f = func_table[cur];
drivers/tty/vt/keyboard.c
1995
func_table[cur] = kbs;
drivers/tty/vt/keyboard.c
1997
return __test_and_set_bit(cur, is_kmalloc) ? cur_f : NULL;
drivers/tty/vt/keyboard.c
555
int i, cur = fg_console;
drivers/tty/vt/keyboard.c
559
cur = want_console;
drivers/tty/vt/keyboard.c
561
for (i = cur - 1; i != cur; i--) {
drivers/tty/vt/keyboard.c
572
int i, cur = fg_console;
drivers/tty/vt/keyboard.c
576
cur = want_console;
drivers/tty/vt/keyboard.c
578
for (i = cur+1; i != cur; i++) {
drivers/usb/gadget/udc/bcm63xx_udc.c
1207
struct bcm63xx_req *breq = our_req(req), *cur;
drivers/usb/gadget/udc/bcm63xx_udc.c
1217
cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
drivers/usb/gadget/udc/bcm63xx_udc.c
1220
if (breq == cur) {
drivers/usb/musb/tusb6010_omap.c
385
int cur = (reg & (0xf << (i * 5))) >> (i * 5);
drivers/usb/musb/tusb6010_omap.c
386
if (cur == 0) {
drivers/usb/typec/altmodes/displayport.c
591
u8 cur;
drivers/usb/typec/altmodes/displayport.c
597
cur = DP_CONF_CURRENTLY(dp->data.conf);
drivers/usb/typec/altmodes/displayport.c
599
len = sprintf(buf, "%s ", cur ? "USB" : "[USB]");
drivers/usb/typec/altmodes/displayport.c
602
if (i == cur)
drivers/usb/typec/altmodes/displayport.c
694
u8 cur;
drivers/usb/typec/altmodes/displayport.c
699
cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
drivers/usb/typec/altmodes/displayport.c
705
if (i == cur)
drivers/usb/typec/ucsi/displayport.c
54
u8 cur = 0;
drivers/usb/typec/ucsi/displayport.c
70
ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
drivers/usb/typec/ucsi/displayport.c
74
cur = 0xff;
drivers/usb/typec/ucsi/displayport.c
77
if (cur != 0xff) {
drivers/usb/typec/ucsi/displayport.c
78
ret = dp->con->port_altmode[cur] == alt ? 0 : -EBUSY;
drivers/usb/typec/ucsi/thunderbolt.c
100
if (cur != 0xff) {
drivers/usb/typec/ucsi/thunderbolt.c
101
if (cur >= UCSI_MAX_ALTMODES || con->port_altmode[cur] != alt)
drivers/usb/typec/ucsi/thunderbolt.c
86
u8 cur = 0;
drivers/usb/typec/ucsi/thunderbolt.c
93
ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
drivers/usb/typec/ucsi/thunderbolt.c
97
cur = 0xff;
drivers/usb/typec/ucsi/ucsi.c
325
u8 cur;
drivers/usb/typec/ucsi/ucsi.c
329
ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
drivers/usb/typec/ucsi/ucsi.c
336
cur = 0xff;
drivers/usb/typec/ucsi/ucsi.c
339
if (cur < UCSI_MAX_ALTMODES)
drivers/usb/typec/ucsi/ucsi.c
340
altmode = typec_altmode_get_partner(con->port_altmode[cur]);
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1051
u32 cur = hisi_acc_vdev->mig_state;
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1054
if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1065
if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1079
if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1086
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1097
if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1102
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1113
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1121
if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
1126
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
drivers/vfio/pci/mlx5/main.c
1070
u32 cur = mvdev->mig_state;
drivers/vfio/pci/mlx5/main.c
1073
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/mlx5/main.c
1081
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
drivers/vfio/pci/mlx5/main.c
1089
if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
drivers/vfio/pci/mlx5/main.c
1090
(cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
drivers/vfio/pci/mlx5/main.c
1098
if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
drivers/vfio/pci/mlx5/main.c
1099
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
drivers/vfio/pci/mlx5/main.c
1107
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/mlx5/main.c
1118
if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/mlx5/main.c
1123
if ((cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
drivers/vfio/pci/mlx5/main.c
1124
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
drivers/vfio/pci/mlx5/main.c
1149
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
drivers/vfio/pci/mlx5/main.c
1160
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/mlx5/main.c
1165
if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
drivers/vfio/pci/mlx5/main.c
1166
(cur == VFIO_DEVICE_STATE_RUNNING_P2P &&
drivers/vfio/pci/mlx5/main.c
1178
if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/pds/lm.c
365
enum vfio_device_mig_state cur = pds_vfio->state;
drivers/vfio/pci/pds/lm.c
368
if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/pds/lm.c
382
if (cur == VFIO_DEVICE_STATE_STOP_COPY && next == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/pds/lm.c
388
if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RESUMING) {
drivers/vfio/pci/pds/lm.c
396
if (cur == VFIO_DEVICE_STATE_RESUMING && next == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/pds/lm.c
405
if (cur == VFIO_DEVICE_STATE_RUNNING && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
drivers/vfio/pci/pds/lm.c
416
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_RUNNING) {
drivers/vfio/pci/pds/lm.c
426
if (cur == VFIO_DEVICE_STATE_STOP && next == VFIO_DEVICE_STATE_RUNNING_P2P) {
drivers/vfio/pci/pds/lm.c
435
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && next == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/qat/main.c
381
u32 cur = qat_vdev->mig_state;
drivers/vfio/pci/qat/main.c
393
if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
drivers/vfio/pci/qat/main.c
394
(cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
drivers/vfio/pci/qat/main.c
401
if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
drivers/vfio/pci/qat/main.c
402
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
drivers/vfio/pci/qat/main.c
407
if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
drivers/vfio/pci/qat/main.c
408
(cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
drivers/vfio/pci/qat/main.c
411
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/qat/main.c
422
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
drivers/vfio/pci/qat/main.c
433
if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
drivers/vfio/pci/qat/main.c
434
(cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
drivers/vfio/pci/qat/main.c
435
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
drivers/vfio/pci/qat/main.c
440
if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
drivers/vfio/pci/qat/main.c
441
(cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
drivers/vfio/pci/qat/main.c
452
if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/qat/main.c
463
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/vfio_pci_core.c
2010
struct vfio_pci_core_device *cur;
drivers/vfio/pci/vfio_pci_core.c
2023
list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
drivers/vfio/pci/vfio_pci_core.c
2024
if (cur->pdev == physfn) {
drivers/vfio/pci/vfio_pci_core.c
2025
vdev->sriov_pf_core_dev = cur;
drivers/vfio/pci/vfio_pci_core.c
2385
struct vfio_pci_core_device *cur;
drivers/vfio/pci/vfio_pci_core.c
2388
list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
drivers/vfio/pci/vfio_pci_core.c
2389
ret = pm_runtime_resume_and_get(&cur->pdev->dev);
drivers/vfio/pci/vfio_pci_core.c
2397
list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
drivers/vfio/pci/vfio_pci_core.c
2399
pm_runtime_put(&cur->pdev->dev);
drivers/vfio/pci/vfio_pci_core.c
2522
struct vfio_pci_core_device *cur;
drivers/vfio/pci/vfio_pci_core.c
2529
list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list)
drivers/vfio/pci/vfio_pci_core.c
2530
needs_reset |= cur->needs_reset;
drivers/vfio/pci/vfio_pci_core.c
2543
struct vfio_pci_core_device *cur;
drivers/vfio/pci/vfio_pci_core.c
2565
list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
drivers/vfio/pci/vfio_pci_core.c
2567
cur->needs_reset = false;
drivers/vfio/pci/vfio_pci_core.c
2570
pm_runtime_put(&cur->pdev->dev);
drivers/vfio/pci/virtio/migrate.c
1131
u32 cur = virtvdev->mig_state;
drivers/vfio/pci/virtio/migrate.c
1134
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/virtio/migrate.c
1139
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
drivers/vfio/pci/virtio/migrate.c
1144
if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
drivers/vfio/pci/virtio/migrate.c
1145
(cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) {
drivers/vfio/pci/virtio/migrate.c
1153
if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) ||
drivers/vfio/pci/virtio/migrate.c
1154
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) {
drivers/vfio/pci/virtio/migrate.c
1161
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/virtio/migrate.c
1172
if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) ||
drivers/vfio/pci/virtio/migrate.c
1173
(cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) ||
drivers/vfio/pci/virtio/migrate.c
1174
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) {
drivers/vfio/pci/virtio/migrate.c
1179
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
drivers/vfio/pci/virtio/migrate.c
1190
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/virtio/migrate.c
1195
if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) ||
drivers/vfio/pci/virtio/migrate.c
1196
(cur == VFIO_DEVICE_STATE_RUNNING_P2P &&
drivers/vfio/pci/virtio/migrate.c
1208
if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/xe/main.c
280
u32 cur = xe_vdev->mig_state;
drivers/vfio/pci/xe/main.c
284
"state: %s->%s\n", vfio_dev_state_str(cur), vfio_dev_state_str(new));
drivers/vfio/pci/xe/main.c
295
if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) {
drivers/vfio/pci/xe/main.c
303
if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) ||
drivers/vfio/pci/xe/main.c
304
(cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P))
drivers/vfio/pci/xe/main.c
307
if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) {
drivers/vfio/pci/xe/main.c
315
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
drivers/vfio/pci/xe/main.c
334
if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/xe/main.c
345
if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
drivers/vfio/pci/xe/main.c
364
if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
drivers/vfio/pci/xe/main.c
375
WARN(true, "Unknown state transition %d->%d", cur, new);
drivers/vfio/pci/xe/main.c
381
vfio_dev_state_str(cur), vfio_dev_state_str(new), ret);
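The long cur/new chains in the hisi_acc, mlx5, pds, qat, virtio and xe drivers above all have the same shape: each driver handler services exactly one adjacent arc of the VFIO migration state machine, and longer transitions are decomposed into a sequence of such arcs before the handler is called. A cut-down illustration of that one-arc-at-a-time structure; the enum and arc set here are simplified stand-ins, not the real VFIO uapi:

    #include <stdio.h>

    enum mig_state { STOP, RUNNING, STOP_COPY, RESUMING };

    /* Services exactly one adjacent (cur, new) arc; anything else is the
     * caller's job to break into steps. */
    static const char *step_device_state(enum mig_state cur, enum mig_state new)
    {
        if (cur == RUNNING && new == STOP)
            return "quiesce the device";
        if (cur == STOP && new == STOP_COPY)
            return "start saving device state";
        if (cur == STOP_COPY && new == STOP)
            return "finish saving";
        if (cur == STOP && new == RESUMING)
            return "start loading device state";
        if (cur == RESUMING && new == STOP)
            return "finish loading";
        if (cur == STOP && new == RUNNING)
            return "restart the device";
        return NULL;                       /* unknown arc: reject */
    }

    int main(void)
    {
        /* RUNNING -> RESUMING is reached via RUNNING->STOP, STOP->RESUMING. */
        printf("%s\n", step_device_state(RUNNING, STOP));
        printf("%s\n", step_device_state(STOP, RESUMING));
        return 0;
    }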
drivers/vfio/vfio_main.c
140
struct vfio_device *cur;
drivers/vfio/vfio_main.c
145
list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
drivers/vfio/vfio_main.c
146
open_count += cur->open_count;
drivers/vfio/vfio_main.c
155
struct vfio_device *cur;
drivers/vfio/vfio_main.c
159
list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
drivers/vfio/vfio_main.c
160
if (cur->dev == dev)
drivers/vfio/vfio_main.c
161
return cur;
drivers/video/fbdev/core/fb_defio.c
102
list_for_each_entry(cur, &fbdefio->pagereflist, list) {
drivers/video/fbdev/core/fb_defio.c
103
if (cur->offset > pageref->offset)
drivers/video/fbdev/core/fb_defio.c
106
pos = &cur->list;
drivers/video/fbdev/core/fb_defio.c
79
struct fb_deferred_io_pageref *pageref, *cur;
drivers/video/fbdev/nvidia/nv_hw.c
75
int cur = par->CurrentState->cursor1;
drivers/video/fbdev/nvidia/nv_hw.c
85
return (cur & 0x01);
drivers/video/fbdev/riva/riva_hw.c
173
char cur;
drivers/video/fbdev/riva/riva_hw.c
251
int last, next, cur;
drivers/video/fbdev/riva/riva_hw.c
259
cur = ainfo->cur;
drivers/video/fbdev/riva/riva_hw.c
327
last = cur;
drivers/video/fbdev/riva/riva_hw.c
328
cur = next;
drivers/video/fbdev/riva/riva_hw.c
330
switch (cur)
drivers/video/fbdev/riva/riva_hw.c
333
if (last==cur) misses = 0;
drivers/video/fbdev/riva/riva_hw.c
337
if (last!=cur)
drivers/video/fbdev/riva/riva_hw.c
349
if (last==cur) misses = 0;
drivers/video/fbdev/riva/riva_hw.c
353
if (last!=cur)
drivers/video/fbdev/riva/riva_hw.c
365
if (last==cur) misses = 0;
drivers/video/fbdev/riva/riva_hw.c
457
ainfo->cur = ENGINE;
drivers/video/fbdev/riva/riva_hw.c
469
ainfo->cur = MPORT;
drivers/video/fbdev/riva/riva_hw.c
484
ainfo->cur = GRAPHICS;
drivers/video/fbdev/riva/riva_hw.c
496
ainfo->cur = VIDEO;
drivers/video/fbdev/via/hw.c
1376
struct via_pll_config cur, up, down, best = {0, 1, 0};
drivers/video/fbdev/via/hw.c
1381
cur.rshift = limits[i].rshift;
drivers/video/fbdev/via/hw.c
1382
cur.divisor = limits[i].divisor;
drivers/video/fbdev/via/hw.c
1383
cur.multiplier = clk / ((f0 / cur.divisor)>>cur.rshift);
drivers/video/fbdev/via/hw.c
1384
f = abs(get_pll_output_frequency(f0, cur) - clk);
drivers/video/fbdev/via/hw.c
1385
up = down = cur;
drivers/video/fbdev/via/hw.c
1389
cur = up;
drivers/video/fbdev/via/hw.c
1391
cur = down;
drivers/video/fbdev/via/hw.c
1393
if (cur.multiplier < limits[i].multiplier_min)
drivers/video/fbdev/via/hw.c
1394
cur.multiplier = limits[i].multiplier_min;
drivers/video/fbdev/via/hw.c
1395
else if (cur.multiplier > limits[i].multiplier_max)
drivers/video/fbdev/via/hw.c
1396
cur.multiplier = limits[i].multiplier_max;
drivers/video/fbdev/via/hw.c
1398
f = abs(get_pll_output_frequency(f0, cur) - clk);
drivers/video/fbdev/via/hw.c
1400
best = cur;
drivers/virtio/virtio_pci_modern_dev.c
405
u64 cur;
drivers/virtio/virtio_pci_modern_dev.c
408
cur = vp_ioread32(&cfg->device_feature);
drivers/virtio/virtio_pci_modern_dev.c
409
features[i >> 1] |= cur << (32 * (i & 1));
drivers/virtio/virtio_pci_modern_dev.c
431
u64 cur;
drivers/virtio/virtio_pci_modern_dev.c
434
cur = vp_ioread32(&cfg->guest_feature);
drivers/virtio/virtio_pci_modern_dev.c
435
features[i >> 1] |= cur << (32 * (i & 1));
drivers/virtio/virtio_pci_modern_dev.c
452
u32 cur = features[i >> 1] >> (32 * (i & 1));
drivers/virtio/virtio_pci_modern_dev.c
455
vp_iowrite32(cur, &cfg->guest_feature);
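The virtio entries above assemble 64-bit feature words from paired 32-bit register reads: even indices fill the low half, odd indices the high half, selected by (i & 1). A sketch of that assembly, with read32() standing in for the driver's vp_ioread32():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read32(unsigned select)      /* fake register read */
    {
        return select & 1 ? 0xdeadbeefu : 0x12345678u;
    }

    int main(void)
    {
        uint64_t features[1] = { 0 };

        for (unsigned i = 0; i < 2; i++) {
            uint64_t cur = read32(i);
            /* even i fills bits 0..31, odd i fills bits 32..63 */
            features[i >> 1] |= cur << (32 * (i & 1));
        }
        printf("features = %#llx\n", (unsigned long long)features[0]);
        return 0;
    }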
drivers/watchdog/bcm2835_wdt.c
59
uint32_t cur;
drivers/watchdog/bcm2835_wdt.c
61
cur = readl(wdt->base + PM_RSTC);
drivers/watchdog/bcm2835_wdt.c
63
return !!(cur & PM_RSTC_WRCFG_FULL_RESET);
drivers/watchdog/bcm2835_wdt.c
69
uint32_t cur;
drivers/watchdog/bcm2835_wdt.c
76
cur = readl_relaxed(wdt->base + PM_RSTC);
drivers/watchdog/bcm2835_wdt.c
77
writel_relaxed(PM_PASSWORD | (cur & PM_RSTC_WRCFG_CLR) |
drivers/watchdog/bcm_kona_wdt.c
112
int ctl, cur, ctl_sec, cur_sec, res;
drivers/watchdog/bcm_kona_wdt.c
116
cur = cur_val & SECWDOG_COUNT_MASK;
drivers/watchdog/bcm_kona_wdt.c
118
cur_sec = TICKS_TO_SECS(cur, wdt);
drivers/watchdog/bcm_kona_wdt.c
126
cur_sec, cur, cur,
drivers/xen/grant-table.c
1599
unsigned int cur, extra;
drivers/xen/grant-table.c
1601
cur = nr_grant_frames;
drivers/xen/grant-table.c
1604
if (cur + extra > gnttab_max_grant_frames()) {
drivers/xen/grant-table.c
1608
cur, extra, gnttab_max_grant_frames(),
drivers/xen/grant-table.c
1613
rc = gnttab_map(cur, cur + extra - 1);
fs/afs/validation.c
200
time64_t cur = volume->creation_time;
fs/afs/validation.c
205
_enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);
fs/afs/validation.c
207
if (cur == TIME64_MIN) {
fs/afs/validation.c
212
if (new == cur)
fs/afs/validation.c
220
if (cur != old)
fs/afs/validation.c
267
time64_t cur = volume->update_time;
fs/afs/validation.c
271
_enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);
fs/afs/validation.c
273
if (cur == TIME64_MIN) {
fs/afs/validation.c
278
if (new == cur)
fs/afs/validation.c
294
if (cur == old) {
fs/btrfs/backref.c
2598
u32 cur;
fs/btrfs/backref.c
2633
for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
fs/btrfs/backref.c
2638
cur, found_key.objectid,
fs/btrfs/backref.c
3206
struct btrfs_backref_node *cur)
fs/btrfs/backref.c
3218
cur->is_reloc_root = 1;
fs/btrfs/backref.c
3221
root = find_reloc_root(cache->fs_info, cur->bytenr);
fs/btrfs/backref.c
3224
cur->root = root;
fs/btrfs/backref.c
3230
list_add(&cur->list, &cache->useless_node);
fs/btrfs/backref.c
3243
cur->level + 1);
fs/btrfs/backref.c
3260
btrfs_backref_link_edge(edge, cur, upper);
fs/btrfs/backref.c
3282
struct btrfs_backref_node *cur)
fs/btrfs/backref.c
3305
if (btrfs_root_level(&root->root_item) == cur->level) {
fs/btrfs/backref.c
3307
ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
fs/btrfs/backref.c
3320
list_add(&cur->list, &cache->useless_node);
fs/btrfs/backref.c
3322
cur->root = root;
fs/btrfs/backref.c
3327
level = cur->level + 1;
fs/btrfs/backref.c
3343
if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
fs/btrfs/backref.c
3346
cur->bytenr, level - 1, btrfs_root_id(root),
fs/btrfs/backref.c
3352
lower = cur;
fs/btrfs/backref.c
3461
struct btrfs_backref_node *cur)
fs/btrfs/backref.c
3467
ret = btrfs_backref_iter_start(iter, cur->bytenr);
fs/btrfs/backref.c
3484
WARN_ON(cur->checked);
fs/btrfs/backref.c
3485
if (!list_empty(&cur->upper)) {
fs/btrfs/backref.c
3490
ASSERT(list_is_singular(&cur->upper));
fs/btrfs/backref.c
3491
edge = list_first_entry(&cur->upper, struct btrfs_backref_edge,
fs/btrfs/backref.c
3548
ret = handle_direct_tree_backref(cache, &key, cur);
fs/btrfs/backref.c
3558
&key, node_key, cur);
fs/btrfs/backref.c
3568
cur->checked = 1;
fs/btrfs/backref.h
462
struct btrfs_backref_node *cur);
fs/btrfs/compression.c
377
u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
fs/btrfs/compression.c
409
while (cur < compressed_end) {
fs/btrfs/compression.c
411
pgoff_t pg_index = cur >> PAGE_SHIFT;
fs/btrfs/compression.c
420
u64 offset = offset_in_folio(folio, cur);
fs/btrfs/compression.c
434
cur += (folio_sz - offset);
fs/btrfs/compression.c
445
cur += folio_size(folio);
fs/btrfs/compression.c
463
btrfs_lock_extent(tree, cur, page_end, NULL);
fs/btrfs/compression.c
465
em = btrfs_lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
fs/btrfs/compression.c
473
if (!em || cur < em->start ||
fs/btrfs/compression.c
474
(cur + fs_info->sectorsize > btrfs_extent_map_end(em)) ||
fs/btrfs/compression.c
478
btrfs_unlock_extent(tree, cur, page_end, NULL);
fs/btrfs/compression.c
483
add_size = min(btrfs_extent_map_end(em), page_end + 1) - cur;
fs/btrfs/compression.c
485
btrfs_unlock_extent(tree, cur, page_end, NULL);
fs/btrfs/compression.c
498
offset_in_folio(folio, cur))) {
fs/btrfs/compression.c
509
btrfs_folio_set_lock(fs_info, folio, cur, add_size);
fs/btrfs/compression.c
511
cur += add_size;
fs/btrfs/compression.h
73
static inline u32 btrfs_calc_input_length(struct folio *folio, u64 range_end, u64 cur)
fs/btrfs/compression.h
76
ASSERT(folio_pos(folio) <= cur);
fs/btrfs/compression.h
77
ASSERT(cur < folio_next_pos(folio));
fs/btrfs/compression.h
78
return umin(range_end, folio_next_pos(folio)) - cur;
fs/btrfs/ctree.c
4614
struct extent_buffer *cur;
fs/btrfs/ctree.c
4626
cur = btrfs_read_lock_root_node(root);
fs/btrfs/ctree.c
4627
level = btrfs_header_level(cur);
fs/btrfs/ctree.c
4629
path->nodes[level] = cur;
fs/btrfs/ctree.c
4632
if (btrfs_header_generation(cur) < min_trans) {
fs/btrfs/ctree.c
4637
nritems = btrfs_header_nritems(cur);
fs/btrfs/ctree.c
4638
level = btrfs_header_level(cur);
fs/btrfs/ctree.c
4639
sret = btrfs_bin_search(cur, 0, min_key, &slot);
fs/btrfs/ctree.c
4652
btrfs_item_key_to_cpu(cur, min_key, slot);
fs/btrfs/ctree.c
4664
gen = btrfs_node_ptr_generation(cur, slot);
fs/btrfs/ctree.c
4687
cur = btrfs_read_node_slot(cur, slot);
fs/btrfs/ctree.c
4688
if (IS_ERR(cur)) {
fs/btrfs/ctree.c
4689
ret = PTR_ERR(cur);
fs/btrfs/ctree.c
4693
btrfs_tree_read_lock(cur);
fs/btrfs/ctree.c
4696
path->nodes[level - 1] = cur;
fs/btrfs/defrag.c
1003
range_len = em->len - (cur - em->start);
fs/btrfs/defrag.c
1024
if (btrfs_test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
fs/btrfs/defrag.c
1066
if (last->start + last->len != cur)
fs/btrfs/defrag.c
1074
range_len = min(btrfs_extent_map_end(em), start + len) - cur;
fs/btrfs/defrag.c
1084
ASSERT(last->start + last->len <= cur);
fs/btrfs/defrag.c
1085
if (last->start + last->len == cur) {
fs/btrfs/defrag.c
1100
new->start = cur;
fs/btrfs/defrag.c
1105
cur = btrfs_extent_map_end(em);
fs/btrfs/defrag.c
1124
*last_scanned_ret = max(cur, *last_scanned_ret);
fs/btrfs/defrag.c
1201
u64 cur = start;
fs/btrfs/defrag.c
1214
for (int i = 0; cur < start + len && i < nr_pages; i++) {
fs/btrfs/defrag.c
1215
folios[i] = defrag_prepare_one_folio(inode, cur >> PAGE_SHIFT);
fs/btrfs/defrag.c
1221
cur = folio_next_pos(folios[i]);
fs/btrfs/defrag.c
1232
btrfs_lock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state);
fs/btrfs/defrag.c
1258
btrfs_unlock_extent(&inode->io_tree, folio_pos(folios[0]), cur - 1, &cached_state);
fs/btrfs/defrag.c
1363
u64 cur;
fs/btrfs/defrag.c
1414
cur = round_down(range->start, fs_info->sectorsize);
fs/btrfs/defrag.c
1421
start_index = cur >> PAGE_SHIFT;
fs/btrfs/defrag.c
1425
while (cur < last_byte) {
fs/btrfs/defrag.c
1427
u64 last_scanned = cur;
fs/btrfs/defrag.c
1436
cluster_end = (((cur >> PAGE_SHIFT) +
fs/btrfs/defrag.c
1454
ret = defrag_one_cluster(inode, ra, cur,
fs/btrfs/defrag.c
1455
cluster_end + 1 - cur, extent_thresh,
fs/btrfs/defrag.c
1466
cur = max(cluster_end + 1, last_scanned);
fs/btrfs/defrag.c
1478
range->start = cur;
fs/btrfs/defrag.c
223
u64 cur = 0;
fs/btrfs/defrag.c
245
if (cur >= i_size_read(&inode->vfs_inode)) {
fs/btrfs/defrag.c
254
range.start = cur;
fs/btrfs/defrag.c
266
cur = max(cur + fs_info->sectorsize, range.start);
fs/btrfs/defrag.c
372
struct extent_buffer *cur;
fs/btrfs/defrag.c
400
cur = btrfs_read_node_slot(parent, i);
fs/btrfs/defrag.c
401
if (IS_ERR(cur))
fs/btrfs/defrag.c
402
return PTR_ERR(cur);
fs/btrfs/defrag.c
406
btrfs_tree_lock(cur);
fs/btrfs/defrag.c
407
ret = btrfs_force_cow_block(trans, root, cur, parent, i,
fs/btrfs/defrag.c
408
&cur, search_start,
fs/btrfs/defrag.c
413
btrfs_tree_unlock(cur);
fs/btrfs/defrag.c
414
free_extent_buffer(cur);
fs/btrfs/defrag.c
417
search_start = cur->start;
fs/btrfs/defrag.c
418
last_block = cur->start;
fs/btrfs/defrag.c
420
btrfs_tree_unlock(cur);
fs/btrfs/defrag.c
421
free_extent_buffer(cur);
fs/btrfs/defrag.c
962
u64 cur = start;
fs/btrfs/defrag.c
965
while (cur < start + len) {
fs/btrfs/defrag.c
972
em = defrag_lookup_extent(&inode->vfs_inode, cur, newer_than, locked);
fs/btrfs/dir-item.c
377
u32 cur = 0;
fs/btrfs/dir-item.c
385
while (cur < total_len) {
fs/btrfs/dir-item.c
395
cur += this_len;
fs/btrfs/disk-io.c
1506
struct btrfs_transaction *cur;
fs/btrfs/disk-io.c
1518
cur = fs_info->running_transaction;
fs/btrfs/disk-io.c
1519
if (!cur) {
fs/btrfs/disk-io.c
1524
delta = ktime_get_seconds() - cur->start_time;
fs/btrfs/disk-io.c
1526
cur->state < TRANS_STATE_COMMIT_PREP &&
fs/btrfs/disk-io.c
1534
transid = cur->transid;
fs/btrfs/disk-io.c
1575
u64 cur;
fs/btrfs/disk-io.c
1581
cur = btrfs_backup_tree_root_gen(root_backup);
fs/btrfs/disk-io.c
1582
if (cur == newest_gen)
fs/btrfs/disk-io.c
2288
unsigned int cur = 0; /* Offset inside the sys chunk array */
fs/btrfs/disk-io.c
2302
while (cur < sys_array_size) {
fs/btrfs/disk-io.c
2311
disk_key = (struct btrfs_disk_key *)(sb->sys_chunk_array + cur);
fs/btrfs/disk-io.c
2314
if (unlikely(cur + len > sys_array_size))
fs/btrfs/disk-io.c
2316
cur += len;
fs/btrfs/disk-io.c
2322
key.type, cur);
fs/btrfs/disk-io.c
2325
chunk = (struct btrfs_chunk *)(sb->sys_chunk_array + cur);
fs/btrfs/disk-io.c
2327
if (unlikely(cur + btrfs_chunk_item_size(num_stripes) > sys_array_size))
fs/btrfs/disk-io.c
2333
type, cur);
fs/btrfs/disk-io.c
2340
cur += btrfs_chunk_item_size(num_stripes);
fs/btrfs/disk-io.c
2346
cur, sys_array_size);
fs/btrfs/disk-io.c
4214
u64 cur = 0;
fs/btrfs/disk-io.c
4219
while (btrfs_find_first_extent_bit(&trans->dirty_pages, cur,
fs/btrfs/disk-io.c
4223
cur = found_end + 1;
fs/btrfs/disk-io.c
531
u64 cur;
fs/btrfs/disk-io.c
539
cur = page_start + cur_bit * fs_info->sectorsize;
fs/btrfs/disk-io.c
541
eb = find_extent_buffer(fs_info, cur);
fs/btrfs/extent-tree.c
1430
u64 cur = bytenr;
fs/btrfs/extent-tree.c
1437
while (cur < end) {
fs/btrfs/extent-tree.c
1442
num_bytes = end - cur;
fs/btrfs/extent-tree.c
1443
stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes,
fs/btrfs/extent-tree.c
1481
cur += num_bytes;
fs/btrfs/extent_io.c
1032
for (u64 cur = start; cur <= end; cur += blocksize) {
fs/btrfs/extent_io.c
1034
unsigned long pg_offset = offset_in_folio(folio, cur);
fs/btrfs/extent_io.c
1040
ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
fs/btrfs/extent_io.c
1041
if (cur >= last_byte) {
fs/btrfs/extent_io.c
1042
folio_zero_range(folio, pg_offset, end - cur + 1);
fs/btrfs/extent_io.c
1043
end_folio_read(vi, folio, true, cur, end - cur + 1);
fs/btrfs/extent_io.c
1046
if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
fs/btrfs/extent_io.c
1047
end_folio_read(vi, folio, true, cur, blocksize);
fs/btrfs/extent_io.c
1057
em = get_extent_map(BTRFS_I(inode), folio, cur, locked_end - cur + 1, em_cached);
fs/btrfs/extent_io.c
1059
end_folio_read(vi, folio, false, cur, end + 1 - cur);
fs/btrfs/extent_io.c
1062
extent_offset = cur - em->start;
fs/btrfs/extent_io.c
1063
BUG_ON(btrfs_extent_map_end(em) <= cur);
fs/btrfs/extent_io.c
1064
BUG_ON(end < cur);
fs/btrfs/extent_io.c
1136
end_folio_read(vi, folio, true, cur, blocksize);
fs/btrfs/extent_io.c
1141
end_folio_read(vi, folio, true, cur, blocksize);
fs/btrfs/extent_io.c
1179
u64 cur = *fileoff;
fs/btrfs/extent_io.c
1182
folio = filemap_get_folio(inode->vfs_inode.i_mapping, cur >> PAGE_SHIFT);
fs/btrfs/extent_io.c
1218
if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) {
fs/btrfs/extent_io.c
1220
ordered->file_offset + ordered->num_bytes) - cur;
fs/btrfs/extent_io.c
1227
ASSERT(btrfs_folio_test_dirty(fs_info, folio, cur, range_len));
fs/btrfs/extent_io.c
1240
if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) {
fs/btrfs/extent_io.c
1242
ordered->file_offset + ordered->num_bytes) - cur;
fs/btrfs/extent_io.c
1248
ASSERT(btrfs_folio_test_uptodate(fs_info, folio, cur, range_len));
fs/btrfs/extent_io.c
1250
*fileoff = cur + range_len;
fs/btrfs/extent_io.c
1273
u64 cur = max(start, ordered->file_offset);
fs/btrfs/extent_io.c
1275
while (cur < range_end) {
fs/btrfs/extent_io.c
1278
can_skip = can_skip_one_ordered_range(inode, ordered, &cur);
fs/btrfs/extent_io.c
1737
u64 cur;
fs/btrfs/extent_io.c
1767
cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits);
fs/btrfs/extent_io.c
1769
if (cur >= i_size) {
fs/btrfs/extent_io.c
1772
ordered = btrfs_lookup_first_ordered_range(inode, cur,
fs/btrfs/extent_io.c
1782
cur - ordered->file_offset);
fs/btrfs/extent_io.c
1786
btrfs_mark_ordered_io_finished(inode, folio, cur,
fs/btrfs/extent_io.c
1796
btrfs_folio_clear_dirty(fs_info, folio, cur, fs_info->sectorsize);
fs/btrfs/extent_io.c
1799
ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size);
fs/btrfs/extent_io.c
2097
unsigned int cur;
fs/btrfs/extent_io.c
2110
batch->cur = 0;
fs/btrfs/extent_io.c
2115
if (batch->cur >= batch->nr)
fs/btrfs/extent_io.c
2117
return batch->ebs[batch->cur++];
fs/btrfs/extent_io.c
2637
u64 cur = start;
fs/btrfs/extent_io.c
2648
while (cur <= end) {
fs/btrfs/extent_io.c
2653
folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT);
fs/btrfs/extent_io.c
2660
cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
fs/btrfs/extent_io.c
2661
cur_len = cur_end + 1 - cur;
fs/btrfs/extent_io.c
2663
cur, cur_len, false);
fs/btrfs/extent_io.c
2665
cur = cur_end;
fs/btrfs/extent_io.c
2670
cur_len = cur_end + 1 - cur;
fs/btrfs/extent_io.c
2681
ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len,
fs/btrfs/extent_io.c
2688
btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
fs/btrfs/extent_io.c
2693
cur = cur_end + 1;
fs/btrfs/extent_io.c
3971
size_t cur;
fs/btrfs/extent_io.c
3995
cur = min(len, unit_size - offset);
fs/btrfs/extent_io.c
3997
memcpy(dst, kaddr + offset, cur);
fs/btrfs/extent_io.c
3999
dst += cur;
fs/btrfs/extent_io.c
4000
len -= cur;
fs/btrfs/extent_io.c
4011
size_t cur;
fs/btrfs/extent_io.c
4031
cur = min(len, unit_size - offset);
fs/btrfs/extent_io.c
4033
if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
fs/btrfs/extent_io.c
4038
dst += cur;
fs/btrfs/extent_io.c
4039
len -= cur;
fs/btrfs/extent_io.c
4051
size_t cur;
fs/btrfs/extent_io.c
4067
cur = min(len, unit_size - offset);
fs/btrfs/extent_io.c
4069
ret = memcmp(ptr, kaddr + offset, cur);
fs/btrfs/extent_io.c
4073
ptr += cur;
fs/btrfs/extent_io.c
4074
len -= cur;
fs/btrfs/extent_io.c
4121
size_t cur;
fs/btrfs/extent_io.c
4146
cur = min(len, unit_size - offset);
fs/btrfs/extent_io.c
4149
memmove(kaddr + offset, src, cur);
fs/btrfs/extent_io.c
4151
memcpy(kaddr + offset, src, cur);
fs/btrfs/extent_io.c
4153
src += cur;
fs/btrfs/extent_io.c
4154
len -= cur;
fs/btrfs/extent_io.c
4170
unsigned long cur = start;
fs/btrfs/extent_io.c
4177
while (cur < start + len) {
fs/btrfs/extent_io.c
4178
unsigned long index = get_eb_folio_index(eb, cur);
fs/btrfs/extent_io.c
4179
unsigned int offset = get_eb_offset_in_folio(eb, cur);
fs/btrfs/extent_io.c
4180
unsigned int cur_len = min(start + len - cur, unit_size - offset);
fs/btrfs/extent_io.c
4185
cur += cur_len;
fs/btrfs/extent_io.c
4201
unsigned long cur = 0;
fs/btrfs/extent_io.c
4205
while (cur < src->len) {
fs/btrfs/extent_io.c
4206
unsigned long index = get_eb_folio_index(src, cur);
fs/btrfs/extent_io.c
4207
unsigned long offset = get_eb_offset_in_folio(src, cur);
fs/btrfs/extent_io.c
4211
write_extent_buffer(dst, addr, cur, cur_len);
fs/btrfs/extent_io.c
4213
cur += cur_len;
fs/btrfs/extent_io.c
4224
size_t cur;
fs/btrfs/extent_io.c
4240
cur = min(len, (unsigned long)(unit_size - offset));
fs/btrfs/extent_io.c
4243
read_extent_buffer(src, kaddr + offset, src_offset, cur);
fs/btrfs/extent_io.c
4245
src_offset += cur;
fs/btrfs/extent_io.c
4246
len -= cur;
fs/btrfs/extent_io.c
4451
size_t cur;
fs/btrfs/extent_io.c
4462
cur = min_t(unsigned long, len, src_off_in_folio + 1);
fs/btrfs/extent_io.c
4463
cur = min(cur, dst_off_in_folio + 1);
fs/btrfs/extent_io.c
4466
cur + 1;
fs/btrfs/extent_io.c
4467
use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
fs/btrfs/extent_io.c
4468
cur);
fs/btrfs/extent_io.c
4470
__write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
fs/btrfs/extent_io.c
4473
dst_end -= cur;
fs/btrfs/extent_io.c
4474
src_end -= cur;
fs/btrfs/extent_io.c
4475
len -= cur;
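The read/write/memcmp helpers in extent_io.c above share one loop shape: clamp each step to what remains of the current fixed-size unit (a folio), copy that much, advance, repeat. A sketch of the clamping; UNIT is an arbitrary small value, and src here is contiguous (in the kernel each unit is a separately mapped folio, which is why the copy must be chunked at all):

    #include <stdio.h>
    #include <string.h>

    #define UNIT 8u   /* stand-in for the folio/page unit size */

    static void chunked_copy(char *dst, const char *src,
                             size_t start, size_t len)
    {
        size_t offset = start % UNIT;

        while (len) {
            size_t cur = len < UNIT - offset ? len : UNIT - offset;

            memcpy(dst, src + start, cur);  /* never crosses a unit boundary */
            dst += cur;
            start += cur;
            len -= cur;
            offset = 0;                     /* later chunks are unit-aligned */
        }
    }

    int main(void)
    {
        char dst[16] = { 0 };

        chunked_copy(dst, "abcdefghijklmno", 5, 9);
        printf("%s\n", dst);                /* fghijklmn */
        return 0;
    }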
fs/btrfs/extent_map.c
607
struct extent_map *cur,
fs/btrfs/extent_map.c
618
WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
fs/btrfs/extent_map.c
619
ASSERT(btrfs_extent_map_in_tree(cur));
fs/btrfs/extent_map.c
620
if (!(cur->flags & EXTENT_FLAG_LOGGING))
fs/btrfs/extent_map.c
621
list_del_init(&cur->list);
fs/btrfs/extent_map.c
622
rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root);
fs/btrfs/extent_map.c
623
RB_CLEAR_NODE(&cur->rb_node);
fs/btrfs/free-space-cache.c
411
if (io_ctl->cur) {
fs/btrfs/free-space-cache.c
412
io_ctl->cur = NULL;
fs/btrfs/free-space-cache.c
421
io_ctl->cur = page_address(io_ctl->page);
fs/btrfs/free-space-cache.c
422
io_ctl->orig = io_ctl->cur;
fs/btrfs/free-space-cache.c
425
clear_page(io_ctl->cur);
fs/btrfs/free-space-cache.c
505
io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
fs/btrfs/free-space-cache.c
508
put_unaligned_le64(generation, io_ctl->cur);
fs/btrfs/free-space-cache.c
509
io_ctl->cur += sizeof(u64);
fs/btrfs/free-space-cache.c
520
io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
fs/btrfs/free-space-cache.c
523
cache_gen = get_unaligned_le64(io_ctl->cur);
fs/btrfs/free-space-cache.c
531
io_ctl->cur += sizeof(u64);
fs/btrfs/free-space-cache.c
583
if (!io_ctl->cur)
fs/btrfs/free-space-cache.c
586
entry = io_ctl->cur;
fs/btrfs/free-space-cache.c
591
io_ctl->cur += sizeof(struct btrfs_free_space_entry);
fs/btrfs/free-space-cache.c
610
if (!io_ctl->cur)
fs/btrfs/free-space-cache.c
617
if (io_ctl->cur != io_ctl->orig) {
fs/btrfs/free-space-cache.c
624
copy_page(io_ctl->cur, bitmap);
fs/btrfs/free-space-cache.c
637
if (io_ctl->cur != io_ctl->orig)
fs/btrfs/free-space-cache.c
654
if (!io_ctl->cur) {
fs/btrfs/free-space-cache.c
660
e = io_ctl->cur;
fs/btrfs/free-space-cache.c
664
io_ctl->cur += sizeof(struct btrfs_free_space_entry);
fs/btrfs/free-space-cache.c
684
copy_page(entry->bitmap, io_ctl->cur);
fs/btrfs/free-space-cache.h
100
void *cur, *orig;
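The free-space-cache entries above drive serialization through a moving cur pointer: each record is written at cur, the cursor is bumped, and hitting the end of the page forces a move to a fresh page. A bounded sketch of that cursor discipline; the entry layout and buffer size are invented for the example:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct entry { uint64_t offset; uint64_t bytes; };

    struct io_ctl {
        uint8_t buf[64];
        uint8_t *cur, *end;
    };

    static int io_ctl_add_entry(struct io_ctl *io, uint64_t off, uint64_t bytes)
    {
        struct entry e = { off, bytes };

        if (!io->cur || io->cur + sizeof(e) > io->end)
            return -1;                   /* caller would move to a new page */
        memcpy(io->cur, &e, sizeof(e));
        io->cur += sizeof(e);            /* bump the cursor past the entry */
        return 0;
    }

    int main(void)
    {
        struct io_ctl io;
        int n = 0;

        io.cur = io.buf;
        io.end = io.buf + sizeof(io.buf);
        while (io_ctl_add_entry(&io, (uint64_t)n * 4096, 4096) == 0)
            n++;
        printf("wrote %d entries\n", n); /* 64 / 16 = 4 */
        return 0;
    }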
fs/btrfs/inode.c
7681
u64 cur;
fs/btrfs/inode.c
7720
cur = page_start;
fs/btrfs/inode.c
7721
while (cur < page_end) {
fs/btrfs/inode.c
7727
ordered = btrfs_lookup_first_ordered_range(inode, cur,
fs/btrfs/inode.c
7728
page_end + 1 - cur);
fs/btrfs/inode.c
7738
if (ordered->file_offset > cur) {
fs/btrfs/inode.c
7752
ASSERT(range_end + 1 - cur < U32_MAX);
fs/btrfs/inode.c
7753
range_len = range_end + 1 - cur;
fs/btrfs/inode.c
7754
if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
fs/btrfs/inode.c
7763
btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);
fs/btrfs/inode.c
7774
btrfs_clear_extent_bit(tree, cur, range_end,
fs/btrfs/inode.c
7782
cur - ordered->file_offset);
fs/btrfs/inode.c
7792
cur, range_end + 1 - cur)) {
fs/btrfs/inode.c
7818
btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
fs/btrfs/inode.c
7820
btrfs_clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
fs/btrfs/inode.c
7824
cur = range_end + 1;
fs/btrfs/inode.c
9654
u64 cur;
fs/btrfs/inode.c
9684
cur = 0;
fs/btrfs/inode.c
9685
while (cur < count) {
fs/btrfs/inode.c
9686
size_t bytes = min_t(size_t, count - cur,
fs/btrfs/inode.c
9695
cur += bytes;
fs/btrfs/ioctl.c
4543
u64 cur;
fs/btrfs/ioctl.c
4562
cur = 0;
fs/btrfs/ioctl.c
4563
while (cur < priv->count) {
fs/btrfs/ioctl.c
4564
size_t bytes = min_t(size_t, priv->count - cur, PAGE_SIZE - page_offset);
fs/btrfs/ioctl.c
4573
cur += bytes;
fs/btrfs/ordered-data.c
1091
struct rb_node *cur;
fs/btrfs/ordered-data.c
1124
cur = &entry->rb_node;
fs/btrfs/ordered-data.c
1127
prev = cur;
fs/btrfs/ordered-data.c
1128
next = rb_next(cur);
fs/btrfs/ordered-data.c
1130
prev = rb_prev(cur);
fs/btrfs/ordered-data.c
1131
next = cur;
fs/btrfs/ordered-data.c
483
u64 cur = file_offset;
fs/btrfs/ordered-data.c
489
while (cur < end) {
fs/btrfs/ordered-data.c
494
node = ordered_tree_search(inode, cur);
fs/btrfs/ordered-data.c
506
if (cur >= entry_end) {
fs/btrfs/ordered-data.c
515
cur = entry->file_offset;
fs/btrfs/ordered-data.c
523
if (cur < entry->file_offset) {
fs/btrfs/ordered-data.c
524
cur = entry->file_offset;
fs/btrfs/ordered-data.c
536
len = this_end - cur;
fs/btrfs/ordered-data.c
539
if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
fs/btrfs/ordered-data.c
544
cur += len;
fs/btrfs/print-tree.c
272
u32 cur = 0;
fs/btrfs/print-tree.c
274
while (cur < size) {
fs/btrfs/print-tree.c
286
cur += len;
fs/btrfs/print-tree.c
294
u32 cur = 0;
fs/btrfs/print-tree.c
296
while (cur < size) {
fs/btrfs/print-tree.c
303
cur += len;
fs/btrfs/print-tree.c
311
u32 cur = 0;
fs/btrfs/print-tree.c
314
while (cur < size) {
fs/btrfs/print-tree.c
323
cur += len;
fs/btrfs/props.c
165
u32 total_len, cur, this_len;
fs/btrfs/props.c
192
cur = 0;
fs/btrfs/props.c
195
while (cur < total_len) {
fs/btrfs/props.c
240
cur += this_len;
fs/btrfs/raid56.c
1803
struct btrfs_raid_bio *cur;
fs/btrfs/raid56.c
1809
cur = list_first_entry(&plug->rbio_list,
fs/btrfs/raid56.c
1811
list_del_init(&cur->plug_list);
fs/btrfs/raid56.c
1813
if (rbio_is_full(cur)) {
fs/btrfs/raid56.c
1815
start_async_work(cur, rmw_rbio_work);
fs/btrfs/raid56.c
1819
if (rbio_can_merge(last, cur)) {
fs/btrfs/raid56.c
1820
merge_rbio(last, cur);
fs/btrfs/raid56.c
1821
free_raid_bio(cur);
fs/btrfs/raid56.c
1826
last = cur;
fs/btrfs/raid56.c
197
struct btrfs_stripe_hash *cur;
fs/btrfs/raid56.c
221
cur = h + i;
fs/btrfs/raid56.c
222
INIT_LIST_HEAD(&cur->hash_list);
fs/btrfs/raid56.c
223
spin_lock_init(&cur->lock);
fs/btrfs/raid56.c
669
struct btrfs_raid_bio *cur)
fs/btrfs/raid56.c
672
test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
fs/btrfs/raid56.c
683
test_bit(RBIO_CACHE_BIT, &cur->flags))
fs/btrfs/raid56.c
686
if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
fs/btrfs/raid56.c
690
if (last->operation != cur->operation)
fs/btrfs/raid56.c
792
struct btrfs_raid_bio *cur;
fs/btrfs/raid56.c
801
list_for_each_entry(cur, &h->hash_list, hash_list) {
fs/btrfs/raid56.c
802
if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
fs/btrfs/raid56.c
805
spin_lock(&cur->bio_list_lock);
fs/btrfs/raid56.c
808
if (bio_list_empty(&cur->bio_list) &&
fs/btrfs/raid56.c
809
list_empty(&cur->plug_list) &&
fs/btrfs/raid56.c
810
test_bit(RBIO_CACHE_BIT, &cur->flags) &&
fs/btrfs/raid56.c
811
!test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
fs/btrfs/raid56.c
812
list_del_init(&cur->hash_list);
fs/btrfs/raid56.c
813
refcount_dec(&cur->refs);
fs/btrfs/raid56.c
815
steal_rbio(cur, rbio);
fs/btrfs/raid56.c
816
cache_drop = cur;
fs/btrfs/raid56.c
817
spin_unlock(&cur->bio_list_lock);
fs/btrfs/raid56.c
823
if (rbio_can_merge(cur, rbio)) {
fs/btrfs/raid56.c
824
merge_rbio(cur, rbio);
fs/btrfs/raid56.c
825
spin_unlock(&cur->bio_list_lock);
fs/btrfs/raid56.c
837
list_for_each_entry(pending, &cur->plug_list, plug_list) {
fs/btrfs/raid56.c
840
spin_unlock(&cur->bio_list_lock);
fs/btrfs/raid56.c
851
list_add_tail(&rbio->plug_list, &cur->plug_list);
fs/btrfs/raid56.c
852
spin_unlock(&cur->bio_list_lock);
fs/btrfs/raid56.c
947
static void rbio_endio_bio_list(struct bio *cur, blk_status_t status)
fs/btrfs/raid56.c
951
while (cur) {
fs/btrfs/raid56.c
952
next = cur->bi_next;
fs/btrfs/raid56.c
953
cur->bi_next = NULL;
fs/btrfs/raid56.c
954
cur->bi_status = status;
fs/btrfs/raid56.c
955
bio_endio(cur);
fs/btrfs/raid56.c
956
cur = next;
fs/btrfs/raid56.c
966
struct bio *cur = bio_list_get(&rbio->bio_list);
fs/btrfs/raid56.c
993
rbio_endio_bio_list(cur, status);
fs/btrfs/relocation.c
2793
u64 cur;
fs/btrfs/relocation.c
2855
cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
fs/btrfs/relocation.c
2856
while (cur <= folio_end) {
fs/btrfs/relocation.c
2911
cur += clamped_len;
fs/btrfs/relocation.c
2914
if (cur >= extent_end) {
fs/btrfs/relocation.c
345
struct btrfs_backref_node *cur;
fs/btrfs/relocation.c
347
cur = list_first_entry(useless_node, struct btrfs_backref_node,
fs/btrfs/relocation.c
349
list_del_init(&cur->list);
fs/btrfs/relocation.c
352
ASSERT(list_empty(&cur->upper));
fs/btrfs/relocation.c
354
if (cur == node)
fs/btrfs/relocation.c
358
while (!list_empty(&cur->lower)) {
fs/btrfs/relocation.c
362
edge = list_first_entry(&cur->lower, struct btrfs_backref_edge,
fs/btrfs/relocation.c
374
mark_block_processed(rc, cur);
fs/btrfs/relocation.c
381
if (cur->level > 0) {
fs/btrfs/relocation.c
382
cur->detached = 1;
fs/btrfs/relocation.c
384
rb_erase(&cur->rb_node, &cache->rb_root);
fs/btrfs/relocation.c
385
btrfs_backref_free_node(cache, cur);
fs/btrfs/relocation.c
414
struct btrfs_backref_node *cur;
fs/btrfs/relocation.c
434
cur = node;
fs/btrfs/relocation.c
439
node_key, cur);
fs/btrfs/relocation.c
451
cur = edge->node[UPPER];
fs/btrfs/scrub.c
1901
for (unsigned int cur = 0; cur < nr_sectors; cur++)
fs/btrfs/scrub.c
1902
scrub_bio_add_sector(bbio, stripe, cur);
fs/btrfs/send.c
1018
while (cur < total) {
fs/btrfs/send.c
1022
iref = (struct btrfs_inode_ref *)(ptr + cur);
fs/btrfs/send.c
1027
extref = (struct btrfs_inode_extref *)(ptr + cur);
fs/btrfs/send.c
1071
cur += elem_size + name_len;
fs/btrfs/send.c
1105
u32 cur;
fs/btrfs/send.c
1127
cur = 0;
fs/btrfs/send.c
1132
while (cur < total) {
fs/btrfs/send.c
1184
cur += len;
fs/btrfs/send.c
3019
struct recorded_ref *cur;
fs/btrfs/send.c
3022
cur = list_first_entry(head, struct recorded_ref, list);
fs/btrfs/send.c
3023
recorded_ref_free(cur);
fs/btrfs/send.c
3351
struct recorded_ref *cur;
fs/btrfs/send.c
3378
list_for_each_entry(cur, deleted_refs, list) {
fs/btrfs/send.c
3379
ret = dup_ref(cur, &pm->update_refs);
fs/btrfs/send.c
3383
list_for_each_entry(cur, new_refs, list) {
fs/btrfs/send.c
3384
ret = dup_ref(cur, &pm->update_refs);
fs/btrfs/send.c
3474
struct recorded_ref *cur;
fs/btrfs/send.c
3584
list_for_each_entry(cur, &pm->update_refs, list) {
fs/btrfs/send.c
3588
ret = get_inode_info(sctx->send_root, cur->dir, NULL);
fs/btrfs/send.c
3596
ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
fs/btrfs/send.c
4164
struct recorded_ref *cur;
fs/btrfs/send.c
4266
list_for_each_entry(cur, &sctx->new_refs, list) {
fs/btrfs/send.c
4267
ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
fs/btrfs/send.c
4279
ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
fs/btrfs/send.c
4280
cur->name, cur->name_len,
fs/btrfs/send.c
4286
ow_inode, cur->dir, cur->name,
fs/btrfs/send.c
4287
cur->name_len);
fs/btrfs/send.c
4295
ret = refresh_ref_path(sctx, cur);
fs/btrfs/send.c
4301
cur->full_path);
fs/btrfs/send.c
4360
ret = refresh_ref_path(sctx, cur);
fs/btrfs/send.c
4364
ret = send_unlink(sctx, cur->full_path);
fs/btrfs/send.c
4372
list_for_each_entry(cur, &sctx->new_refs, list) {
fs/btrfs/send.c
4380
ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
fs/btrfs/send.c
4390
if (cur == cur2)
fs/btrfs/send.c
4392
if (cur2->dir == cur->dir) {
fs/btrfs/send.c
4403
ret = did_create_dir(sctx, cur->dir);
fs/btrfs/send.c
4407
ret = send_create_inode(sctx, cur->dir);
fs/btrfs/send.c
4410
cache_dir_created(sctx, cur->dir);
fs/btrfs/send.c
4415
ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
fs/btrfs/send.c
4426
ret = wait_for_parent_move(sctx, cur, is_orphan);
fs/btrfs/send.c
4441
ret = rename_current_inode(sctx, valid_path, cur->full_path);
fs/btrfs/send.c
4453
cur->full_path);
fs/btrfs/send.c
4465
ret = update_ref_path(sctx, cur);
fs/btrfs/send.c
4469
ret = send_link(sctx, cur->full_path,
fs/btrfs/send.c
4475
ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
fs/btrfs/send.c
4502
list_for_each_entry(cur, &sctx->deleted_refs, list) {
fs/btrfs/send.c
4503
ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
fs/btrfs/send.c
4512
cur = list_first_entry(&sctx->deleted_refs, struct recorded_ref, list);
fs/btrfs/send.c
4513
ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
fs/btrfs/send.c
4522
list_for_each_entry(cur, &sctx->deleted_refs, list) {
fs/btrfs/send.c
4523
ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
fs/btrfs/send.c
4525
cur->name, cur->name_len);
fs/btrfs/send.c
4537
ret = update_ref_path(sctx, cur);
fs/btrfs/send.c
4541
ret = send_unlink(sctx, cur->full_path);
fs/btrfs/send.c
4544
if (is_current_inode_path(sctx, cur->full_path))
fs/btrfs/send.c
4547
ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
fs/btrfs/send.c
4572
list_for_each_entry(cur, &check_dirs, list) {
fs/btrfs/send.c
4578
if (cur->dir > sctx->cur_ino)
fs/btrfs/send.c
4581
ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL);
fs/btrfs/send.c
4587
ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
fs/btrfs/send.c
4591
ret = can_rmdir(sctx, cur->dir, cur->dir_gen);
fs/btrfs/send.c
4595
ret = get_cur_path(sctx, cur->dir,
fs/btrfs/send.c
4596
cur->dir_gen, valid_path);
fs/btrfs/send.c
5202
u64 cur = offset;
fs/btrfs/send.c
5212
while (cur < end) {
fs/btrfs/send.c
5213
pgoff_t index = (cur >> PAGE_SHIFT);
fs/btrfs/send.c
5230
pg_offset = offset_in_folio(folio, cur);
fs/btrfs/send.c
5231
cur_len = min_t(unsigned int, end - cur, folio_size(folio) - pg_offset);
fs/btrfs/send.c
5261
cur += cur_len;
fs/btrfs/send.c
625
const struct fs_path *cur = &sctx->cur_inode_path;
fs/btrfs/send.c
627
return (strncmp(path->start, cur->start, fs_path_len(cur)) == 0);
fs/btrfs/send.c
985
u32 cur = 0;
fs/btrfs/sysfs.c
417
for (u32 cur = BTRFS_MIN_BLOCKSIZE; cur <= BTRFS_MAX_BLOCKSIZE; cur *= 2) {
fs/btrfs/sysfs.c
418
if (!btrfs_supported_blocksize(cur))
fs/btrfs/sysfs.c
422
ret += sysfs_emit_at(buf, ret, "%u", cur);
fs/btrfs/tests/extent-io-tests.c
64
#define PRINT_ONE_FLAG(state, dest, cur, name) \
fs/btrfs/tests/extent-io-tests.c
67
cur += scnprintf(dest + cur, STATE_FLAG_STR_LEN - cur, \
fs/btrfs/tests/extent-io-tests.c
68
"%s" #name, cur == 0 ? "" : "|"); \
fs/btrfs/tests/extent-io-tests.c
73
int cur = 0;
fs/btrfs/tests/extent-io-tests.c
76
PRINT_ONE_FLAG(state, dest, cur, DIRTY);
fs/btrfs/tests/extent-io-tests.c
77
PRINT_ONE_FLAG(state, dest, cur, LOCKED);
fs/btrfs/tests/extent-io-tests.c
78
PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG1);
fs/btrfs/tests/extent-io-tests.c
79
PRINT_ONE_FLAG(state, dest, cur, DIRTY_LOG2);
fs/btrfs/tests/extent-io-tests.c
80
PRINT_ONE_FLAG(state, dest, cur, DELALLOC);
fs/btrfs/tests/extent-io-tests.c
81
PRINT_ONE_FLAG(state, dest, cur, DEFRAG);
fs/btrfs/tests/extent-io-tests.c
82
PRINT_ONE_FLAG(state, dest, cur, BOUNDARY);
fs/btrfs/tests/extent-io-tests.c
83
PRINT_ONE_FLAG(state, dest, cur, NODATASUM);
fs/btrfs/tests/extent-io-tests.c
84
PRINT_ONE_FLAG(state, dest, cur, CLEAR_META_RESV);
fs/btrfs/tests/extent-io-tests.c
85
PRINT_ONE_FLAG(state, dest, cur, NEED_WAIT);
fs/btrfs/tests/extent-io-tests.c
86
PRINT_ONE_FLAG(state, dest, cur, NORESERVE);
fs/btrfs/tests/extent-io-tests.c
87
PRINT_ONE_FLAG(state, dest, cur, QGROUP_RESERVED);
fs/btrfs/tests/extent-io-tests.c
88
PRINT_ONE_FLAG(state, dest, cur, CLEAR_DATA_RESV);
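
The extent-io-tests entries above use `cur` as a running write offset while building a "|"-separated flag string. A minimal userspace sketch of that shape follows; the FLAG_* names and buffer size are invented here, and plain snprintf() stands in for the kernel's scnprintf() (which returns bytes actually stored rather than the would-be length), so the sketch assumes the buffer is large enough for all names.

    #include <stdio.h>

    #define FLAG_DIRTY    (1u << 0)   /* hypothetical flag bits */
    #define FLAG_LOCKED   (1u << 1)
    #define FLAG_DELALLOC (1u << 2)

    /* Append one flag name, prefixing "|" unless the buffer is still empty. */
    #define PRINT_ONE_FLAG(state, dest, cur, name)                        \
        do {                                                              \
            if ((state) & FLAG_##name)                                    \
                cur += snprintf((dest) + (cur), sizeof(dest) - (cur),     \
                                "%s" #name, (cur) == 0 ? "" : "|");       \
        } while (0)

    int main(void)
    {
        char buf[64];
        int cur = 0;
        unsigned int state = FLAG_DIRTY | FLAG_DELALLOC;

        buf[0] = '\0';
        PRINT_ONE_FLAG(state, buf, cur, DIRTY);
        PRINT_ONE_FLAG(state, buf, cur, LOCKED);
        PRINT_ONE_FLAG(state, buf, cur, DELALLOC);
        printf("%s\n", buf);        /* prints: DIRTY|DELALLOC */
        return 0;
    }
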
fs/btrfs/tree-checker.c
528
u32 cur = 0;
fs/btrfs/tree-checker.c
534
while (cur < item_size) {
fs/btrfs/tree-checker.c
545
if (unlikely(cur + sizeof(*di) > item_size)) {
fs/btrfs/tree-checker.c
548
cur + sizeof(*di), item_size);
fs/btrfs/tree-checker.c
626
if (unlikely(cur + total_size > item_size)) {
fs/btrfs/tree-checker.c
629
cur + total_size, item_size);
fs/btrfs/tree-checker.c
651
cur += total_size;
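
The tree-checker lines above show `cur` as a byte offset walking packed, variable-size records, with an explicit bounds check before the header and again before the full record. Here is a self-contained sketch of that validation loop under invented types (`struct rec` and its length field are hypothetical); the widening casts make the bound checks overflow-safe even on 32-bit offsets.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical on-disk record: a length-prefixed item. */
    struct rec {
        uint16_t data_len;      /* bytes of payload following the header */
    };

    static int walk_items(const uint8_t *item, uint32_t item_size)
    {
        uint32_t cur = 0;

        while (cur < item_size) {
            struct rec hdr;
            uint32_t total;

            if ((uint64_t)cur + sizeof(hdr) > item_size)
                return -1;               /* truncated header */
            memcpy(&hdr, item + cur, sizeof(hdr));

            total = sizeof(hdr) + hdr.data_len;
            if ((uint64_t)cur + total > item_size)
                return -1;               /* record overruns the item */

            printf("record at %u, payload %u bytes\n", cur, hdr.data_len);
            cur += total;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t buf[16] = { 0 };
        struct rec r = { .data_len = 4 };

        memcpy(buf, &r, sizeof(r));      /* one record: header + 4 bytes */
        return walk_items(buf, sizeof(r) + 4) ? 1 : 0;
    }
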
fs/btrfs/tree-log.c
2548
u32 cur;
fs/btrfs/tree-log.c
2558
cur = 0;
fs/btrfs/tree-log.c
2559
while (cur < total_size) {
fs/btrfs/tree-log.c
2619
cur += this_len;
fs/btrfs/tree-log.c
3003
struct extent_buffer *cur;
fs/btrfs/tree-log.c
3009
cur = path->nodes[*level];
fs/btrfs/tree-log.c
3011
WARN_ON(btrfs_header_level(cur) != *level);
fs/btrfs/tree-log.c
3014
btrfs_header_nritems(cur))
fs/btrfs/tree-log.c
3017
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
fs/btrfs/tree-log.c
3018
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
fs/btrfs/tree-log.c
3022
btrfs_node_key_to_cpu(cur, &check.first_key, path->slots[*level]);
fs/btrfs/tree-log.c
3025
btrfs_header_owner(cur),
fs/btrfs/tree-mod-log.c
150
struct tree_mod_elem *cur;
fs/btrfs/tree-mod-log.c
159
cur = rb_entry(*new, struct tree_mod_elem, node);
fs/btrfs/tree-mod-log.c
161
if (cur->logical < tm->logical)
fs/btrfs/tree-mod-log.c
163
else if (cur->logical > tm->logical)
fs/btrfs/tree-mod-log.c
165
else if (cur->seq < tm->seq)
fs/btrfs/tree-mod-log.c
167
else if (cur->seq > tm->seq)
fs/btrfs/tree-mod-log.c
516
struct tree_mod_elem *cur = NULL;
fs/btrfs/tree-mod-log.c
523
cur = rb_entry(node, struct tree_mod_elem, node);
fs/btrfs/tree-mod-log.c
524
if (cur->logical < start) {
fs/btrfs/tree-mod-log.c
526
} else if (cur->logical > start) {
fs/btrfs/tree-mod-log.c
528
} else if (cur->seq < min_seq) {
fs/btrfs/tree-mod-log.c
533
BUG_ON(found->seq > cur->seq);
fs/btrfs/tree-mod-log.c
534
found = cur;
fs/btrfs/tree-mod-log.c
536
} else if (cur->seq > min_seq) {
fs/btrfs/tree-mod-log.c
539
BUG_ON(found->seq < cur->seq);
fs/btrfs/tree-mod-log.c
540
found = cur;
fs/btrfs/tree-mod-log.c
543
found = cur;
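
The tree-mod-log entries above descend an rbtree keyed first by `logical`, then by `seq`, with `cur` naming the node under inspection. The sketch below reproduces just that two-key descent over a plain unbalanced binary tree (the kernel uses an rbtree with rebalancing, which is omitted here); duplicate keys simply go right in this toy version.

    #include <stdio.h>

    struct elem {
        unsigned long logical;
        unsigned long seq;
        struct elem *left, *right;
    };

    /* Find the link pointer where tm belongs, comparing (logical, seq). */
    static struct elem **descend(struct elem **root, const struct elem *tm)
    {
        struct elem **new = root;

        while (*new) {
            struct elem *cur = *new;

            if (tm->logical < cur->logical)
                new = &cur->left;
            else if (tm->logical > cur->logical)
                new = &cur->right;
            else if (tm->seq < cur->seq)
                new = &cur->left;
            else
                new = &cur->right;
        }
        return new;
    }

    int main(void)
    {
        struct elem a = { 10, 1 }, b = { 10, 2 }, c = { 5, 9 };
        struct elem *root = NULL;

        *descend(&root, &a) = &a;
        *descend(&root, &b) = &b;       /* same logical, larger seq */
        *descend(&root, &c) = &c;
        printf("root logical=%lu\n", root->logical);    /* 10 */
        return 0;
    }
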
fs/btrfs/volumes.c
3179
u32 cur;
fs/btrfs/volumes.c
3186
cur = 0;
fs/btrfs/volumes.c
3188
while (cur < array_size) {
fs/btrfs/volumes.c
3204
memmove(ptr, ptr + len, array_size - (cur + len));
fs/btrfs/volumes.c
3209
cur += len;
fs/btrfs/xattr.c
293
u32 cur;
fs/btrfs/xattr.c
308
cur = 0;
fs/btrfs/xattr.c
309
while (cur < item_size) {
fs/btrfs/xattr.c
334
cur += this_len;
fs/btrfs/zlib.c
119
u64 cur = filepos;
fs/btrfs/zlib.c
124
while (cur < filepos + length) {
fs/btrfs/zlib.c
131
ret = btrfs_compress_filemap_get_folio(mapping, cur, &folio);
fs/btrfs/zlib.c
135
offset = offset_in_folio(folio, cur);
fs/btrfs/zlib.c
137
filepos + length - cur);
fs/btrfs/zlib.c
140
memcpy(workspace->buf + cur - filepos, data_in, copy_length);
fs/btrfs/zlib.c
143
cur += copy_length;
fs/btrfs/zstd.c
503
u64 cur;
fs/btrfs/zstd.c
506
cur = start + tot_in;
fs/btrfs/zstd.c
512
ret = btrfs_compress_filemap_get_folio(mapping, cur, &in_folio);
fs/btrfs/zstd.c
516
offset_in_folio(in_folio, cur));
fs/btrfs/zstd.c
518
workspace->in_buf.size = btrfs_calc_input_length(in_folio, end, cur);
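
In the zlib and zstd entries above, `cur` is a file position advanced folio by folio: each round recomputes the in-folio offset and copies min(rest of folio, rest of range). A userspace sketch with a fixed page size follows; the flat `file` buffer stands in for the page cache, and real folios may span several pages, which this simplification ignores.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Copy [filepos, filepos + length) out of `file` in page-sized steps. */
    static void copy_range(uint8_t *dst, const uint8_t *file,
                           uint64_t filepos, uint64_t length)
    {
        uint64_t cur = filepos;

        while (cur < filepos + length) {
            uint64_t offset = cur % PAGE_SIZE;           /* offset in page */
            uint64_t copy_length = PAGE_SIZE - offset;   /* rest of page  */

            if (copy_length > filepos + length - cur)
                copy_length = filepos + length - cur;    /* rest of range */

            memcpy(dst + (cur - filepos), file + cur, copy_length);
            cur += copy_length;
        }
    }

    int main(void)
    {
        static uint8_t file[3 * PAGE_SIZE], out[PAGE_SIZE + 100];

        memset(file, 'x', sizeof(file));
        copy_range(out, file, PAGE_SIZE - 50, PAGE_SIZE + 100);
        printf("%c\n", out[0]);         /* x */
        return 0;
    }
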
fs/cachefiles/cache.c
320
struct list_head *cur;
fs/cachefiles/cache.c
327
list_for_each(cur, &cache->volumes) {
fs/cachefiles/cache.c
328
volume = list_entry(cur, struct cachefiles_volume, cache_link);
fs/ceph/mds_client.c
2712
struct dentry *cur;
fs/ceph/mds_client.c
2730
cur = dget(dentry);
fs/ceph/mds_client.c
2734
spin_lock(&cur->d_lock);
fs/ceph/mds_client.c
2735
inode = d_inode(cur);
fs/ceph/mds_client.c
2737
doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur);
fs/ceph/mds_client.c
2738
spin_unlock(&cur->d_lock);
fs/ceph/mds_client.c
2739
parent = dget_parent(cur);
fs/ceph/mds_client.c
2740
} else if (for_wire && inode && dentry != cur &&
fs/ceph/mds_client.c
2742
spin_unlock(&cur->d_lock);
fs/ceph/mds_client.c
2745
} else if (!for_wire || !IS_ENCRYPTED(d_inode(cur->d_parent))) {
fs/ceph/mds_client.c
2746
pos -= cur->d_name.len;
fs/ceph/mds_client.c
2748
spin_unlock(&cur->d_lock);
fs/ceph/mds_client.c
2751
memcpy(path + pos, cur->d_name.name, cur->d_name.len);
fs/ceph/mds_client.c
2752
spin_unlock(&cur->d_lock);
fs/ceph/mds_client.c
2753
parent = dget_parent(cur);
fs/ceph/mds_client.c
2762
memcpy(buf, cur->d_name.name, cur->d_name.len);
fs/ceph/mds_client.c
2763
len = cur->d_name.len;
fs/ceph/mds_client.c
2764
spin_unlock(&cur->d_lock);
fs/ceph/mds_client.c
2765
parent = dget_parent(cur);
fs/ceph/mds_client.c
2770
dput(cur);
fs/ceph/mds_client.c
2780
dput(cur);
fs/ceph/mds_client.c
2792
dput(cur);
fs/ceph/mds_client.c
2793
cur = parent;
fs/ceph/mds_client.c
2796
if (IS_ROOT(cur))
fs/ceph/mds_client.c
2805
inode = d_inode(cur);
fs/ceph/mds_client.c
2807
dput(cur);
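
The ceph entries above (and the configfs ones just below) share one idiom: a path is assembled leaf-first by walking `cur` toward the root and filling the buffer from the right end, so no component ever needs to be shifted. A minimal sketch, with an array of names standing in for the chain of dentry parents and none of the locking or encryption handling:

    #include <stdio.h>
    #include <string.h>

    /* names[] runs root..leaf; the walk consumes it leaf-first. */
    static int build_path(char *buf, int buflen,
                          const char *const *names, int depth)
    {
        int pos = buflen;

        buf[--pos] = '\0';
        while (depth--) {
            int len = strlen(names[depth]);

            pos -= len;
            if (pos <= 0)
                return -1;              /* buffer too small */
            memcpy(buf + pos, names[depth], len);
            buf[--pos] = '/';
        }
        return pos;                     /* path starts at buf + pos */
    }

    int main(void)
    {
        const char *const names[] = { "a", "b", "c" };
        char buf[32];
        int pos = build_path(buf, sizeof(buf), names, 3);

        if (pos >= 0)
            printf("%s\n", buf + pos);  /* /a/b/c */
        return 0;
    }
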
fs/configfs/symlink.c
47
int cur = strlen(config_item_name(p));
fs/configfs/symlink.c
50
length -= cur;
fs/configfs/symlink.c
51
memcpy(buffer + length, config_item_name(p), cur);
fs/coredump.c
160
int cur = cn->used;
fs/coredump.c
173
if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
fs/coredump.c
174
(cn->used - cur == 2 && cn->corename[cur] == '.'
fs/coredump.c
175
&& cn->corename[cur+1] == '.'))
fs/coredump.c
176
cn->corename[cur] = '!';
fs/coredump.c
184
if (cn->used == cur)
fs/coredump.c
188
for (; cur < cn->used; ++cur) {
fs/coredump.c
189
if (cn->corename[cur] == '/')
fs/coredump.c
190
cn->corename[cur] = '!';
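
The coredump lines above sanitize one just-expanded chunk of the core name: a chunk that is exactly "." or ".." has its first byte replaced, and every '/' is replaced, so a format expansion cannot escape into other paths. A small sketch of the same checks, outside the kernel's corename structure:

    #include <stdio.h>

    static void sanitize_chunk(char *name, int start, int used)
    {
        /* A bare "." or ".." component becomes "!." / "!.." */
        if ((used - start == 1 && name[start] == '.') ||
            (used - start == 2 && name[start] == '.' &&
             name[start + 1] == '.'))
            name[start] = '!';

        for (; start < used; start++)
            if (name[start] == '/')
                name[start] = '!';
    }

    int main(void)
    {
        char a[] = "..";
        char b[] = "evil/name";

        sanitize_chunk(a, 0, 2);
        sanitize_chunk(b, 0, 9);
        printf("%s %s\n", a, b);        /* !. evil!name */
        return 0;
    }
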
fs/efs/inode.c
205
int cur, last, first = 1;
fs/efs/inode.c
230
cur = (last + dirext) % in->numextents;
fs/efs/inode.c
231
if ((result = efs_extent_check(&in->extents[cur], block, sb))) {
fs/efs/inode.c
232
in->lastextent = cur;
fs/efs/inode.c
247
cur = (last + indext) % indexts;
fs/efs/inode.c
257
for(dirext = 0; cur < ibase && dirext < direxts; dirext++) {
fs/efs/inode.c
265
cur, block);
fs/efs/inode.c
272
(cur - ibase) /
fs/efs/inode.c
274
ioffset = (cur - ibase) %
fs/efs/inode.c
298
cur, iblock);
fs/efs/inode.c
305
in->lastextent = cur;
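
The efs entries above start each extent search at the previously successful index and wrap around with a modulo, since file access is usually sequential and the next hit is typically adjacent. The sketch below shows the same cached-start circular scan over a plain array of invented values:

    #include <stdio.h>

    /* Search tbl[] for `want`, starting at *last and wrapping. */
    static int find_from_last(const int *tbl, int n, int *last, int want)
    {
        int i;

        for (i = 0; i < n; i++) {
            int cur = (*last + i) % n;

            if (tbl[cur] == want) {
                *last = cur;            /* remember the hit for next time */
                return cur;
            }
        }
        return -1;
    }

    int main(void)
    {
        int tbl[] = { 10, 20, 30, 40 };
        int last = 2;

        printf("%d\n", find_from_last(tbl, 4, &last, 40));  /* 3 */
        printf("%d\n", find_from_last(tbl, 4, &last, 10));  /* 0 */
        return 0;
    }
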
fs/erofs/decompressor.c
295
unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
fs/erofs/decompressor.c
301
cur = bs - (rq->pageofs_out & (bs - 1));
fs/erofs/decompressor.c
302
pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
fs/erofs/decompressor.c
303
cur = min(cur, rq->outputsize);
fs/erofs/decompressor.c
304
if (cur && rq->out[0]) {
fs/erofs/decompressor.c
307
memmove(kin + rq->pageofs_out, kin + pi, cur);
fs/erofs/decompressor.c
310
kin + pi, cur);
fs/erofs/decompressor.c
313
rq->outputsize -= cur;
fs/erofs/decompressor.c
316
for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
fs/erofs/decompressor.c
324
no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
fs/erofs/decompressor.c
325
po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
fs/erofs/fileio.c
100
map->m_la = pos + cur;
fs/erofs/fileio.c
101
map->m_llen = end - cur;
fs/erofs/fileio.c
107
ofs = folio_pos(folio) + cur - map->m_la;
fs/erofs/fileio.c
108
len = min_t(loff_t, map->m_llen - ofs, end - cur);
fs/erofs/fileio.c
119
memcpy_to_folio(folio, cur, src, len);
fs/erofs/fileio.c
122
folio_zero_segment(folio, cur, cur + len);
fs/erofs/fileio.c
145
if (!bio_add_folio(&io->rq->bio, folio, len, cur))
fs/erofs/fileio.c
151
cur += len;
fs/erofs/fileio.c
93
unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
fs/erofs/fileio.c
98
while (cur < end) {
fs/erofs/fileio.c
99
if (!in_range(pos + cur, map->m_la, map->m_llen)) {
fs/erofs/zdata.c
1002
memcpy_to_folio(folio, cur, src, cnt);
fs/erofs/zdata.c
1015
unsigned int end = folio_size(folio), split = 0, cur, pgs;
fs/erofs/zdata.c
1032
cur = offset > map->m_la ? 0 : map->m_la - offset;
fs/erofs/zdata.c
1033
pgs = round_down(cur, PAGE_SIZE);
fs/erofs/zdata.c
1038
folio_zero_segment(folio, cur, end);
fs/erofs/zdata.c
1041
erofs_off_t fpos = offset + cur - map->m_la;
fs/erofs/zdata.c
1043
err = z_erofs_read_fragment(inode->i_sb, folio, cur,
fs/erofs/zdata.c
1044
cur + min(map->m_llen - fpos, end - cur),
fs/erofs/zdata.c
1067
if (cur <= pgs) {
fs/erofs/zdata.c
1069
cur = pgs;
fs/erofs/zdata.c
1090
map->m_llen = offset + cur - map->m_la;
fs/erofs/zdata.c
1092
if (cur <= pgs) {
fs/erofs/zdata.c
1093
split = cur < pgs;
fs/erofs/zdata.c
1096
} while ((end = cur) > 0);
fs/erofs/zdata.c
1159
unsigned int end, cur;
fs/erofs/zdata.c
1163
cur = bvi->bvec.offset < 0 ? -bvi->bvec.offset : 0;
fs/erofs/zdata.c
1167
while (cur < end) {
fs/erofs/zdata.c
1170
pgnr = (bvi->bvec.offset + cur + off0) >> PAGE_SHIFT;
fs/erofs/zdata.c
1173
scur = bvi->bvec.offset + cur -
fs/erofs/zdata.c
1175
len = min_t(unsigned int, end - cur, PAGE_SIZE - scur);
fs/erofs/zdata.c
1178
cur += len;
fs/erofs/zdata.c
1182
memcpy(dst + cur, src + scur, len);
fs/erofs/zdata.c
1184
cur += len;
fs/erofs/zdata.c
138
unsigned int nr, cur;
fs/erofs/zdata.c
160
iter->cur = 0;
fs/erofs/zdata.c
167
unsigned int cur)
fs/erofs/zdata.c
1690
erofs_off_t cur, end;
fs/erofs/zdata.c
1708
cur = mdev.m_pa;
fs/erofs/zdata.c
1709
end = round_up(cur + pcl->pageofs_in + pcl->pclustersize,
fs/erofs/zdata.c
1713
if (bio && (cur != last_pa ||
fs/erofs/zdata.c
1734
if (cur + bvec.bv_len > end)
fs/erofs/zdata.c
1735
bvec.bv_len = end - cur;
fs/erofs/zdata.c
174
while (cur > iter->nr) {
fs/erofs/zdata.c
175
cur -= iter->nr;
fs/erofs/zdata.c
1755
(mdev.m_dif->fsoff + cur) >> 9;
fs/erofs/zdata.c
1765
last_pa = cur + bvec.bv_len;
fs/erofs/zdata.c
1767
} while ((cur += bvec.bv_len) < end);
fs/erofs/zdata.c
178
iter->cur = cur;
fs/erofs/zdata.c
1834
erofs_off_t cur, end, headoffset = f->headoffset;
fs/erofs/zdata.c
1850
cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
fs/erofs/zdata.c
1851
readahead_expand(rac, headoffset, cur - headoffset);
fs/erofs/zdata.c
186
if (iter->cur >= iter->nr) {
fs/erofs/zdata.c
1861
cur = map->m_la + map->m_llen - 1;
fs/erofs/zdata.c
1862
while ((cur >= end) && (cur < i_size_read(inode))) {
fs/erofs/zdata.c
1863
pgoff_t index = cur >> PAGE_SHIFT;
fs/erofs/zdata.c
1875
if (cur < PAGE_SIZE)
fs/erofs/zdata.c
1877
cur = (index << PAGE_SHIFT) - 1;
fs/erofs/zdata.c
203
iter->bvset->bvec[iter->cur++] = *bvec;
fs/erofs/zdata.c
211
if (iter->cur == iter->nr)
fs/erofs/zdata.c
215
*bvec = iter->bvset->bvec[iter->cur++];
fs/erofs/zdata.c
984
unsigned int cur, unsigned int end, erofs_off_t pos)
fs/erofs/zdata.c
995
for (; cur < end; cur += cnt, pos += cnt) {
fs/erofs/zdata.c
996
cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos));
fs/exfat/dir.c
1168
ei->hint_femp.cur.dir = EXFAT_EOF_CLUSTER;
fs/exfat/dir.c
1280
hint_femp.cur = clu;
fs/exfat/dir.c
1312
hint_femp.cur.dir = EXFAT_EOF_CLUSTER;
fs/exfat/dir.c
959
candi_empty->cur = *clu;
fs/exfat/exfat_fs.h
179
struct exfat_chain cur;
fs/exfat/namei.c
229
exfat_chain_dup(&clu, &hint_femp->cur);
fs/exfat/namei.c
266
exfat_chain_set(&hint_femp->cur, EXFAT_EOF_CLUSTER, 0,
fs/exfat/namei.c
269
hint_femp->cur = clu;
fs/exfat/namei.c
370
hint_femp.cur.flags = ALLOC_FAT_CHAIN;
fs/exfat/namei.c
377
if (hint_femp.cur.dir == EXFAT_EOF_CLUSTER)
fs/exfat/namei.c
378
exfat_chain_set(&hint_femp.cur, clu.dir, 0, clu.flags);
fs/exfat/namei.c
382
hint_femp.cur.size++;
fs/ext2/balloc.c
764
ext2_fsblk_t cur;
fs/ext2/balloc.c
769
cur = start_block;
fs/ext2/balloc.c
775
if (cur <= rsv->rsv_end)
fs/ext2/balloc.c
776
cur = rsv->rsv_end + 1;
fs/ext2/balloc.c
787
if (cur > last_block)
fs/ext2/balloc.c
801
if (cur + size <= rsv->rsv_start) {
fs/ext2/balloc.c
830
my_rsv->rsv_start = cur;
fs/ext2/balloc.c
831
my_rsv->rsv_end = cur + size - 1;
fs/ext4/ext4.h
1308
extern void mb_set_bits(void *bm, int cur, int len);
fs/ext4/extents.c
395
ext4_lblk_t cur = 0;
fs/ext4/extents.c
419
if (lblock < cur) {
fs/ext4/extents.c
423
cur = lblock + ext4_ext_get_actual_len(ext);
fs/ext4/extents.c
443
if (lblock < cur) {
fs/ext4/extents.c
449
cur = lblock + 1;
fs/ext4/extents.c
6087
ext4_lblk_t old_cur, cur = 0;
fs/ext4/extents.c
6089
while (cur < end) {
fs/ext4/extents.c
6090
path = ext4_find_extent(inode, cur, NULL, 0);
fs/ext4/extents.c
6099
old_cur = cur;
fs/ext4/extents.c
6100
cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
fs/ext4/extents.c
6101
if (cur <= old_cur)
fs/ext4/extents.c
6102
cur = old_cur + 1;
fs/ext4/extents.c
6113
static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
fs/ext4/extents.c
6118
map.m_lblk = *cur;
fs/ext4/extents.c
6119
map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
fs/ext4/extents.c
6126
*cur = *cur + map.m_len;
fs/ext4/extents.c
6135
ext4_lblk_t cur = 0, end;
fs/ext4/extents.c
6151
cur = 0;
fs/ext4/extents.c
6152
while (cur < end) {
fs/ext4/extents.c
6153
map.m_lblk = cur;
fs/ext4/extents.c
6154
map.m_len = end - cur;
fs/ext4/extents.c
6160
cur = cur + map.m_len;
fs/ext4/extents.c
6170
cur = 0;
fs/ext4/extents.c
6171
ret = skip_hole(inode, &cur);
fs/ext4/extents.c
6174
path = ext4_find_extent(inode, cur, path, 0);
fs/ext4/extents.c
6178
while (cur < end) {
fs/ext4/extents.c
6179
path = ext4_find_extent(inode, cur, path, 0);
fs/ext4/extents.c
6186
cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
fs/ext4/extents.c
6188
ret = skip_hole(inode, &cur);
fs/ext4/extents.c
6192
path2 = ext4_find_extent(inode, cur, path2, 0);
fs/ext4/extents.c
6222
ext4_lblk_t cur = 0, end;
fs/ext4/extents.c
6239
cur = 0;
fs/ext4/extents.c
6240
while (cur < end) {
fs/ext4/extents.c
6241
map.m_lblk = cur;
fs/ext4/extents.c
6242
map.m_len = end - cur;
fs/ext4/extents.c
6262
cur = cur + map.m_len;
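
A recurring shape in the ext4 extent entries above: `cur` is a logical block cursor advanced past each extent found, with an `old_cur`/`cur <= old_cur` guard forcing at least one block of progress so a corrupt zero-length extent cannot spin the loop forever. A self-contained sketch under an invented lookup helper (`find_extent()` is hypothetical, standing in for ext4_find_extent()):

    #include <stdio.h>

    struct extent { unsigned int lblk, len; };

    /* Return the extent covering or following lblk, or NULL. */
    static const struct extent *find_extent(const struct extent *tbl,
                                            int n, unsigned int lblk)
    {
        for (int i = 0; i < n; i++)
            if (tbl[i].lblk + tbl[i].len > lblk)
                return &tbl[i];
        return NULL;
    }

    int main(void)
    {
        const struct extent tbl[] = { { 0, 8 }, { 16, 4 } };
        unsigned int cur = 0, end = 32;

        while (cur < end) {
            const struct extent *ex = find_extent(tbl, 2, cur);
            unsigned int old_cur = cur;

            if (!ex)
                break;
            printf("extent at %u..%u\n", ex->lblk, ex->lblk + ex->len - 1);
            cur = ex->lblk + ex->len;
            if (cur <= old_cur)         /* force forward progress */
                cur = old_cur + 1;
        }
        return 0;
    }
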
fs/ext4/fast_commit.c
1768
ext4_lblk_t start, cur;
fs/ext4/fast_commit.c
1796
cur = start;
fs/ext4/fast_commit.c
1803
map.m_lblk = cur;
fs/ext4/fast_commit.c
1813
path = ext4_find_extent(inode, cur, path, 0);
fs/ext4/fast_commit.c
1817
newex.ee_block = cpu_to_le32(cur);
fs/ext4/fast_commit.c
1819
&newex, start_pblk + cur - start);
fs/ext4/fast_commit.c
1832
if (start_pblk + cur - start != map.m_pblk) {
fs/ext4/fast_commit.c
1838
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
fs/ext4/fast_commit.c
1840
start_pblk + cur - start);
fs/ext4/fast_commit.c
1860
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
fs/ext4/fast_commit.c
1870
cur += map.m_len;
fs/ext4/fast_commit.c
1889
ext4_lblk_t cur, remaining;
fs/ext4/fast_commit.c
1893
cur = le32_to_cpu(lrange.fc_lblk);
fs/ext4/fast_commit.c
1897
le32_to_cpu(lrange.fc_ino), cur, remaining);
fs/ext4/fast_commit.c
1913
map.m_lblk = cur;
fs/ext4/fast_commit.c
1921
cur += ret;
fs/ext4/fast_commit.c
1925
cur += map.m_len;
fs/ext4/fast_commit.c
1951
ext4_lblk_t cur, end;
fs/ext4/fast_commit.c
1962
cur = 0;
fs/ext4/fast_commit.c
1968
while (cur < end) {
fs/ext4/fast_commit.c
1969
map.m_lblk = cur;
fs/ext4/fast_commit.c
1970
map.m_len = end - cur;
fs/ext4/fast_commit.c
1985
cur += ret;
fs/ext4/fast_commit.c
1989
cur = cur + (map.m_len ? map.m_len : 1);
fs/ext4/fast_commit.c
2085
__u8 *start, *end, *cur, *val;
fs/ext4/fast_commit.c
2113
for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
fs/ext4/fast_commit.c
2114
cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
fs/ext4/fast_commit.c
2115
ext4_fc_get_tl(&tl, cur);
fs/ext4/fast_commit.c
2116
val = cur + EXT4_FC_TAG_BASE_LEN;
fs/ext4/fast_commit.c
2144
state->fc_crc = ext4_chksum(state->fc_crc, cur,
fs/ext4/fast_commit.c
2150
state->fc_crc = ext4_chksum(state->fc_crc, cur,
fs/ext4/fast_commit.c
2177
state->fc_crc = ext4_chksum(state->fc_crc, cur,
fs/ext4/fast_commit.c
2203
__u8 *start, *end, *cur, *val;
fs/ext4/fast_commit.c
2233
for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
fs/ext4/fast_commit.c
2234
cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
fs/ext4/fast_commit.c
2235
ext4_fc_get_tl(&tl, cur);
fs/ext4/fast_commit.c
2236
val = cur + EXT4_FC_TAG_BASE_LEN;
fs/ext4/mballoc.c
1865
static void mb_clear_bits(void *bm, int cur, int len)
fs/ext4/mballoc.c
1869
len = cur + len;
fs/ext4/mballoc.c
1870
while (cur < len) {
fs/ext4/mballoc.c
1871
if ((cur & 31) == 0 && (len - cur) >= 32) {
fs/ext4/mballoc.c
1873
addr = bm + (cur >> 3);
fs/ext4/mballoc.c
1875
cur += 32;
fs/ext4/mballoc.c
1878
mb_clear_bit(cur, bm);
fs/ext4/mballoc.c
1879
cur++;
fs/ext4/mballoc.c
1886
static int mb_test_and_clear_bits(void *bm, int cur, int len)
fs/ext4/mballoc.c
1891
len = cur + len;
fs/ext4/mballoc.c
1892
while (cur < len) {
fs/ext4/mballoc.c
1893
if ((cur & 31) == 0 && (len - cur) >= 32) {
fs/ext4/mballoc.c
1895
addr = bm + (cur >> 3);
fs/ext4/mballoc.c
1897
zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
fs/ext4/mballoc.c
1899
cur += 32;
fs/ext4/mballoc.c
1902
if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
fs/ext4/mballoc.c
1903
zero_bit = cur;
fs/ext4/mballoc.c
1904
cur++;
fs/ext4/mballoc.c
1910
void mb_set_bits(void *bm, int cur, int len)
fs/ext4/mballoc.c
1914
len = cur + len;
fs/ext4/mballoc.c
1915
while (cur < len) {
fs/ext4/mballoc.c
1916
if ((cur & 31) == 0 && (len - cur) >= 32) {
fs/ext4/mballoc.c
1918
addr = bm + (cur >> 3);
fs/ext4/mballoc.c
1920
cur += 32;
fs/ext4/mballoc.c
1923
mb_set_bit(cur, bm);
fs/ext4/mballoc.c
1924
cur++;
fs/ext4/mballoc.c
3874
struct list_head *cur, *tmp;
fs/ext4/mballoc.c
3877
list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
fs/ext4/mballoc.c
3878
pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
fs/ext4/mballoc.c
5092
struct list_head *cur;
fs/ext4/mballoc.c
5109
list_for_each(cur, &grp->bb_prealloc_list) {
fs/ext4/mballoc.c
5110
pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
fs/ext4/mballoc.c
5764
struct list_head *cur;
fs/ext4/mballoc.c
5769
list_for_each(cur, &grp->bb_prealloc_list) {
fs/ext4/mballoc.c
5770
pa = list_entry(cur, struct ext4_prealloc_space,
fs/ext4/mballoc.c
717
struct list_head *cur;
fs/ext4/mballoc.c
7192
void mb_clear_bits_test(void *bm, int cur, int len)
fs/ext4/mballoc.c
7194
mb_clear_bits(bm, cur, len);
fs/ext4/mballoc.c
782
list_for_each(cur, &grp->bb_prealloc_list) {
fs/ext4/mballoc.c
785
pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
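
The mb_set_bits()/mb_clear_bits() entries above walk a bitmap with `cur` as a bit index, writing a whole 32-bit word when `cur` is word-aligned and at least 32 bits remain, and single bits otherwise. A runnable sketch of the same fast/slow split; bit numbering here is little-endian within each 32-bit word, which matches the x86 kernel helpers but is still an assumption of this sketch:

    #include <stdint.h>
    #include <stdio.h>

    static void set_bits(uint32_t *bm, int cur, int len)
    {
        len = cur + len;                    /* reuse len as the end bit */
        while (cur < len) {
            if ((cur & 31) == 0 && (len - cur) >= 32) {
                bm[cur >> 5] = 0xffffffffu; /* whole word at once */
                cur += 32;
                continue;
            }
            bm[cur >> 5] |= 1u << (cur & 31);
            cur++;
        }
    }

    int main(void)
    {
        uint32_t bm[3] = { 0 };

        set_bits(bm, 30, 36);               /* bits 30..65 */
        printf("%08x %08x %08x\n", bm[0], bm[1], bm[2]);
        /* c0000000 ffffffff 00000003 */
        return 0;
    }
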
fs/ext4/mballoc.h
279
extern void mb_clear_bits_test(void *bm, int cur, int len);
fs/ext4/page-io.c
218
struct list_head *cur, *before, *after;
fs/ext4/page-io.c
226
cur = &io_end->list;
fs/ext4/page-io.c
227
before = cur->prev;
fs/ext4/page-io.c
229
after = cur->next;
fs/ext4/super.c
3825
unsigned long next_wakeup, cur;
fs/ext4/super.c
3889
cur = jiffies;
fs/ext4/super.c
3890
if (!next_wakeup_initialized || time_after_eq(cur, next_wakeup)) {
fs/ext4/super.c
3895
schedule_timeout_interruptible(next_wakeup - cur);
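
The ext4 lazyinit lines above compare jiffies values with time_after_eq(), which stays correct across counter wrap. The trick is a subtraction interpreted as signed, sketched below; it holds as long as the two values are within half the counter range of each other, and the two's-complement cast is the same assumption the kernel makes.

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    /* Wrap-safe "a >= b" for a free-running counter. */
    static int after_eq(jiffies_t a, jiffies_t b)
    {
        return (long)(a - b) >= 0;
    }

    int main(void)
    {
        jiffies_t next_wakeup = 5;              /* wrapped past ~0UL */
        jiffies_t cur = ~0UL - 5;               /* just before the wrap */

        printf("%d\n", after_eq(cur, next_wakeup));     /* 0: not due */
        printf("sleep %lu ticks\n", next_wakeup - cur); /* 11 */
        return 0;
    }
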
fs/f2fs/extent_cache.c
170
static bool __is_back_mergeable(struct extent_info *cur,
fs/f2fs/extent_cache.c
173
return __is_extent_mergeable(back, cur, type);
fs/f2fs/extent_cache.c
176
static bool __is_front_mergeable(struct extent_info *cur,
fs/f2fs/extent_cache.c
179
return __is_extent_mergeable(cur, front, type);
fs/f2fs/f2fs.h
1035
static inline bool __is_discard_back_mergeable(struct discard_info *cur,
fs/f2fs/f2fs.h
1038
return __is_discard_mergeable(back, cur, max_len);
fs/f2fs/f2fs.h
1041
static inline bool __is_discard_front_mergeable(struct discard_info *cur,
fs/f2fs/f2fs.h
1044
return __is_discard_mergeable(cur, front, max_len);
fs/f2fs/f2fs.h
4394
int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \
fs/f2fs/f2fs.h
4396
if (cur > max) \
fs/f2fs/f2fs.h
4397
atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
fs/f2fs/gc.c
433
struct rb_node *cur = rb_first_cached(root), *next;
fs/f2fs/gc.c
436
while (cur) {
fs/f2fs/gc.c
437
next = rb_next(cur);
fs/f2fs/gc.c
441
cur_ve = rb_entry(cur, struct victim_entry, rb_node);
fs/f2fs/gc.c
450
cur = next;
fs/f2fs/node.c
3050
struct nat_entry_set *cur;
fs/f2fs/node.c
3055
list_for_each_entry(cur, head, set_list) {
fs/f2fs/node.c
3056
if (cur->entry_cnt >= nes->entry_cnt) {
fs/f2fs/node.c
3057
list_add(&nes->set_list, cur->set_list.prev);
fs/f2fs/node.c
3105
struct nat_entry *ne, *cur;
fs/f2fs/node.c
3129
list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
fs/f2fs/segment.c
1002
struct rb_node *cur = rb_first_cached(&dcc->root), *next;
fs/f2fs/segment.c
1005
while (cur) {
fs/f2fs/segment.c
1006
next = rb_next(cur);
fs/f2fs/segment.c
1010
cur_dc = rb_entry(cur, struct discard_cmd, rb_node);
fs/f2fs/segment.c
1020
cur = next;
fs/f2fs/segment.c
287
struct revoke_entry *cur, *tmp;
fs/f2fs/segment.c
291
list_for_each_entry_safe(cur, tmp, head, list) {
fs/f2fs/segment.c
293
__replace_atomic_write_block(inode, cur->index,
fs/f2fs/segment.c
294
cur->old_addr, NULL, true);
fs/f2fs/segment.c
296
f2fs_truncate_hole(inode, start_index, cur->index);
fs/f2fs/segment.c
297
start_index = cur->index + 1;
fs/f2fs/segment.c
300
list_del(&cur->list);
fs/f2fs/segment.c
301
kmem_cache_free(revoke_entry_slab, cur);
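
The f2fs revoke-list lines above use list_for_each_entry_safe(), whose whole point is that `cur` may be freed inside the loop: the successor is loaded into a temporary before `cur` is released. The same discipline over a plain singly linked list:

    #include <stdlib.h>
    #include <stdio.h>

    struct entry {
        int index;
        struct entry *next;
    };

    static void release_all(struct entry *head)
    {
        struct entry *cur, *tmp;

        for (cur = head; cur; cur = tmp) {
            tmp = cur->next;        /* must be read before free() */
            printf("freeing %d\n", cur->index);
            free(cur);
        }
    }

    int main(void)
    {
        struct entry *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct entry *e = malloc(sizeof(*e));

            e->index = i;
            e->next = head;
            head = e;
        }
        release_all(head);
        return 0;
    }
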
fs/fat/fatent.c
638
sector_t cur;
fs/fat/fatent.c
672
ra->cur = 0;
fs/fat/fatent.c
677
ra->ra_advance = ra->cur;
fs/fat/fatent.c
678
ra->ra_next = ra->cur;
fs/fat/fatent.c
679
ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
fs/fat/fatent.c
689
if (ra->cur >= ra->ra_advance) {
fs/fat/fatent.c
698
diff = blocknr - ra->cur;
fs/fat/fatent.c
713
ra->cur++;
fs/fuse/dir.c
130
struct fuse_dentry *cur;
fs/fuse/dir.c
145
cur = rb_entry(*p, struct fuse_dentry, node);
fs/fuse/dir.c
146
if (fd->time < cur->time)
fs/gfs2/dir.c
640
struct gfs2_dirent *cur = *dent, *tmp;
fs/gfs2/dir.c
644
ret = dirent_check_reclen(dip, cur, bh_end);
fs/gfs2/dir.c
648
tmp = (void *)cur + ret;
fs/gfs2/dir.c
673
struct gfs2_dirent *prev, struct gfs2_dirent *cur)
fs/gfs2/dir.c
677
if (gfs2_dirent_sentinel(cur)) {
fs/gfs2/dir.c
689
cur->de_inum.no_addr = 0;
fs/gfs2/dir.c
690
cur->de_inum.no_formal_ino = 0;
fs/gfs2/dir.c
697
cur_rec_len = be16_to_cpu(cur->de_rec_len);
fs/gfs2/dir.c
699
if ((char *)prev + prev_rec_len != (char *)cur)
fs/gfs2/dir.c
701
if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
fs/gfs2/lock_dlm.c
225
static bool middle_conversion(int cur, int req)
fs/gfs2/lock_dlm.c
227
return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) ||
fs/gfs2/lock_dlm.c
228
(cur == DLM_LOCK_CW && req == DLM_LOCK_PR);
fs/gfs2/lock_dlm.c
231
static bool down_conversion(int cur, int req)
fs/gfs2/lock_dlm.c
233
return !middle_conversion(cur, req) && req < cur;
fs/gfs2/lock_dlm.c
282
int cur, req;
fs/gfs2/lock_dlm.c
288
cur = make_mode(glock_sbd(gl), gl->gl_state);
fs/gfs2/lock_dlm.c
290
blocking = !down_conversion(cur, req) &&
fs/gfs2/rgrp.c
1500
struct gfs2_blkreserv *cur =
fs/gfs2/rgrp.c
1504
rc = rs_cmp(rs->rs_start, rs->rs_requested, cur);
fs/gfs2/rgrp.c
527
struct gfs2_rgrpd *cur;
fs/gfs2/rgrp.c
532
cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
fs/gfs2/rgrp.c
534
if (blk < cur->rd_addr)
fs/gfs2/rgrp.c
536
else if (blk >= cur->rd_data0 + cur->rd_data)
fs/gfs2/rgrp.c
541
if (blk < cur->rd_addr)
fs/gfs2/rgrp.c
543
if (blk >= cur->rd_data0 + cur->rd_data)
fs/gfs2/rgrp.c
546
return cur;
fs/gfs2/rgrp.c
870
struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
fs/gfs2/rgrp.c
874
if (rgd->rd_addr < cur->rd_addr)
fs/gfs2/rgrp.c
876
else if (rgd->rd_addr > cur->rd_addr)
fs/inode.c
2835
u32 cns, cur;
fs/inode.c
2869
cur = cns;
fs/inode.c
2872
if (try_cmpxchg(&inode->i_ctime_nsec, &cur, now.tv_nsec)) {
fs/inode.c
2875
trace_ctime_ns_xchg(inode, cns, now.tv_nsec, cur);
fs/inode.c
2884
if (!(cns & I_CTIME_QUERIED) && (cns | I_CTIME_QUERIED) == cur) {
fs/inode.c
2885
cns = cur;
fs/inode.c
2890
now.tv_nsec = cur & ~I_CTIME_QUERIED;
fs/inode.c
2917
u32 cur, old;
fs/inode.c
2920
cur = smp_load_acquire(&inode->i_ctime_nsec);
fs/inode.c
2921
cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
fs/inode.c
2946
old = cur;
fs/inode.c
2947
if (try_cmpxchg(&inode->i_ctime_nsec, &cur, update.tv_nsec)) {
fs/inode.c
2960
if (!(old & I_CTIME_QUERIED) && (cur == (old | I_CTIME_QUERIED)))
fs/inode.c
2965
cur_ts.tv_nsec = cur & ~I_CTIME_QUERIED;
fs/libfs.c
2012
u64 cur, new;
fs/libfs.c
2026
cur = inode_peek_iversion_raw(inode);
fs/libfs.c
2027
if (!force && !(cur & I_VERSION_QUERIED)) {
fs/libfs.c
2029
cur = inode_peek_iversion_raw(inode);
fs/libfs.c
2034
if (!force && !(cur & I_VERSION_QUERIED))
fs/libfs.c
2038
new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
fs/libfs.c
2039
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
fs/libfs.c
2059
u64 cur, new;
fs/libfs.c
2066
cur = inode_peek_iversion_raw(inode);
fs/libfs.c
2069
if (cur & I_VERSION_QUERIED) {
fs/libfs.c
2076
new = cur | I_VERSION_QUERIED;
fs/libfs.c
2077
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
fs/libfs.c
2078
return cur >> I_VERSION_QUERIED_SHIFT;
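
The fs/libfs.c i_version entries above steal the low bit of the counter as a "queried" flag: readers set it, and writers only pay for an increment if someone has looked since the last bump. A sketch of the same compare-exchange shape with C11 atomics; the shift and bit layout mirror what the entries suggest but are this sketch's assumption, not the kernel's exact code.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUERIED   1ULL
    #define INCREMENT 2ULL          /* counter lives above the flag bit */

    static int maybe_inc(_Atomic uint64_t *v, int force)
    {
        uint64_t cur = atomic_load(v), new;

        do {
            if (!force && !(cur & QUERIED))
                return 0;           /* nobody read it: skip the bump */
            new = (cur & ~QUERIED) + INCREMENT;
        } while (!atomic_compare_exchange_weak(v, &cur, new));
        return 1;
    }

    static uint64_t query(_Atomic uint64_t *v)
    {
        uint64_t cur = atomic_load(v);

        while (!(cur & QUERIED) &&
               !atomic_compare_exchange_weak(v, &cur, cur | QUERIED))
            ;                       /* mark the value as seen */
        return cur >> 1;
    }

    int main(void)
    {
        _Atomic uint64_t v = 0;

        printf("%d\n", maybe_inc(&v, 0));   /* 0: never queried */
        printf("%llu\n", (unsigned long long)query(&v));
        printf("%d\n", maybe_inc(&v, 0));   /* 1: bump happens now */
        return 0;
    }
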
fs/locks.c
2937
struct file_lock_core *cur, *tmp;
fs/locks.c
2941
cur = hlist_entry(v, struct file_lock_core, flc_link);
fs/locks.c
2943
if (locks_translate_pid(cur, proc_pidns) == 0)
fs/locks.c
2951
while (cur != NULL) {
fs/locks.c
2953
lock_get_status(f, cur, iter->li_pos, "-> ", level);
fs/locks.c
2955
lock_get_status(f, cur, iter->li_pos, "", level);
fs/locks.c
2957
if (!list_empty(&cur->flc_blocked_requests)) {
fs/locks.c
2959
cur = list_first_entry_or_null(&cur->flc_blocked_requests,
fs/locks.c
2965
tmp = get_next_blocked_member(cur);
fs/locks.c
2967
while (tmp == NULL && cur->flc_blocker != NULL) {
fs/locks.c
2968
cur = cur->flc_blocker;
fs/locks.c
2970
tmp = get_next_blocked_member(cur);
fs/locks.c
2972
cur = tmp;
fs/nfsd/nfs4state.c
276
struct nfsd4_blocked_lock *cur, *found = NULL;
fs/nfsd/nfs4state.c
279
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
fs/nfsd/nfs4state.c
280
if (fh_match(fh, &cur->nbl_fh)) {
fs/nfsd/nfs4state.c
281
list_del_init(&cur->nbl_list);
fs/nfsd/nfs4state.c
282
WARN_ON(list_empty(&cur->nbl_lru));
fs/nfsd/nfs4state.c
283
list_del_init(&cur->nbl_lru);
fs/nfsd/nfs4state.c
284
found = cur;
fs/nfsd/nfs4xdr.c
2842
struct path cur = *path;
fs/nfsd/nfs4xdr.c
2849
path_get(&cur);
fs/nfsd/nfs4xdr.c
2854
if (path_equal(&cur, root))
fs/nfsd/nfs4xdr.c
2856
if (cur.dentry == cur.mnt->mnt_root) {
fs/nfsd/nfs4xdr.c
2857
if (follow_up(&cur))
fs/nfsd/nfs4xdr.c
2870
components[ncomponents++] = cur.dentry;
fs/nfsd/nfs4xdr.c
2871
cur.dentry = dget_parent(cur.dentry);
fs/nfsd/nfs4xdr.c
2898
path_put(&cur);
fs/pipe.c
1102
int cur = *cnt;
fs/pipe.c
1104
while (cur == *cnt) {
fs/pipe.c
1113
return cur == *cnt ? -ERESTARTSYS : 0;
fs/proc/vmcore.c
1597
struct vmcore_range *cur;
fs/proc/vmcore.c
1627
list_for_each_entry(cur, list, list) {
fs/proc/vmcore.c
1628
WARN_ON_ONCE(!IS_ALIGNED(cur->paddr | cur->size, PAGE_SIZE));
fs/proc/vmcore.c
1629
elfcorehdr_fill_device_ram_ptload_elf64(phdr, cur->paddr, cur->size);
fs/smb/client/compress/lz77.c
51
static __always_inline u32 lz77_match_len(const void *wnd, const void *cur, const void *end)
fs/smb/client/compress/lz77.c
53
const void *start = cur;
fs/smb/client/compress/lz77.c
58
diff = lz77_read64(cur) ^ lz77_read64(wnd);
fs/smb/client/compress/lz77.c
60
cur += LZ77_STEP_SIZE;
fs/smb/client/compress/lz77.c
67
cur += count_trailing_zeros(diff) >> 3;
fs/smb/client/compress/lz77.c
69
return (cur - start);
fs/smb/client/compress/lz77.c
70
} while (likely(cur + LZ77_STEP_SIZE < end));
fs/smb/client/compress/lz77.c
72
while (cur < end && lz77_read8(cur++) == lz77_read8(wnd++))
fs/smb/client/compress/lz77.c
75
return (cur - start);
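
lz77_match_len() above measures a match eight bytes at a time: XOR two 64-bit loads, and if any byte differs, the count of trailing zero bits divided by 8 gives how many leading bytes still matched (little-endian layout assumed, as in the kernel helper). A standalone sketch using memcpy() for the unaligned reads and the GCC/Clang __builtin_ctzll():

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static uint64_t read64(const void *p)
    {
        uint64_t v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    static size_t match_len(const uint8_t *wnd, const uint8_t *cur,
                            const uint8_t *end)
    {
        const uint8_t *start = cur;

        while (cur + 8 <= end) {
            uint64_t diff = read64(cur) ^ read64(wnd);

            if (diff)               /* first differing byte inside word */
                return (size_t)(cur - start) +
                       ((size_t)__builtin_ctzll(diff) >> 3);
            cur += 8;
            wnd += 8;
        }
        while (cur < end && *cur == *wnd)   /* byte-wise tail */
            cur++, wnd++;
        return (size_t)(cur - start);
    }

    int main(void)
    {
        const uint8_t a[] = "abcdefghij-restofdata";
        const uint8_t b[] = "abcdefghij+restofdata";

        printf("%zu\n", match_len(a, b, b + sizeof(b) - 1));  /* 10 */
        return 0;
    }
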
fs/smb/client/file.c
1635
struct cifs_fid_locks *cur;
fs/smb/client/file.c
1638
list_for_each_entry(cur, &cinode->llist, llist) {
fs/smb/client/file.c
1639
rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
fs/smb/client/file.c
1810
LOCKING_ANDX_RANGE *buf, *cur;
fs/smb/client/file.c
1843
cur = buf;
fs/smb/client/file.c
1848
cur->Pid = cpu_to_le16(li->pid);
fs/smb/client/file.c
1849
cur->LengthLow = cpu_to_le32((u32)li->length);
fs/smb/client/file.c
1850
cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
fs/smb/client/file.c
1851
cur->OffsetLow = cpu_to_le32((u32)li->offset);
fs/smb/client/file.c
1852
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
fs/smb/client/file.c
1860
cur = buf;
fs/smb/client/file.c
1863
cur++;
fs/smb/client/file.c
2191
LOCKING_ANDX_RANGE *buf, *cur;
fs/smb/client/file.c
2218
cur = buf;
fs/smb/client/file.c
2239
cur->Pid = cpu_to_le16(li->pid);
fs/smb/client/file.c
2240
cur->LengthLow = cpu_to_le32((u32)li->length);
fs/smb/client/file.c
2241
cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
fs/smb/client/file.c
2242
cur->OffsetLow = cpu_to_le32((u32)li->offset);
fs/smb/client/file.c
2243
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
fs/smb/client/file.c
2269
cur = buf;
fs/smb/client/file.c
2272
cur++;
fs/smb/client/file.c
633
struct cifs_fid_locks *cur;
fs/smb/client/file.c
637
list_for_each_entry(cur, &cinode->llist, llist) {
fs/smb/client/file.c
638
if (!list_empty(&cur->locks)) {
fs/smb/client/smb2file.c
266
struct smb2_lock_element *buf, *cur;
fs/smb/client/smb2file.c
288
cur = buf;
fs/smb/client/smb2file.c
313
cur->Length = cpu_to_le64(li->length);
fs/smb/client/smb2file.c
314
cur->Offset = cpu_to_le64(li->offset);
fs/smb/client/smb2file.c
315
cur->Flags = cpu_to_le32(SMB2_LOCKFLAG_UNLOCK);
fs/smb/client/smb2file.c
341
cur = buf;
fs/smb/client/smb2file.c
344
cur++;
fs/smb/client/smb2file.c
370
struct smb2_lock_element *cur = buf;
fs/smb/client/smb2file.c
374
cur->Length = cpu_to_le64(li->length);
fs/smb/client/smb2file.c
375
cur->Offset = cpu_to_le64(li->offset);
fs/smb/client/smb2file.c
376
cur->Flags = cpu_to_le32(li->type |
fs/smb/client/smb2file.c
385
cur = buf;
fs/smb/client/smb2file.c
388
cur++;
fs/smb/client/transport.c
826
int i, start, cur;
fs/smb/client/transport.c
834
cur = (start + i) % ses->chan_count;
fs/smb/client/transport.c
835
server = ses->chans[cur].server;
fs/smb/client/transport.c
839
if (CIFS_CHAN_NEEDS_RECONNECT(ses, cur))
fs/smb/client/transport.c
852
index = cur;
fs/ubifs/debug.c
2330
struct list_head *cur;
fs/ubifs/debug.c
2336
for (cur = head->next; cur->next != head; cur = cur->next) {
fs/ubifs/debug.c
2341
sa = container_of(cur, struct ubifs_scan_node, list);
fs/ubifs/debug.c
2342
sb = container_of(cur->next, struct ubifs_scan_node, list);
fs/ubifs/debug.c
2397
struct list_head *cur;
fs/ubifs/debug.c
2403
for (cur = head->next; cur->next != head; cur = cur->next) {
fs/ubifs/debug.c
2408
sa = container_of(cur, struct ubifs_scan_node, list);
fs/ubifs/debug.c
2409
sb = container_of(cur->next, struct ubifs_scan_node, list);
fs/unicode/utf8-core.c
102
struct utf8cursor cur;
fs/unicode/utf8-core.c
105
if (utf8ncursor(&cur, um, UTF8_NFDICF, str->name, str->len) < 0)
fs/unicode/utf8-core.c
109
int c = utf8byte(&cur);
fs/unicode/utf8-core.c
124
struct utf8cursor cur;
fs/unicode/utf8-core.c
128
if (utf8ncursor(&cur, um, UTF8_NFDICF, str->name, str->len) < 0)
fs/unicode/utf8-core.c
131
while ((c = utf8byte(&cur))) {
fs/unicode/utf8-core.c
144
struct utf8cursor cur;
fs/unicode/utf8-core.c
147
if (utf8ncursor(&cur, um, UTF8_NFDI, str->name, str->len) < 0)
fs/unicode/utf8-core.c
151
int c = utf8byte(&cur);
fs/userfaultfd.c
1265
struct vm_area_struct *vma, *cur;
fs/userfaultfd.c
1339
cur = vma;
fs/userfaultfd.c
1343
VM_WARN_ON_ONCE(!!cur->vm_userfaultfd_ctx.ctx ^
fs/userfaultfd.c
1344
!!(cur->vm_flags & __VM_UFFD_FLAGS));
fs/userfaultfd.c
1348
if (!vma_can_userfault(cur, vm_flags, wp_async))
fs/userfaultfd.c
1360
if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
fs/userfaultfd.c
1367
if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
fs/userfaultfd.c
1368
end > cur->vm_start) {
fs/userfaultfd.c
1369
unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
fs/userfaultfd.c
1376
if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
fs/userfaultfd.c
1386
if (cur->vm_userfaultfd_ctx.ctx &&
fs/userfaultfd.c
1387
cur->vm_userfaultfd_ctx.ctx != ctx)
fs/userfaultfd.c
1393
if (is_vm_hugetlb_page(cur))
fs/userfaultfd.c
1397
} for_each_vma_range(vmi, cur, end);
fs/userfaultfd.c
1439
struct vm_area_struct *vma, *prev, *cur;
fs/userfaultfd.c
1486
cur = vma;
fs/userfaultfd.c
1490
VM_WARN_ON_ONCE(!!cur->vm_userfaultfd_ctx.ctx ^
fs/userfaultfd.c
1491
!!(cur->vm_flags & __VM_UFFD_FLAGS));
fs/userfaultfd.c
1497
if (cur->vm_userfaultfd_ctx.ctx &&
fs/userfaultfd.c
1498
cur->vm_userfaultfd_ctx.ctx != ctx)
fs/userfaultfd.c
1508
if (!vma_can_userfault(cur, cur->vm_flags, wp_async))
fs/userfaultfd.c
1512
} for_each_vma_range(vmi, cur, end);
fs/vboxsf/dir.c
102
loff_t i, cur = 0;
fs/vboxsf/dir.c
109
if (ctx->pos >= cur + b->entries) {
fs/vboxsf/dir.c
110
cur += b->entries;
fs/vboxsf/dir.c
121
for (i = 0, info = b->buf; i < ctx->pos - cur; i++) {
fs/verity/enable.c
23
struct block_buffer *cur)
fs/verity/enable.c
25
struct block_buffer *next = cur + 1;
fs/verity/enable.c
36
memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);
fs/verity/enable.c
38
fsverity_hash_block(params, cur->data, &next->data[next->filled]);
fs/verity/enable.c
40
cur->filled = 0;
fs/xfs/libxfs/xfs_alloc.c
1011
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
1020
bool isbnobt = xfs_btree_is_bno(cur->bc_ops);
fs/xfs/libxfs/xfs_alloc.c
1024
error = xfs_alloc_get_rec(cur, &bno, &len, &i);
fs/xfs/libxfs/xfs_alloc.c
1028
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_alloc.c
1096
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
fs/xfs/libxfs/xfs_alloc.c
1097
trace_xfs_alloc_cur_check(cur, bno, len, diff, *new);
fs/xfs/libxfs/xfs_alloc.c
1139
struct xfs_btree_cur *cur = acur->cnt;
fs/xfs/libxfs/xfs_alloc.c
1145
if (!xfs_alloc_cur_active(cur))
fs/xfs/libxfs/xfs_alloc.c
1150
error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
fs/xfs/libxfs/xfs_alloc.c
1155
error = xfs_alloc_get_rec(cur, &bno, &len, &i);
fs/xfs/libxfs/xfs_alloc.c
1160
error = xfs_alloc_cur_check(args, acur, cur, &i);
fs/xfs/libxfs/xfs_alloc.c
1174
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_alloc.c
1176
error = xfs_alloc_get_rec(cur, &bno, &len, &i);
fs/xfs/libxfs/xfs_alloc.c
1178
error = xfs_alloc_cur_check(args, acur, cur,
fs/xfs/libxfs/xfs_alloc.c
1434
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
1450
while (xfs_alloc_cur_active(cur) && count) {
fs/xfs/libxfs/xfs_alloc.c
1451
error = xfs_alloc_cur_check(args, acur, cur, &i);
fs/xfs/libxfs/xfs_alloc.c
1459
if (!xfs_alloc_cur_active(cur))
fs/xfs/libxfs/xfs_alloc.c
1463
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_alloc.c
1465
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_alloc.c
1469
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
fs/xfs/libxfs/xfs_alloc.c
156
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
164
cur->bc_rec.a.ar_startblock = bno;
fs/xfs/libxfs/xfs_alloc.c
165
cur->bc_rec.a.ar_blockcount = len;
fs/xfs/libxfs/xfs_alloc.c
166
error = xfs_btree_lookup(cur, dir, stat);
fs/xfs/libxfs/xfs_alloc.c
168
cur->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
fs/xfs/libxfs/xfs_alloc.c
170
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
fs/xfs/libxfs/xfs_alloc.c
179
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.c
184
return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
fs/xfs/libxfs/xfs_alloc.c
193
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.c
198
return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
fs/xfs/libxfs/xfs_alloc.c
207
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.c
212
return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
fs/xfs/libxfs/xfs_alloc.c
217
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_alloc.c
219
return cur && (cur->bc_flags & XFS_BTREE_ALLOCBT_ACTIVE);
fs/xfs/libxfs/xfs_alloc.c
229
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.c
237
return xfs_btree_update(cur, &rec);
fs/xfs/libxfs/xfs_alloc.c
268
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
272
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_alloc.c
276
cur->bc_ops->name, cur->bc_group->xg_gno, fa);
fs/xfs/libxfs/xfs_alloc.c
280
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_alloc.c
289
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.c
299
error = xfs_btree_get_rec(cur, &rec, stat);
fs/xfs/libxfs/xfs_alloc.c
304
fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec);
fs/xfs/libxfs/xfs_alloc.c
306
return xfs_alloc_complain_bad_rec(cur, fa, &irec);
fs/xfs/libxfs/xfs_alloc.c
4069
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
4078
fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec);
fs/xfs/libxfs/xfs_alloc.c
4080
return xfs_alloc_complain_bad_rec(cur, fa, &irec);
fs/xfs/libxfs/xfs_alloc.c
4082
return query->fn(cur, &irec, query->priv);
fs/xfs/libxfs/xfs_alloc.c
4088
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
4098
ASSERT(xfs_btree_is_bno(cur->bc_ops));
fs/xfs/libxfs/xfs_alloc.c
4099
return xfs_btree_query_range(cur, &low_brec, &high_brec,
fs/xfs/libxfs/xfs_alloc.c
4106
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
4112
ASSERT(xfs_btree_is_bno(cur->bc_ops));
fs/xfs/libxfs/xfs_alloc.c
4115
return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
fs/xfs/libxfs/xfs_alloc.c
4124
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.c
4137
return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
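
Throughout the xfs entries above, `cur` is a btree cursor: one core lookup primitive takes a direction (EQ/GE/LE), records the landing slot in the cursor, and reports via *stat whether a matching record exists, with thin wrappers per direction. A toy cursor over a sorted array reproduces just that interface shape; it is a sketch of the pattern, not of the real multi-level xfs_btree_cur.

    #include <stdio.h>

    enum lookup_dir { LOOKUP_LE, LOOKUP_EQ, LOOKUP_GE };

    struct cur {
        const int *rec;
        int nrecs;
        int slot;
    };

    static int lookup(struct cur *cur, enum lookup_dir dir, int key, int *stat)
    {
        int i;

        for (i = 0; i < cur->nrecs && cur->rec[i] < key; i++)
            ;                       /* i: first record >= key */
        switch (dir) {
        case LOOKUP_EQ:
            cur->slot = i;
            *stat = (i < cur->nrecs && cur->rec[i] == key);
            break;
        case LOOKUP_GE:
            cur->slot = i;
            *stat = (i < cur->nrecs);
            break;
        case LOOKUP_LE:
            if (i < cur->nrecs && cur->rec[i] == key) {
                cur->slot = i;
                *stat = 1;
            } else {
                cur->slot = i - 1;  /* largest record below key */
                *stat = (i > 0);
            }
            break;
        }
        return 0;
    }

    static int lookup_ge(struct cur *c, int key, int *stat)
    {
        return lookup(c, LOOKUP_GE, key, stat);
    }

    int main(void)
    {
        const int recs[] = { 3, 7, 11 };
        struct cur c = { recs, 3, -1 };
        int stat;

        lookup_ge(&c, 8, &stat);
        printf("stat=%d slot=%d rec=%d\n", stat, c.slot, c.rec[c.slot]);
        /* stat=1 slot=2 rec=11 */
        return 0;
    }
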
fs/xfs/libxfs/xfs_alloc.h
167
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.h
174
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.h
181
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_alloc.h
205
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.h
209
int xfs_alloc_query_range(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc.h
213
int xfs_alloc_query_all(struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn,
fs/xfs/libxfs/xfs_alloc.h
216
int xfs_alloc_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/libxfs/xfs_alloc_btree.c
102
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_alloc_btree.c
106
bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
fs/xfs/libxfs/xfs_alloc_btree.c
107
error = xfs_alloc_put_freelist(to_perag(cur->bc_group), cur->bc_tp,
fs/xfs/libxfs/xfs_alloc_btree.c
112
atomic64_dec(&cur->bc_mp->m_allocbt_blks);
fs/xfs/libxfs/xfs_alloc_btree.c
113
xfs_extent_busy_insert(cur->bc_tp, pag_group(agbp->b_pag), bno, 1,
fs/xfs/libxfs/xfs_alloc_btree.c
120
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
123
return cur->bc_mp->m_alloc_mnr[level != 0];
fs/xfs/libxfs/xfs_alloc_btree.c
128
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
131
return cur->bc_mp->m_alloc_mxr[level != 0];
fs/xfs/libxfs/xfs_alloc_btree.c
167
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
170
rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
fs/xfs/libxfs/xfs_alloc_btree.c
171
rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
fs/xfs/libxfs/xfs_alloc_btree.c
176
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
179
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
fs/xfs/libxfs/xfs_alloc_btree.c
181
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno));
fs/xfs/libxfs/xfs_alloc_btree.c
183
if (xfs_btree_is_bno(cur->bc_ops))
fs/xfs/libxfs/xfs_alloc_btree.c
191
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
194
struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
fs/xfs/libxfs/xfs_alloc_btree.c
203
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
206
struct xfs_alloc_rec_incore *rec = &cur->bc_rec.a;
fs/xfs/libxfs/xfs_alloc_btree.c
215
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
228
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
28
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_alloc_btree.c
30
return xfs_bnobt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
fs/xfs/libxfs/xfs_alloc_btree.c
31
to_perag(cur->bc_group));
fs/xfs/libxfs/xfs_alloc_btree.c
353
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
36
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_alloc_btree.c
363
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
374
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
38
return xfs_cntbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ag.agbp,
fs/xfs/libxfs/xfs_alloc_btree.c
387
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
39
to_perag(cur->bc_group));
fs/xfs/libxfs/xfs_alloc_btree.c
400
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
44
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
48
struct xfs_perag *pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_alloc_btree.c
483
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_alloc_btree.c
485
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bnobt_ops,
fs/xfs/libxfs/xfs_alloc_btree.c
487
cur->bc_group = xfs_group_hold(pag_group(pag));
fs/xfs/libxfs/xfs_alloc_btree.c
488
cur->bc_ag.agbp = agbp;
fs/xfs/libxfs/xfs_alloc_btree.c
49
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_alloc_btree.c
492
cur->bc_nlevels = be32_to_cpu(agf->agf_bno_level);
fs/xfs/libxfs/xfs_alloc_btree.c
494
return cur;
fs/xfs/libxfs/xfs_alloc_btree.c
509
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_alloc_btree.c
511
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_cntbt_ops,
fs/xfs/libxfs/xfs_alloc_btree.c
513
cur->bc_group = xfs_group_hold(pag_group(pag));
fs/xfs/libxfs/xfs_alloc_btree.c
514
cur->bc_ag.agbp = agbp;
fs/xfs/libxfs/xfs_alloc_btree.c
518
cur->bc_nlevels = be32_to_cpu(agf->agf_cnt_level);
fs/xfs/libxfs/xfs_alloc_btree.c
520
return cur;
fs/xfs/libxfs/xfs_alloc_btree.c
529
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
534
struct xbtree_afakeroot *afake = cur->bc_ag.afake;
fs/xfs/libxfs/xfs_alloc_btree.c
536
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_alloc_btree.c
538
if (xfs_btree_is_bno(cur->bc_ops)) {
fs/xfs/libxfs/xfs_alloc_btree.c
54
if (xfs_btree_is_bno(cur->bc_ops)) {
fs/xfs/libxfs/xfs_alloc_btree.c
547
xfs_btree_commit_afakeroot(cur, tp, agbp);
fs/xfs/libxfs/xfs_alloc_btree.c
64
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
fs/xfs/libxfs/xfs_alloc_btree.c
69
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.c
78
error = xfs_alloc_get_freelist(to_perag(cur->bc_group), cur->bc_tp,
fs/xfs/libxfs/xfs_alloc_btree.c
79
cur->bc_ag.agbp, &bno, 1);
fs/xfs/libxfs/xfs_alloc_btree.c
88
atomic64_inc(&cur->bc_mp->m_allocbt_blks);
fs/xfs/libxfs/xfs_alloc_btree.c
89
xfs_extent_busy_reuse(cur->bc_group, bno, 1, false);
fs/xfs/libxfs/xfs_alloc_btree.c
99
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_alloc_btree.h
61
void xfs_allocbt_commit_staged_btree(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
112
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
1125
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
1130
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_bmap.c
1131
struct xfs_inode *ip = cur->bc_ino.ip;
fs/xfs/libxfs/xfs_bmap.c
1137
int whichfork = cur->bc_ino.whichfork;
fs/xfs/libxfs/xfs_bmap.c
1140
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_bmap.c
116
cur->bc_rec.b = *irec;
fs/xfs/libxfs/xfs_bmap.c
117
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
fs/xfs/libxfs/xfs_bmap.c
1191
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_bmap.c
1201
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
1202
error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
fs/xfs/libxfs/xfs_bmap.c
1204
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_bmap.c
122
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
125
cur->bc_rec.b.br_startoff = 0;
fs/xfs/libxfs/xfs_bmap.c
126
cur->bc_rec.b.br_startblock = 0;
fs/xfs/libxfs/xfs_bmap.c
127
cur->bc_rec.b.br_blockcount = 0;
fs/xfs/libxfs/xfs_bmap.c
128
return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
fs/xfs/libxfs/xfs_bmap.c
1461
ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
fs/xfs/libxfs/xfs_bmap.c
1553
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1557
error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
fs/xfs/libxfs/xfs_bmap.c
1561
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1565
error = xfs_btree_delete(bma->cur, &i);
fs/xfs/libxfs/xfs_bmap.c
1569
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1573
error = xfs_btree_decrement(bma->cur, 0, &i);
fs/xfs/libxfs/xfs_bmap.c
1577
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1581
error = xfs_bmbt_update(bma->cur, &LEFT);
fs/xfs/libxfs/xfs_bmap.c
1600
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1604
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
1608
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
161
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
1612
error = xfs_bmbt_update(bma->cur, &LEFT);
fs/xfs/libxfs/xfs_bmap.c
1635
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1639
error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
fs/xfs/libxfs/xfs_bmap.c
1643
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1647
error = xfs_bmbt_update(bma->cur, &PREV);
fs/xfs/libxfs/xfs_bmap.c
1665
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1669
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
167
return xfs_btree_update(cur, &rec);
fs/xfs/libxfs/xfs_bmap.c
1673
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1677
error = xfs_btree_insert(bma->cur, &i);
fs/xfs/libxfs/xfs_bmap.c
1681
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1709
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1713
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
1717
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1721
error = xfs_bmbt_update(bma->cur, &LEFT);
fs/xfs/libxfs/xfs_bmap.c
1736
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1740
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
1744
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1748
error = xfs_btree_insert(bma->cur, &i);
fs/xfs/libxfs/xfs_bmap.c
1752
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1760
&bma->cur, 1, &tmp_rval, whichfork);
fs/xfs/libxfs/xfs_bmap.c
1769
(bma->cur ? bma->cur->bc_bmap.allocated : 0));
fs/xfs/libxfs/xfs_bmap.c
1789
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1793
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
1797
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1801
error = xfs_bmbt_update(bma->cur, &RIGHT);
fs/xfs/libxfs/xfs_bmap.c
1827
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1831
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
1835
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1839
error = xfs_btree_insert(bma->cur, &i);
fs/xfs/libxfs/xfs_bmap.c
1843
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1851
&bma->cur, 1, &tmp_rval, whichfork);
fs/xfs/libxfs/xfs_bmap.c
1860
(bma->cur ? bma->cur->bc_bmap.allocated : 0));
fs/xfs/libxfs/xfs_bmap.c
1916
if (bma->cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
1920
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
1924
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1928
error = xfs_btree_insert(bma->cur, &i);
fs/xfs/libxfs/xfs_bmap.c
1932
xfs_btree_mark_sick(bma->cur);
fs/xfs/libxfs/xfs_bmap.c
1940
&bma->cur, 1, &tmp_rval, whichfork);
fs/xfs/libxfs/xfs_bmap.c
1971
ASSERT(bma->cur == NULL);
fs/xfs/libxfs/xfs_bmap.c
1973
&bma->cur, da_old > 0, &tmp_logflags,
fs/xfs/libxfs/xfs_bmap.c
1983
if (bma->cur) {
fs/xfs/libxfs/xfs_bmap.c
1984
da_new += bma->cur->bc_bmap.allocated;
fs/xfs/libxfs/xfs_bmap.c
1985
bma->cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap.c
1994
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
2017
struct xfs_btree_cur *cur; /* btree cursor */
fs/xfs/libxfs/xfs_bmap.c
2031
cur = *curp;
fs/xfs/libxfs/xfs_bmap.c
2122
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2126
error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
fs/xfs/libxfs/xfs_bmap.c
2130
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2134
if ((error = xfs_btree_delete(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2137
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2141
if ((error = xfs_btree_decrement(cur, 0, &i)))
fs/xfs/libxfs/xfs_bmap.c
2144
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2148
if ((error = xfs_btree_delete(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2151
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2155
if ((error = xfs_btree_decrement(cur, 0, &i)))
fs/xfs/libxfs/xfs_bmap.c
2158
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2162
error = xfs_bmbt_update(cur, &LEFT);
fs/xfs/libxfs/xfs_bmap.c
2179
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2183
error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
fs/xfs/libxfs/xfs_bmap.c
2187
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2191
if ((error = xfs_btree_delete(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2194
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2198
if ((error = xfs_btree_decrement(cur, 0, &i)))
fs/xfs/libxfs/xfs_bmap.c
2201
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2205
error = xfs_bmbt_update(cur, &LEFT);
fs/xfs/libxfs/xfs_bmap.c
2225
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2229
error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
fs/xfs/libxfs/xfs_bmap.c
2233
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2237
if ((error = xfs_btree_delete(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2240
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2244
if ((error = xfs_btree_decrement(cur, 0, &i)))
fs/xfs/libxfs/xfs_bmap.c
2247
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2251
error = xfs_bmbt_update(cur, &PREV);
fs/xfs/libxfs/xfs_bmap.c
2266
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2270
error = xfs_bmbt_lookup_eq(cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
2274
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2278
error = xfs_bmbt_update(cur, &PREV);
fs/xfs/libxfs/xfs_bmap.c
2300
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2304
error = xfs_bmbt_lookup_eq(cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
2308
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2312
error = xfs_bmbt_update(cur, &PREV);
fs/xfs/libxfs/xfs_bmap.c
2315
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_bmap.c
2318
error = xfs_bmbt_update(cur, &LEFT);
fs/xfs/libxfs/xfs_bmap.c
2338
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2342
error = xfs_bmbt_lookup_eq(cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
2346
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2350
error = xfs_bmbt_update(cur, &PREV);
fs/xfs/libxfs/xfs_bmap.c
2353
cur->bc_rec.b = *new;
fs/xfs/libxfs/xfs_bmap.c
2354
if ((error = xfs_btree_insert(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2357
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2380
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2384
error = xfs_bmbt_lookup_eq(cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
2388
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2392
error = xfs_bmbt_update(cur, &PREV);
fs/xfs/libxfs/xfs_bmap.c
2395
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_bmap.c
2398
error = xfs_bmbt_update(cur, &RIGHT);
fs/xfs/libxfs/xfs_bmap.c
2417
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2421
error = xfs_bmbt_lookup_eq(cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
2425
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2429
error = xfs_bmbt_update(cur, &PREV);
fs/xfs/libxfs/xfs_bmap.c
2432
error = xfs_bmbt_lookup_eq(cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
2436
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2440
if ((error = xfs_btree_insert(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2443
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2472
if (cur == NULL)
fs/xfs/libxfs/xfs_bmap.c
2476
error = xfs_bmbt_lookup_eq(cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
2480
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2485
error = xfs_bmbt_update(cur, &r[1]);
fs/xfs/libxfs/xfs_bmap.c
2489
cur->bc_rec.b = PREV;
fs/xfs/libxfs/xfs_bmap.c
2490
if ((error = xfs_btree_insert(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2493
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2502
error = xfs_bmbt_lookup_eq(cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
2506
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2511
if ((error = xfs_btree_insert(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
2514
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2541
ASSERT(cur == NULL);
fs/xfs/libxfs/xfs_bmap.c
2542
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
fs/xfs/libxfs/xfs_bmap.c
2550
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
2551
cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap.c
2552
*curp = cur;
fs/xfs/libxfs/xfs_bmap.c
256
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
2580
struct xfs_btree_cur *cur = *curp;
fs/xfs/libxfs/xfs_bmap.c
2590
ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
fs/xfs/libxfs/xfs_bmap.c
262
if (!cur)
fs/xfs/libxfs/xfs_bmap.c
265
for (i = 0; i < cur->bc_maxlevels; i++) {
fs/xfs/libxfs/xfs_bmap.c
2654
if (cur == NULL) {
fs/xfs/libxfs/xfs_bmap.c
2658
error = xfs_bmbt_lookup_eq(cur, &right, &i);
fs/xfs/libxfs/xfs_bmap.c
266
if (!cur->bc_levels[i].bp)
fs/xfs/libxfs/xfs_bmap.c
2662
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2666
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_bmap.c
2670
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2674
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_bmap.c
2678
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
268
if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
fs/xfs/libxfs/xfs_bmap.c
2682
error = xfs_bmbt_update(cur, &left);
fs/xfs/libxfs/xfs_bmap.c
269
return cur->bc_levels[i].bp;
fs/xfs/libxfs/xfs_bmap.c
2700
if (cur == NULL) {
fs/xfs/libxfs/xfs_bmap.c
2704
error = xfs_bmbt_lookup_eq(cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
2708
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2712
error = xfs_bmbt_update(cur, &left);
fs/xfs/libxfs/xfs_bmap.c
273
list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
fs/xfs/libxfs/xfs_bmap.c
2731
if (cur == NULL) {
fs/xfs/libxfs/xfs_bmap.c
2735
error = xfs_bmbt_lookup_eq(cur, &old, &i);
fs/xfs/libxfs/xfs_bmap.c
2739
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2743
error = xfs_bmbt_update(cur, &right);
fs/xfs/libxfs/xfs_bmap.c
2758
if (cur == NULL) {
fs/xfs/libxfs/xfs_bmap.c
2762
error = xfs_bmbt_lookup_eq(cur, new, &i);
fs/xfs/libxfs/xfs_bmap.c
2766
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2770
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_bmap.c
2774
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
2790
ASSERT(cur == NULL);
fs/xfs/libxfs/xfs_bmap.c
2794
cur = *curp;
fs/xfs/libxfs/xfs_bmap.c
2800
if (cur)
fs/xfs/libxfs/xfs_bmap.c
2801
cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap.c
2803
xfs_bmap_check_leaf_extents(cur, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
342
struct xfs_btree_cur *cur, /* btree cursor or null */
fs/xfs/libxfs/xfs_bmap.c
389
bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
fs/xfs/libxfs/xfs_bmap.c
394
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
3955
if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
fs/xfs/libxfs/xfs_bmap.c
3956
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
3963
if (bma->cur && bma->wasdel)
fs/xfs/libxfs/xfs_bmap.c
3964
bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
fs/xfs/libxfs/xfs_bmap.c
3978
whichfork, &bma->icur, &bma->cur, &bma->got,
fs/xfs/libxfs/xfs_bmap.c
4025
if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
fs/xfs/libxfs/xfs_bmap.c
4026
bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
fs/xfs/libxfs/xfs_bmap.c
4044
&bma->icur, &bma->cur, mval, &tmp_logflags);
fs/xfs/libxfs/xfs_bmap.c
411
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
4115
if (bma->cur)
fs/xfs/libxfs/xfs_bmap.c
4116
xfs_btree_del_cursor(bma->cur, error);
fs/xfs/libxfs/xfs_bmap.c
4317
error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
fs/xfs/libxfs/xfs_bmap.c
4464
error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
fs/xfs/libxfs/xfs_bmap.c
4523
struct xfs_btree_cur *cur = NULL;
fs/xfs/libxfs/xfs_bmap.c
4562
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
4573
&cur, &got, &logflags, flags);
fs/xfs/libxfs/xfs_bmap.c
4577
error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
fs/xfs/libxfs/xfs_bmap.c
4587
if (cur)
fs/xfs/libxfs/xfs_bmap.c
4588
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_bmap.c
476
bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
fs/xfs/libxfs/xfs_bmap.c
481
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
4913
struct xfs_btree_cur *cur, /* if null, not a btree */
fs/xfs/libxfs/xfs_bmap.c
4971
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
4972
error = xfs_bmbt_lookup_eq(cur, &got, &i);
fs/xfs/libxfs/xfs_bmap.c
4976
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
4996
if (!cur) {
fs/xfs/libxfs/xfs_bmap.c
5000
if ((error = xfs_btree_delete(cur, &i)))
fs/xfs/libxfs/xfs_bmap.c
5003
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5015
if (!cur) {
fs/xfs/libxfs/xfs_bmap.c
5019
error = xfs_bmbt_update(cur, &got);
fs/xfs/libxfs/xfs_bmap.c
5029
if (!cur) {
fs/xfs/libxfs/xfs_bmap.c
5033
error = xfs_bmbt_update(cur, &got);
fs/xfs/libxfs/xfs_bmap.c
5053
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
5054
error = xfs_bmbt_update(cur, &got);
fs/xfs/libxfs/xfs_bmap.c
5057
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_bmap.c
5060
cur->bc_rec.b = new;
fs/xfs/libxfs/xfs_bmap.c
5061
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_bmap.c
5074
error = xfs_bmbt_lookup_eq(cur, &got, &i);
fs/xfs/libxfs/xfs_bmap.c
5078
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5085
error = xfs_bmbt_update(cur, &old);
fs/xfs/libxfs/xfs_bmap.c
5097
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5179
struct xfs_btree_cur *cur; /* bmap btree cursor */
fs/xfs/libxfs/xfs_bmap.c
5234
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
5236
cur = NULL;
fs/xfs/libxfs/xfs_bmap.c
5319
whichfork, &icur, &cur, &del,
fs/xfs/libxfs/xfs_bmap.c
5379
ip, whichfork, &icur, &cur,
fs/xfs/libxfs/xfs_bmap.c
5388
ip, whichfork, &icur, &cur,
fs/xfs/libxfs/xfs_bmap.c
5401
error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
fs/xfs/libxfs/xfs_bmap.c
5433
ASSERT(cur == NULL);
fs/xfs/libxfs/xfs_bmap.c
5434
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
fs/xfs/libxfs/xfs_bmap.c
5438
error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
fs/xfs/libxfs/xfs_bmap.c
544
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
fs/xfs/libxfs/xfs_bmap.c
5459
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
5461
cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap.c
5462
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_bmap.c
5533
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
5556
if (!cur) {
fs/xfs/libxfs/xfs_bmap.c
5562
error = xfs_bmbt_lookup_eq(cur, got, &i);
fs/xfs/libxfs/xfs_bmap.c
5566
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5570
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_bmap.c
5574
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5579
error = xfs_bmbt_lookup_eq(cur, left, &i);
fs/xfs/libxfs/xfs_bmap.c
5583
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5587
error = xfs_bmbt_update(cur, &new);
fs/xfs/libxfs/xfs_bmap.c
5592
error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
fs/xfs/libxfs/xfs_bmap.c
5617
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
5629
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
563
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_bmap.c
5630
error = xfs_bmbt_lookup_eq(cur, &prev, &i);
fs/xfs/libxfs/xfs_bmap.c
5634
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5638
error = xfs_bmbt_update(cur, got);
fs/xfs/libxfs/xfs_bmap.c
5665
struct xfs_btree_cur *cur = NULL;
fs/xfs/libxfs/xfs_bmap.c
5688
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
5711
cur, &logflags);
fs/xfs/libxfs/xfs_bmap.c
5724
cur, &logflags, new_startoff);
fs/xfs/libxfs/xfs_bmap.c
5736
if (cur)
fs/xfs/libxfs/xfs_bmap.c
5737
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_bmap.c
5781
struct xfs_btree_cur *cur = NULL;
fs/xfs/libxfs/xfs_bmap.c
5804
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
581
ASSERT(cur);
fs/xfs/libxfs/xfs_bmap.c
5850
cur, &logflags, new_startoff);
fs/xfs/libxfs/xfs_bmap.c
5862
if (cur)
fs/xfs/libxfs/xfs_bmap.c
5863
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_bmap.c
5883
struct xfs_btree_cur *cur = NULL;
fs/xfs/libxfs/xfs_bmap.c
591
if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_verify_fsbno(mp, cbno))) {
fs/xfs/libxfs/xfs_bmap.c
592
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5921
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
5922
error = xfs_bmbt_lookup_eq(cur, &got, &i);
fs/xfs/libxfs/xfs_bmap.c
5926
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5937
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
5938
error = xfs_bmbt_update(cur, &got);
fs/xfs/libxfs/xfs_bmap.c
5949
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
5950
error = xfs_bmbt_lookup_eq(cur, &new, &i);
fs/xfs/libxfs/xfs_bmap.c
5954
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5958
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_bmap.c
5962
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5974
ASSERT(cur == NULL);
fs/xfs/libxfs/xfs_bmap.c
5975
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
fs/xfs/libxfs/xfs_bmap.c
598
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
5981
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
5982
cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap.c
5983
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_bmap.c
602
if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
fs/xfs/libxfs/xfs_bmap.c
606
error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo,
fs/xfs/libxfs/xfs_bmap.c
614
if (cur->bc_levels[0].bp == cbp)
fs/xfs/libxfs/xfs_bmap.c
615
cur->bc_levels[0].bp = NULL;
fs/xfs/libxfs/xfs_bmap.c
6186
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
6195
fa = xfs_bmap_validate_extent(cur->bc_ino.ip, cur->bc_ino.whichfork,
fs/xfs/libxfs/xfs_bmap.c
6198
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
6199
return xfs_bmap_complain_bad_rec(cur->bc_ino.ip,
fs/xfs/libxfs/xfs_bmap.c
6200
cur->bc_ino.whichfork, fa, &irec);
fs/xfs/libxfs/xfs_bmap.c
6203
return query->fn(cur, &irec, query->priv);
fs/xfs/libxfs/xfs_bmap.c
6209
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.c
6218
return xfs_btree_query_all(cur, xfs_bmap_query_range_helper, &query);
fs/xfs/libxfs/xfs_bmap.c
641
struct xfs_btree_cur *cur; /* bmap btree cursor */
fs/xfs/libxfs/xfs_bmap.c
669
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap.c
671
cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
fs/xfs/libxfs/xfs_bmap.c
697
cur->bc_bmap.allocated++;
fs/xfs/libxfs/xfs_bmap.c
728
pp = xfs_bmbt_ptr_addr(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
fs/xfs/libxfs/xfs_bmap.c
736
xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
fs/xfs/libxfs/xfs_bmap.c
737
xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
fs/xfs/libxfs/xfs_bmap.c
739
*curp = cur;
fs/xfs/libxfs/xfs_bmap.c
749
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_bmap.c
890
struct xfs_btree_cur *cur; /* btree cursor */
fs/xfs/libxfs/xfs_bmap.c
900
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
fs/xfs/libxfs/xfs_bmap.c
901
error = xfs_bmbt_lookup_first(cur, &stat);
fs/xfs/libxfs/xfs_bmap.c
906
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_bmap.c
910
if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
fs/xfs/libxfs/xfs_bmap.c
913
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_bmap.c
916
cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap.c
917
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_bmap.c
921
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_bmap.c
934
struct xfs_btree_cur *cur; /* bmap btree cursor */
fs/xfs/libxfs/xfs_bmap.c
940
cur = NULL;
fs/xfs/libxfs/xfs_bmap.c
941
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
fs/xfs/libxfs/xfs_bmap.c
943
if (cur) {
fs/xfs/libxfs/xfs_bmap.c
944
cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap.c
945
xfs_btree_del_cursor(cur, error);
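The fs/xfs/libxfs/xfs_bmap.c hits above keep repeating one cursor lifecycle: allocate with xfs_bmbt_init_cursor(), position with xfs_bmbt_lookup_eq(), mutate with xfs_bmbt_update() or xfs_btree_insert()/xfs_btree_delete(), and always release with xfs_btree_del_cursor(). A minimal sketch of that shape follows, assuming locals mp, tp, ip, whichfork and a populated struct xfs_bmbt_irec got; the error-path details vary by call site and are simplified here:

    struct xfs_btree_cur    *cur;
    int                     i, error;

    /* allocate a bmap btree cursor over this inode fork */
    cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);

    /* position the cursor on an existing extent record */
    error = xfs_bmbt_lookup_eq(cur, &got, &i);
    if (error)
        goto done;
    if (XFS_IS_CORRUPT(mp, i != 1)) {
        /* the lookup must find the record; otherwise mark the tree sick */
        xfs_btree_mark_sick(cur);
        error = -EFSCORRUPTED;
        goto done;
    }

    /* rewrite the record in place */
    error = xfs_bmbt_update(cur, &got);

done:
    /* always tear the cursor down, passing error so dirty state is handled */
    xfs_btree_del_cursor(cur, error);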
fs/xfs/libxfs/xfs_bmap.h
210
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
fs/xfs/libxfs/xfs_bmap.h
213
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
fs/xfs/libxfs/xfs_bmap.h
293
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap.h
297
int xfs_bmap_query_all(struct xfs_btree_cur *cur, xfs_bmap_query_range_fn fn,
fs/xfs/libxfs/xfs_bmap.h
30
struct xfs_btree_cur *cur; /* btree cursor */
fs/xfs/libxfs/xfs_bmap_btree.c
182
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_bmap_btree.c
186
new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
fs/xfs/libxfs/xfs_bmap_btree.c
187
cur->bc_ino.ip, cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
188
new->bc_flags |= (cur->bc_flags &
fs/xfs/libxfs/xfs_bmap_btree.c
209
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
218
args.tp = cur->bc_tp;
fs/xfs/libxfs/xfs_bmap_btree.c
219
args.mp = cur->bc_mp;
fs/xfs/libxfs/xfs_bmap_btree.c
220
xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
fs/xfs/libxfs/xfs_bmap_btree.c
221
cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
223
args.wasdel = cur->bc_flags & XFS_BTREE_BMBT_WASDEL;
fs/xfs/libxfs/xfs_bmap_btree.c
233
if (cur->bc_tp->t_highest_agno == NULLAGNUMBER)
fs/xfs/libxfs/xfs_bmap_btree.c
234
args.minleft = xfs_bmapi_minleft(cur->bc_tp, cur->bc_ino.ip,
fs/xfs/libxfs/xfs_bmap_btree.c
235
cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
251
cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
fs/xfs/libxfs/xfs_bmap_btree.c
259
cur->bc_bmap.allocated++;
fs/xfs/libxfs/xfs_bmap_btree.c
260
cur->bc_ino.ip->i_nblocks++;
fs/xfs/libxfs/xfs_bmap_btree.c
261
xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
fs/xfs/libxfs/xfs_bmap_btree.c
262
xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
fs/xfs/libxfs/xfs_bmap_btree.c
273
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
276
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_bmap_btree.c
277
struct xfs_inode *ip = cur->bc_ino.ip;
fs/xfs/libxfs/xfs_bmap_btree.c
278
struct xfs_trans *tp = cur->bc_tp;
fs/xfs/libxfs/xfs_bmap_btree.c
283
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
284
error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo,
fs/xfs/libxfs/xfs_bmap_btree.c
297
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
300
if (level == cur->bc_nlevels - 1) {
fs/xfs/libxfs/xfs_bmap_btree.c
301
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_bmap_btree.c
303
return xfs_bmbt_maxrecs(cur->bc_mp,
fs/xfs/libxfs/xfs_bmap_btree.c
307
return cur->bc_mp->m_bmap_dmnr[level != 0];
fs/xfs/libxfs/xfs_bmap_btree.c
312
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
315
if (level == cur->bc_nlevels - 1) {
fs/xfs/libxfs/xfs_bmap_btree.c
316
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_bmap_btree.c
318
return xfs_bmbt_maxrecs(cur->bc_mp,
fs/xfs/libxfs/xfs_bmap_btree.c
322
return cur->bc_mp->m_bmap_dmxr[level != 0];
fs/xfs/libxfs/xfs_bmap_btree.c
337
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
340
if (level != cur->bc_nlevels - 1)
fs/xfs/libxfs/xfs_bmap_btree.c
341
return cur->bc_mp->m_bmap_dmxr[level != 0];
fs/xfs/libxfs/xfs_bmap_btree.c
342
return xfs_bmdr_maxrecs(cur->bc_ino.forksize, level == 0);
fs/xfs/libxfs/xfs_bmap_btree.c
366
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
369
xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
fs/xfs/libxfs/xfs_bmap_btree.c
374
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
378
cur->bc_rec.b.br_startoff);
fs/xfs/libxfs/xfs_bmap_btree.c
383
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
475
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
485
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
496
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
610
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
613
return xfs_bmap_broot_realloc(cur->bc_ino.ip, cur->bc_ino.whichfork,
fs/xfs/libxfs/xfs_bmap_btree.c
659
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_bmap_btree.c
676
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_bmbt_ops, maxlevels,
fs/xfs/libxfs/xfs_bmap_btree.c
678
cur->bc_ino.ip = ip;
fs/xfs/libxfs/xfs_bmap_btree.c
679
cur->bc_ino.whichfork = whichfork;
fs/xfs/libxfs/xfs_bmap_btree.c
680
cur->bc_bmap.allocated = 0;
fs/xfs/libxfs/xfs_bmap_btree.c
684
cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
fs/xfs/libxfs/xfs_bmap_btree.c
685
cur->bc_ino.forksize = xfs_inode_fork_size(ip, whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
687
return cur;
fs/xfs/libxfs/xfs_bmap_btree.c
707
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_bmap_btree.c
711
struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake;
fs/xfs/libxfs/xfs_bmap_btree.c
717
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_bmap_btree.c
725
ifp = xfs_ifork_ptr(cur->bc_ino.ip, whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
740
xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
fs/xfs/libxfs/xfs_bmap_btree.c
741
xfs_btree_commit_ifakeroot(cur, tp, whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
819
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_bmap_btree.c
826
cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
fs/xfs/libxfs/xfs_bmap_btree.c
827
cur->bc_flags |= XFS_BTREE_BMBT_INVALID_OWNER;
fs/xfs/libxfs/xfs_bmap_btree.c
829
error = xfs_btree_change_owner(cur, new_owner, buffer_list);
fs/xfs/libxfs/xfs_bmap_btree.c
830
xfs_btree_del_cursor(cur, error);
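The fs/xfs/libxfs/xfs_bmap_btree.c hits around source lines 182-188 show how a bmbt cursor is duplicated: rebuild it from the original's mount, transaction, inode, and fork, then carry over the per-cursor flags. The listing truncates the flag mask, so the sketch below (with a hypothetical name) assumes the two BMBT flags that appear elsewhere in this listing, XFS_BTREE_BMBT_WASDEL and XFS_BTREE_BMBT_INVALID_OWNER:

    static struct xfs_btree_cur *
    bmbt_dup_cursor_sketch(struct xfs_btree_cur *cur)
    {
        struct xfs_btree_cur    *new;

        /* a fresh cursor over the same inode fork ... */
        new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
                cur->bc_ino.ip, cur->bc_ino.whichfork);
        /* ... inheriting the flags that must survive duplication (assumed mask) */
        new->bc_flags |= (cur->bc_flags &
                (XFS_BTREE_BMBT_WASDEL | XFS_BTREE_BMBT_INVALID_OWNER));
        return new;
    }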
fs/xfs/libxfs/xfs_bmap_btree.h
47
void xfs_bmbt_commit_staged_btree(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1000
cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
1009
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1013
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
1014
struct xfs_perag *pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_btree.c
1022
cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
1029
cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
1042
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
1052
if (xfs_btree_at_iroot(cur, lev))
fs/xfs/libxfs/xfs_btree.c
1055
if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra)
fs/xfs/libxfs/xfs_btree.c
1058
cur->bc_levels[lev].ra |= lr;
fs/xfs/libxfs/xfs_btree.c
1059
block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp);
fs/xfs/libxfs/xfs_btree.c
1061
switch (cur->bc_ops->type) {
fs/xfs/libxfs/xfs_btree.c
1063
return xfs_btree_readahead_agblock(cur, lr, block);
fs/xfs/libxfs/xfs_btree.c
1065
return xfs_btree_readahead_fsblock(cur, lr, block);
fs/xfs/libxfs/xfs_btree.c
1067
return xfs_btree_readahead_memblock(cur, lr, block);
fs/xfs/libxfs/xfs_btree.c
1076
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1082
error = xfs_btree_check_ptr(cur, ptr, 0, 1);
fs/xfs/libxfs/xfs_btree.c
1086
switch (cur->bc_ops->type) {
fs/xfs/libxfs/xfs_btree.c
1088
*daddr = xfs_agbno_to_daddr(to_perag(cur->bc_group),
fs/xfs/libxfs/xfs_btree.c
1092
*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
fs/xfs/libxfs/xfs_btree.c
1109
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1115
if (xfs_btree_ptr_to_daddr(cur, ptr, &daddr))
fs/xfs/libxfs/xfs_btree.c
1117
xfs_buf_readahead(xfs_btree_buftarg(cur), daddr,
fs/xfs/libxfs/xfs_btree.c
1118
xfs_btree_bbsize(cur) * count,
fs/xfs/libxfs/xfs_btree.c
1119
cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
1128
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
1134
if (cur->bc_levels[lev].bp)
fs/xfs/libxfs/xfs_btree.c
1135
xfs_trans_brelse(cur->bc_tp, cur->bc_levels[lev].bp);
fs/xfs/libxfs/xfs_btree.c
1136
cur->bc_levels[lev].bp = bp;
fs/xfs/libxfs/xfs_btree.c
1137
cur->bc_levels[lev].ra = 0;
fs/xfs/libxfs/xfs_btree.c
1140
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/libxfs/xfs_btree.c
1142
cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA;
fs/xfs/libxfs/xfs_btree.c
1144
cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA;
fs/xfs/libxfs/xfs_btree.c
1147
cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA;
fs/xfs/libxfs/xfs_btree.c
1149
cur->bc_levels[lev].ra |= XFS_BTCUR_RIGHTRA;
fs/xfs/libxfs/xfs_btree.c
1155
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1158
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/libxfs/xfs_btree.c
1166
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1169
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/libxfs/xfs_btree.c
1177
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1181
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/libxfs/xfs_btree.c
1191
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1198
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/libxfs/xfs_btree.c
1213
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1220
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/libxfs/xfs_btree.c
126
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1302
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
1304
switch (cur->bc_ops->type) {
fs/xfs/libxfs/xfs_btree.c
1306
return cur->bc_mem.xfbtree->owner;
fs/xfs/libxfs/xfs_btree.c
1308
return cur->bc_ino.ip->i_ino;
fs/xfs/libxfs/xfs_btree.c
131
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
1310
return cur->bc_group->xg_gno;
fs/xfs/libxfs/xfs_btree.c
1319
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1324
xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs,
fs/xfs/libxfs/xfs_btree.c
1325
xfs_btree_owner(cur));
fs/xfs/libxfs/xfs_btree.c
1330
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1334
switch (cur->bc_ops->type) {
fs/xfs/libxfs/xfs_btree.c
1336
ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
1340
ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
1351
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1354
xfs_buf_set_ref(bp, cur->bc_ops->lru_refs);
fs/xfs/libxfs/xfs_btree.c
1359
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1367
error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
fs/xfs/libxfs/xfs_btree.c
1370
error = xfs_trans_get_buf(cur->bc_tp, xfs_btree_buftarg(cur), d,
fs/xfs/libxfs/xfs_btree.c
1371
xfs_btree_bbsize(cur), 0, bpp);
fs/xfs/libxfs/xfs_btree.c
1375
(*bpp)->b_ops = cur->bc_ops->buf_ops;
fs/xfs/libxfs/xfs_btree.c
1386
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1392
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
1399
error = xfs_btree_ptr_to_daddr(cur, ptr, &d);
fs/xfs/libxfs/xfs_btree.c
1402
error = xfs_trans_read_buf(mp, cur->bc_tp, xfs_btree_buftarg(cur), d,
fs/xfs/libxfs/xfs_btree.c
1403
xfs_btree_bbsize(cur), flags, bpp,
fs/xfs/libxfs/xfs_btree.c
1404
cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
1406
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
1410
xfs_btree_set_refs(cur, *bpp);
fs/xfs/libxfs/xfs_btree.c
1420
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1426
memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
fs/xfs/libxfs/xfs_btree.c
143
if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
fs/xfs/libxfs/xfs_btree.c
1434
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1440
memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
fs/xfs/libxfs/xfs_btree.c
1448
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1454
memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len);
fs/xfs/libxfs/xfs_btree.c
1462
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1472
dst_key = (char *)key + (dir * cur->bc_ops->key_len);
fs/xfs/libxfs/xfs_btree.c
1473
memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
fs/xfs/libxfs/xfs_btree.c
148
cur->bc_ops->get_maxrecs(cur, level))
fs/xfs/libxfs/xfs_btree.c
1481
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1491
dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
fs/xfs/libxfs/xfs_btree.c
1492
memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
fs/xfs/libxfs/xfs_btree.c
1500
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1510
dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len);
fs/xfs/libxfs/xfs_btree.c
1511
memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len);
fs/xfs/libxfs/xfs_btree.c
1519
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1526
xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
fs/xfs/libxfs/xfs_btree.c
1527
xfs_trans_log_buf(cur->bc_tp, bp,
fs/xfs/libxfs/xfs_btree.c
1528
xfs_btree_key_offset(cur, first),
fs/xfs/libxfs/xfs_btree.c
1529
xfs_btree_key_offset(cur, last + 1) - 1);
fs/xfs/libxfs/xfs_btree.c
1531
xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
fs/xfs/libxfs/xfs_btree.c
1532
xfs_ilog_fbroot(cur->bc_ino.whichfork));
fs/xfs/libxfs/xfs_btree.c
1541
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1547
xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
fs/xfs/libxfs/xfs_btree.c
1548
xfs_ilog_fbroot(cur->bc_ino.whichfork));
fs/xfs/libxfs/xfs_btree.c
1552
xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
fs/xfs/libxfs/xfs_btree.c
1553
xfs_trans_log_buf(cur->bc_tp, bp,
fs/xfs/libxfs/xfs_btree.c
1554
xfs_btree_rec_offset(cur, first),
fs/xfs/libxfs/xfs_btree.c
1555
xfs_btree_rec_offset(cur, last + 1) - 1);
fs/xfs/libxfs/xfs_btree.c
1563
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
1573
xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
fs/xfs/libxfs/xfs_btree.c
1574
xfs_trans_log_buf(cur->bc_tp, bp,
fs/xfs/libxfs/xfs_btree.c
1575
xfs_btree_ptr_offset(cur, first, level),
fs/xfs/libxfs/xfs_btree.c
1576
xfs_btree_ptr_offset(cur, last + 1, level) - 1);
fs/xfs/libxfs/xfs_btree.c
1578
xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
fs/xfs/libxfs/xfs_btree.c
1579
xfs_ilog_fbroot(cur->bc_ino.whichfork));
fs/xfs/libxfs/xfs_btree.c
1589
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
160
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1626
if (xfs_has_crc(cur->bc_mp)) {
fs/xfs/libxfs/xfs_btree.c
1641
(cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ?
fs/xfs/libxfs/xfs_btree.c
1644
xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
fs/xfs/libxfs/xfs_btree.c
1645
xfs_trans_log_buf(cur->bc_tp, bp, first, last);
fs/xfs/libxfs/xfs_btree.c
1647
xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip,
fs/xfs/libxfs/xfs_btree.c
1648
xfs_ilog_fbroot(cur->bc_ino.whichfork));
fs/xfs/libxfs/xfs_btree.c
165
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
1658
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1668
ASSERT(level < cur->bc_nlevels);
fs/xfs/libxfs/xfs_btree.c
1671
xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
fs/xfs/libxfs/xfs_btree.c
1674
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
1677
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
1683
if (++cur->bc_levels[level].ptr <= xfs_btree_get_numrecs(block))
fs/xfs/libxfs/xfs_btree.c
1687
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
1688
if (xfs_btree_ptr_is_null(cur, &ptr))
fs/xfs/libxfs/xfs_btree.c
169
fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
1691
XFS_BTREE_STATS_INC(cur, increment);
fs/xfs/libxfs/xfs_btree.c
1697
for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
fs/xfs/libxfs/xfs_btree.c
1698
block = xfs_btree_get_block(cur, lev, &bp);
fs/xfs/libxfs/xfs_btree.c
1701
error = xfs_btree_check_block(cur, block, lev, bp);
fs/xfs/libxfs/xfs_btree.c
1706
if (++cur->bc_levels[lev].ptr <= xfs_btree_get_numrecs(block))
fs/xfs/libxfs/xfs_btree.c
1710
xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
fs/xfs/libxfs/xfs_btree.c
1717
if (lev == cur->bc_nlevels) {
fs/xfs/libxfs/xfs_btree.c
1718
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
fs/xfs/libxfs/xfs_btree.c
1721
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
1725
ASSERT(lev < cur->bc_nlevels);
fs/xfs/libxfs/xfs_btree.c
1731
for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
fs/xfs/libxfs/xfs_btree.c
1734
ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block);
fs/xfs/libxfs/xfs_btree.c
1736
error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
fs/xfs/libxfs/xfs_btree.c
1740
xfs_btree_setbuf(cur, lev, bp);
fs/xfs/libxfs/xfs_btree.c
1741
cur->bc_levels[lev].ptr = 1;
fs/xfs/libxfs/xfs_btree.c
1761
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1771
ASSERT(level < cur->bc_nlevels);
fs/xfs/libxfs/xfs_btree.c
1774
xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);
fs/xfs/libxfs/xfs_btree.c
1777
if (--cur->bc_levels[level].ptr > 0)
fs/xfs/libxfs/xfs_btree.c
1781
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
1784
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
1790
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
1791
if (xfs_btree_ptr_is_null(cur, &ptr))
fs/xfs/libxfs/xfs_btree.c
1794
XFS_BTREE_STATS_INC(cur, decrement);
fs/xfs/libxfs/xfs_btree.c
1800
for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
fs/xfs/libxfs/xfs_btree.c
1801
if (--cur->bc_levels[lev].ptr > 0)
fs/xfs/libxfs/xfs_btree.c
1804
xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
fs/xfs/libxfs/xfs_btree.c
1811
if (lev == cur->bc_nlevels) {
fs/xfs/libxfs/xfs_btree.c
1812
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
fs/xfs/libxfs/xfs_btree.c
1815
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
1819
ASSERT(lev < cur->bc_nlevels);
fs/xfs/libxfs/xfs_btree.c
1825
for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
fs/xfs/libxfs/xfs_btree.c
1828
ptrp = xfs_btree_ptr_addr(cur, cur->bc_levels[lev].ptr, block);
fs/xfs/libxfs/xfs_btree.c
1830
error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
fs/xfs/libxfs/xfs_btree.c
1833
xfs_btree_setbuf(cur, lev, bp);
fs/xfs/libxfs/xfs_btree.c
1834
cur->bc_levels[lev].ptr = xfs_btree_get_numrecs(block);
fs/xfs/libxfs/xfs_btree.c
1854
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1859
if (!xfs_has_crc(cur->bc_mp) ||
fs/xfs/libxfs/xfs_btree.c
1860
(cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER))
fs/xfs/libxfs/xfs_btree.c
1863
owner = xfs_btree_owner(cur);
fs/xfs/libxfs/xfs_btree.c
1864
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/libxfs/xfs_btree.c
1877
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
1887
if (xfs_btree_at_iroot(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
1888
*blkp = xfs_btree_get_iroot(cur);
fs/xfs/libxfs/xfs_btree.c
1898
bp = cur->bc_levels[level].bp;
fs/xfs/libxfs/xfs_btree.c
1899
error = xfs_btree_ptr_to_daddr(cur, pp, &daddr);
fs/xfs/libxfs/xfs_btree.c
1907
error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
fs/xfs/libxfs/xfs_btree.c
1912
if (xfs_btree_check_block_owner(cur, *blkp) != NULL)
fs/xfs/libxfs/xfs_btree.c
1923
xfs_btree_setbuf(cur, level, bp);
fs/xfs/libxfs/xfs_btree.c
1929
xfs_trans_brelse(cur->bc_tp, bp);
fs/xfs/libxfs/xfs_btree.c
1930
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
1941
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1948
cur->bc_ops->init_key_from_rec(kp,
fs/xfs/libxfs/xfs_btree.c
1949
xfs_btree_rec_addr(cur, keyno, block));
fs/xfs/libxfs/xfs_btree.c
1953
return xfs_btree_key_addr(cur, keyno, block);
fs/xfs/libxfs/xfs_btree.c
1961
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
1964
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
fs/xfs/libxfs/xfs_btree.c
1970
} else if (cur->bc_flags & XFS_BTREE_STAGING) {
fs/xfs/libxfs/xfs_btree.c
1971
ptr->s = cpu_to_be32(cur->bc_ag.afake->af_root);
fs/xfs/libxfs/xfs_btree.c
1973
cur->bc_ops->init_ptr_from_cur(cur, ptr);
fs/xfs/libxfs/xfs_btree.c
1983
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
1995
XFS_BTREE_STATS_INC(cur, lookup);
fs/xfs/libxfs/xfs_btree.c
1998
if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0)) {
fs/xfs/libxfs/xfs_btree.c
1999
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
200
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2007
xfs_btree_init_ptr_from_cur(cur, &ptr);
fs/xfs/libxfs/xfs_btree.c
2016
for (level = cur->bc_nlevels - 1, cmp_r = 1; level >= 0; level--) {
fs/xfs/libxfs/xfs_btree.c
2018
error = xfs_btree_lookup_get_block(cur, level, pp, &block);
fs/xfs/libxfs/xfs_btree.c
2039
if (level != 0 || cur->bc_nlevels != 1) {
fs/xfs/libxfs/xfs_btree.c
2042
cur->bc_mp, block,
fs/xfs/libxfs/xfs_btree.c
2044
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
2048
cur->bc_levels[0].ptr = dir != XFS_LOOKUP_LE;
fs/xfs/libxfs/xfs_btree.c
205
struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target;
fs/xfs/libxfs/xfs_btree.c
2058
XFS_BTREE_STATS_INC(cur, compare);
fs/xfs/libxfs/xfs_btree.c
2064
kp = xfs_lookup_get_search_key(cur, level,
fs/xfs/libxfs/xfs_btree.c
2074
cmp_r = cur->bc_ops->cmp_key_with_cur(cur, kp);
fs/xfs/libxfs/xfs_btree.c
209
fa = __xfs_btree_check_lblock_hdr(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
2095
pp = xfs_btree_ptr_addr(cur, keyno, block);
fs/xfs/libxfs/xfs_btree.c
2097
error = xfs_btree_debug_check_ptr(cur, pp, 0, level);
fs/xfs/libxfs/xfs_btree.c
2101
cur->bc_levels[level].ptr = keyno;
fs/xfs/libxfs/xfs_btree.c
2112
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
2115
!xfs_btree_ptr_is_null(cur, &ptr)) {
fs/xfs/libxfs/xfs_btree.c
2118
cur->bc_levels[0].ptr = keyno;
fs/xfs/libxfs/xfs_btree.c
2119
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_btree.c
2122
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
2123
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
2131
cur->bc_levels[0].ptr = keyno;
fs/xfs/libxfs/xfs_btree.c
2149
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2152
ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);
fs/xfs/libxfs/xfs_btree.c
2154
(cur->bc_ops->key_len / 2));
fs/xfs/libxfs/xfs_btree.c
2160
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2170
rec = xfs_btree_rec_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
2171
cur->bc_ops->init_key_from_rec(key, rec);
fs/xfs/libxfs/xfs_btree.c
2173
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
fs/xfs/libxfs/xfs_btree.c
2175
cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
fs/xfs/libxfs/xfs_btree.c
2177
rec = xfs_btree_rec_addr(cur, n, block);
fs/xfs/libxfs/xfs_btree.c
2178
cur->bc_ops->init_high_key_from_rec(&hkey, rec);
fs/xfs/libxfs/xfs_btree.c
2179
if (xfs_btree_keycmp_gt(cur, &hkey, &max_hkey))
fs/xfs/libxfs/xfs_btree.c
2183
high = xfs_btree_high_key_from_key(cur, key);
fs/xfs/libxfs/xfs_btree.c
2184
memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
fs/xfs/libxfs/xfs_btree.c
2191
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2200
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
fs/xfs/libxfs/xfs_btree.c
2201
memcpy(key, xfs_btree_key_addr(cur, 1, block),
fs/xfs/libxfs/xfs_btree.c
2202
cur->bc_ops->key_len / 2);
fs/xfs/libxfs/xfs_btree.c
2204
max_hkey = xfs_btree_high_key_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
2206
hkey = xfs_btree_high_key_addr(cur, n, block);
fs/xfs/libxfs/xfs_btree.c
2207
if (xfs_btree_keycmp_gt(cur, hkey, max_hkey))
fs/xfs/libxfs/xfs_btree.c
2211
high = xfs_btree_high_key_from_key(cur, key);
fs/xfs/libxfs/xfs_btree.c
2212
memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
fs/xfs/libxfs/xfs_btree.c
2214
memcpy(key, xfs_btree_key_addr(cur, 1, block),
fs/xfs/libxfs/xfs_btree.c
2215
cur->bc_ops->key_len);
fs/xfs/libxfs/xfs_btree.c
2222
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2227
xfs_btree_get_leaf_keys(cur, block, key);
fs/xfs/libxfs/xfs_btree.c
2229
xfs_btree_get_node_keys(cur, block, key);
fs/xfs/libxfs/xfs_btree.c
2241
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2244
return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1;
fs/xfs/libxfs/xfs_btree.c
2254
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2268
ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);
fs/xfs/libxfs/xfs_btree.c
2271
if (level + 1 >= cur->bc_nlevels)
fs/xfs/libxfs/xfs_btree.c
2274
trace_xfs_btree_updkeys(cur, level, bp0);
fs/xfs/libxfs/xfs_btree.c
2277
hkey = xfs_btree_high_key_from_key(cur, lkey);
fs/xfs/libxfs/xfs_btree.c
2278
xfs_btree_get_keys(cur, block, lkey);
fs/xfs/libxfs/xfs_btree.c
2279
for (level++; level < cur->bc_nlevels; level++) {
fs/xfs/libxfs/xfs_btree.c
228
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2283
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
2284
trace_xfs_btree_updkeys(cur, level, bp);
fs/xfs/libxfs/xfs_btree.c
2286
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
2290
ptr = cur->bc_levels[level].ptr;
fs/xfs/libxfs/xfs_btree.c
2291
nlkey = xfs_btree_key_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
2292
nhkey = xfs_btree_high_key_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
2294
xfs_btree_keycmp_eq(cur, nlkey, lkey) &&
fs/xfs/libxfs/xfs_btree.c
2295
xfs_btree_keycmp_eq(cur, nhkey, hkey))
fs/xfs/libxfs/xfs_btree.c
2297
xfs_btree_copy_keys(cur, nlkey, lkey, 1);
fs/xfs/libxfs/xfs_btree.c
2298
xfs_btree_log_keys(cur, bp, ptr, ptr);
fs/xfs/libxfs/xfs_btree.c
2299
if (level + 1 >= cur->bc_nlevels)
fs/xfs/libxfs/xfs_btree.c
2301
xfs_btree_get_node_keys(cur, block, lkey);
fs/xfs/libxfs/xfs_btree.c
2310
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2316
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
2317
return __xfs_btree_updkeys(cur, level, block, bp, true);
fs/xfs/libxfs/xfs_btree.c
2325
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
233
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
2336
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
2337
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)
fs/xfs/libxfs/xfs_btree.c
2338
return __xfs_btree_updkeys(cur, level, block, bp, false);
fs/xfs/libxfs/xfs_btree.c
234
struct xfs_perag *pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_btree.c
2346
xfs_btree_get_keys(cur, block, &key);
fs/xfs/libxfs/xfs_btree.c
2347
for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
fs/xfs/libxfs/xfs_btree.c
2351
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
2353
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
2357
ptr = cur->bc_levels[level].ptr;
fs/xfs/libxfs/xfs_btree.c
2358
kp = xfs_btree_key_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
2359
xfs_btree_copy_keys(cur, kp, &key, 1);
fs/xfs/libxfs/xfs_btree.c
2360
xfs_btree_log_keys(cur, bp, ptr, ptr);
fs/xfs/libxfs/xfs_btree.c
2373
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2383
block = xfs_btree_get_block(cur, 0, &bp);
fs/xfs/libxfs/xfs_btree.c
2386
error = xfs_btree_check_block(cur, block, 0, bp);
fs/xfs/libxfs/xfs_btree.c
2391
ptr = cur->bc_levels[0].ptr;
fs/xfs/libxfs/xfs_btree.c
2392
rp = xfs_btree_rec_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
2395
xfs_btree_copy_recs(cur, rp, rec, 1);
fs/xfs/libxfs/xfs_btree.c
2396
xfs_btree_log_recs(cur, bp, ptr, ptr);
fs/xfs/libxfs/xfs_btree.c
2399
if (xfs_btree_needs_key_update(cur, ptr)) {
fs/xfs/libxfs/xfs_btree.c
2400
error = xfs_btree_update_keys(cur, 0);
fs/xfs/libxfs/xfs_btree.c
2417
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2435
if (xfs_btree_at_iroot(cur, level))
fs/xfs/libxfs/xfs_btree.c
2439
right = xfs_btree_get_block(cur, level, &rbp);
fs/xfs/libxfs/xfs_btree.c
2442
error = xfs_btree_check_block(cur, right, level, rbp);
fs/xfs/libxfs/xfs_btree.c
2448
xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
2449
if (xfs_btree_ptr_is_null(cur, &lptr))
fs/xfs/libxfs/xfs_btree.c
245
if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
fs/xfs/libxfs/xfs_btree.c
2456
if (cur->bc_levels[level].ptr <= 1)
fs/xfs/libxfs/xfs_btree.c
2460
error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
fs/xfs/libxfs/xfs_btree.c
2466
if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
fs/xfs/libxfs/xfs_btree.c
2479
XFS_BTREE_STATS_INC(cur, lshift);
fs/xfs/libxfs/xfs_btree.c
2480
XFS_BTREE_STATS_ADD(cur, moves, 1);
fs/xfs/libxfs/xfs_btree.c
2491
lkp = xfs_btree_key_addr(cur, lrecs, left);
fs/xfs/libxfs/xfs_btree.c
2492
rkp = xfs_btree_key_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2494
lpp = xfs_btree_ptr_addr(cur, lrecs, left);
fs/xfs/libxfs/xfs_btree.c
2495
rpp = xfs_btree_ptr_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2497
error = xfs_btree_debug_check_ptr(cur, rpp, 0, level);
fs/xfs/libxfs/xfs_btree.c
250
cur->bc_ops->get_maxrecs(cur, level))
fs/xfs/libxfs/xfs_btree.c
2501
xfs_btree_copy_keys(cur, lkp, rkp, 1);
fs/xfs/libxfs/xfs_btree.c
2502
xfs_btree_copy_ptrs(cur, lpp, rpp, 1);
fs/xfs/libxfs/xfs_btree.c
2504
xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
fs/xfs/libxfs/xfs_btree.c
2505
xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);
fs/xfs/libxfs/xfs_btree.c
2507
ASSERT(cur->bc_ops->keys_inorder(cur,
fs/xfs/libxfs/xfs_btree.c
2508
xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
fs/xfs/libxfs/xfs_btree.c
2513
lrp = xfs_btree_rec_addr(cur, lrecs, left);
fs/xfs/libxfs/xfs_btree.c
2514
rrp = xfs_btree_rec_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2516
xfs_btree_copy_recs(cur, lrp, rrp, 1);
fs/xfs/libxfs/xfs_btree.c
2517
xfs_btree_log_recs(cur, lbp, lrecs, lrecs);
fs/xfs/libxfs/xfs_btree.c
2519
ASSERT(cur->bc_ops->recs_inorder(cur,
fs/xfs/libxfs/xfs_btree.c
2520
xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
fs/xfs/libxfs/xfs_btree.c
2524
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
fs/xfs/libxfs/xfs_btree.c
2527
xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
fs/xfs/libxfs/xfs_btree.c
2532
XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
fs/xfs/libxfs/xfs_btree.c
2536
error = xfs_btree_debug_check_ptr(cur, rpp, i + 1, level);
fs/xfs/libxfs/xfs_btree.c
2541
xfs_btree_shift_keys(cur,
fs/xfs/libxfs/xfs_btree.c
2542
xfs_btree_key_addr(cur, 2, right),
fs/xfs/libxfs/xfs_btree.c
2544
xfs_btree_shift_ptrs(cur,
fs/xfs/libxfs/xfs_btree.c
2545
xfs_btree_ptr_addr(cur, 2, right),
fs/xfs/libxfs/xfs_btree.c
2548
xfs_btree_log_keys(cur, rbp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2549
xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2552
xfs_btree_shift_recs(cur,
fs/xfs/libxfs/xfs_btree.c
2553
xfs_btree_rec_addr(cur, 2, right),
fs/xfs/libxfs/xfs_btree.c
2555
xfs_btree_log_recs(cur, rbp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2562
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
fs/xfs/libxfs/xfs_btree.c
2563
error = xfs_btree_dup_cursor(cur, &tcur);
fs/xfs/libxfs/xfs_btree.c
2568
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
2586
error = xfs_btree_update_keys(cur, level);
fs/xfs/libxfs/xfs_btree.c
2591
cur->bc_levels[level].ptr--;
fs/xfs/libxfs/xfs_btree.c
2614
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2630
if (xfs_btree_at_iroot(cur, level))
fs/xfs/libxfs/xfs_btree.c
2634
left = xfs_btree_get_block(cur, level, &lbp);
fs/xfs/libxfs/xfs_btree.c
2637
error = xfs_btree_check_block(cur, left, level, lbp);
fs/xfs/libxfs/xfs_btree.c
2643
xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
2644
if (xfs_btree_ptr_is_null(cur, &rptr))
fs/xfs/libxfs/xfs_btree.c
2652
if (cur->bc_levels[level].ptr >= lrecs)
fs/xfs/libxfs/xfs_btree.c
2656
error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
fs/xfs/libxfs/xfs_btree.c
2662
if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
fs/xfs/libxfs/xfs_btree.c
2665
XFS_BTREE_STATS_INC(cur, rshift);
fs/xfs/libxfs/xfs_btree.c
2666
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
fs/xfs/libxfs/xfs_btree.c
2678
lkp = xfs_btree_key_addr(cur, lrecs, left);
fs/xfs/libxfs/xfs_btree.c
2679
lpp = xfs_btree_ptr_addr(cur, lrecs, left);
fs/xfs/libxfs/xfs_btree.c
2680
rkp = xfs_btree_key_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2681
rpp = xfs_btree_ptr_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2684
error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
fs/xfs/libxfs/xfs_btree.c
2689
xfs_btree_shift_keys(cur, rkp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
269
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2690
xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2692
error = xfs_btree_debug_check_ptr(cur, lpp, 0, level);
fs/xfs/libxfs/xfs_btree.c
2697
xfs_btree_copy_keys(cur, rkp, lkp, 1);
fs/xfs/libxfs/xfs_btree.c
2698
xfs_btree_copy_ptrs(cur, rpp, lpp, 1);
fs/xfs/libxfs/xfs_btree.c
2700
xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
fs/xfs/libxfs/xfs_btree.c
2701
xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);
fs/xfs/libxfs/xfs_btree.c
2703
ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
fs/xfs/libxfs/xfs_btree.c
2704
xfs_btree_key_addr(cur, 2, right)));
fs/xfs/libxfs/xfs_btree.c
2710
lrp = xfs_btree_rec_addr(cur, lrecs, left);
fs/xfs/libxfs/xfs_btree.c
2711
rrp = xfs_btree_rec_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2713
xfs_btree_shift_recs(cur, rrp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2716
xfs_btree_copy_recs(cur, rrp, lrp, 1);
fs/xfs/libxfs/xfs_btree.c
2717
xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
fs/xfs/libxfs/xfs_btree.c
2724
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);
fs/xfs/libxfs/xfs_btree.c
2727
xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);
fs/xfs/libxfs/xfs_btree.c
2733
error = xfs_btree_dup_cursor(cur, &tcur);
fs/xfs/libxfs/xfs_btree.c
2738
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
274
switch (cur->bc_ops->type) {
fs/xfs/libxfs/xfs_btree.c
2748
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
fs/xfs/libxfs/xfs_btree.c
2749
error = xfs_btree_update_keys(cur, level);
fs/xfs/libxfs/xfs_btree.c
276
return __xfs_btree_check_memblock(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
2778
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
278
return __xfs_btree_check_agblock(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
2792
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
fs/xfs/libxfs/xfs_btree.c
2797
error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat);
fs/xfs/libxfs/xfs_btree.c
2798
trace_xfs_btree_alloc_block(cur, new_block, *stat, error);
fs/xfs/libxfs/xfs_btree.c
280
return __xfs_btree_check_fsblock(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
2809
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
2831
XFS_BTREE_STATS_INC(cur, split);
fs/xfs/libxfs/xfs_btree.c
2834
left = xfs_btree_get_block(cur, level, &lbp);
fs/xfs/libxfs/xfs_btree.c
2837
error = xfs_btree_check_block(cur, left, level, lbp);
fs/xfs/libxfs/xfs_btree.c
2842
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
fs/xfs/libxfs/xfs_btree.c
2845
error = xfs_btree_alloc_block(cur, &lptr, &rptr, stat);
fs/xfs/libxfs/xfs_btree.c
2850
XFS_BTREE_STATS_INC(cur, alloc);
fs/xfs/libxfs/xfs_btree.c
2853
error = xfs_btree_get_buf_block(cur, &rptr, &right, &rbp);
fs/xfs/libxfs/xfs_btree.c
2858
xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);
fs/xfs/libxfs/xfs_btree.c
2867
if ((lrecs & 1) && cur->bc_levels[level].ptr <= rrecs + 1)
fs/xfs/libxfs/xfs_btree.c
287
static inline unsigned int xfs_btree_block_errtag(struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
2871
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
fs/xfs/libxfs/xfs_btree.c
289
if (cur->bc_ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN)
fs/xfs/libxfs/xfs_btree.c
2890
lkp = xfs_btree_key_addr(cur, src_index, left);
fs/xfs/libxfs/xfs_btree.c
2891
lpp = xfs_btree_ptr_addr(cur, src_index, left);
fs/xfs/libxfs/xfs_btree.c
2892
rkp = xfs_btree_key_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2893
rpp = xfs_btree_ptr_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2896
error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
fs/xfs/libxfs/xfs_btree.c
2902
xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
fs/xfs/libxfs/xfs_btree.c
2903
xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
fs/xfs/libxfs/xfs_btree.c
2905
xfs_btree_log_keys(cur, rbp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2906
xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2909
xfs_btree_get_node_keys(cur, right, key);
fs/xfs/libxfs/xfs_btree.c
2915
lrp = xfs_btree_rec_addr(cur, src_index, left);
fs/xfs/libxfs/xfs_btree.c
2916
rrp = xfs_btree_rec_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
2919
xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
fs/xfs/libxfs/xfs_btree.c
2920
xfs_btree_log_recs(cur, rbp, 1, rrecs);
fs/xfs/libxfs/xfs_btree.c
2923
xfs_btree_get_leaf_keys(cur, right, key);
fs/xfs/libxfs/xfs_btree.c
2930
xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
2931
xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
2932
xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
2933
xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
2935
xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
fs/xfs/libxfs/xfs_btree.c
2936
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
2942
if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
fs/xfs/libxfs/xfs_btree.c
2943
error = xfs_btree_read_buf_block(cur, &rrptr,
fs/xfs/libxfs/xfs_btree.c
2947
xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
2948
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
2952
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
fs/xfs/libxfs/xfs_btree.c
2953
error = xfs_btree_update_keys(cur, level);
fs/xfs/libxfs/xfs_btree.c
2963
if (cur->bc_levels[level].ptr > lrecs + 1) {
fs/xfs/libxfs/xfs_btree.c
2964
xfs_btree_setbuf(cur, level, rbp);
fs/xfs/libxfs/xfs_btree.c
2965
cur->bc_levels[level].ptr -= lrecs;
fs/xfs/libxfs/xfs_btree.c
2971
if (level + 1 < cur->bc_nlevels) {
fs/xfs/libxfs/xfs_btree.c
2972
error = xfs_btree_dup_cursor(cur, curp);
fs/xfs/libxfs/xfs_btree.c
299
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
2990
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_btree.c
3024
xfs_trans_set_context(args->cur->bc_tp);
fs/xfs/libxfs/xfs_btree.c
3026
args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
fs/xfs/libxfs/xfs_btree.c
3029
xfs_trans_clear_context(args->cur->bc_tp);
fs/xfs/libxfs/xfs_btree.c
304
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
3060
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
307
fa = __xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
3070
if (!xfs_btree_is_bmap(cur->bc_ops) ||
fs/xfs/libxfs/xfs_btree.c
3071
cur->bc_tp->t_highest_agno == NULLAGNUMBER)
fs/xfs/libxfs/xfs_btree.c
3072
return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
fs/xfs/libxfs/xfs_btree.c
3074
args.cur = cur;
fs/xfs/libxfs/xfs_btree.c
309
XFS_TEST_ERROR(mp, xfs_btree_block_errtag(cur))) {
fs/xfs/libxfs/xfs_btree.c
3095
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
3109
rp = xfs_btree_rec_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
3110
crp = xfs_btree_rec_addr(cur, 1, cblock);
fs/xfs/libxfs/xfs_btree.c
3111
xfs_btree_copy_recs(cur, crp, rp, numrecs);
fs/xfs/libxfs/xfs_btree.c
312
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
3122
cur->bc_ops->broot_realloc(cur, 0);
fs/xfs/libxfs/xfs_btree.c
3123
cur->bc_nlevels++;
fs/xfs/libxfs/xfs_btree.c
3124
cur->bc_levels[1].ptr = 1;
fs/xfs/libxfs/xfs_btree.c
3130
broot = cur->bc_ops->broot_realloc(cur, 1);
fs/xfs/libxfs/xfs_btree.c
3131
xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops,
fs/xfs/libxfs/xfs_btree.c
3132
cur->bc_nlevels - 1, 1, cur->bc_ino.ip->i_ino);
fs/xfs/libxfs/xfs_btree.c
3134
pp = xfs_btree_ptr_addr(cur, 1, broot);
fs/xfs/libxfs/xfs_btree.c
3135
kp = xfs_btree_key_addr(cur, 1, broot);
fs/xfs/libxfs/xfs_btree.c
3136
xfs_btree_copy_ptrs(cur, pp, cptr, 1);
fs/xfs/libxfs/xfs_btree.c
3137
xfs_btree_get_keys(cur, cblock, kp);
fs/xfs/libxfs/xfs_btree.c
3140
xfs_btree_setbuf(cur, 0, cbp);
fs/xfs/libxfs/xfs_btree.c
3141
xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
fs/xfs/libxfs/xfs_btree.c
3142
xfs_btree_log_recs(cur, cbp, 1, numrecs);
fs/xfs/libxfs/xfs_btree.c
3154
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
3175
cur->bc_nlevels++;
fs/xfs/libxfs/xfs_btree.c
3176
cur->bc_levels[level + 1].ptr = 1;
fs/xfs/libxfs/xfs_btree.c
3183
kp = xfs_btree_key_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
3184
ckp = xfs_btree_key_addr(cur, 1, cblock);
fs/xfs/libxfs/xfs_btree.c
3185
xfs_btree_copy_keys(cur, ckp, kp, numrecs);
fs/xfs/libxfs/xfs_btree.c
3188
pp = xfs_btree_ptr_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
3189
cpp = xfs_btree_ptr_addr(cur, 1, cblock);
fs/xfs/libxfs/xfs_btree.c
3191
error = xfs_btree_debug_check_ptr(cur, pp, i, level);
fs/xfs/libxfs/xfs_btree.c
3195
xfs_btree_copy_ptrs(cur, cpp, pp, numrecs);
fs/xfs/libxfs/xfs_btree.c
320
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
3201
error = xfs_btree_debug_check_ptr(cur, cptr, 0, level);
fs/xfs/libxfs/xfs_btree.c
3204
xfs_btree_copy_ptrs(cur, pp, cptr, 1);
fs/xfs/libxfs/xfs_btree.c
3205
xfs_btree_get_keys(cur, cblock, kp);
fs/xfs/libxfs/xfs_btree.c
3207
cur->bc_ops->broot_realloc(cur, 1);
fs/xfs/libxfs/xfs_btree.c
3210
xfs_btree_setbuf(cur, level, cbp);
fs/xfs/libxfs/xfs_btree.c
3211
xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
fs/xfs/libxfs/xfs_btree.c
3212
xfs_btree_log_keys(cur, cbp, 1, numrecs);
fs/xfs/libxfs/xfs_btree.c
3213
xfs_btree_log_ptrs(cur, cbp, 1, numrecs);
fs/xfs/libxfs/xfs_btree.c
3223
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
3235
XFS_BTREE_STATS_INC(cur, newroot);
fs/xfs/libxfs/xfs_btree.c
3237
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
fs/xfs/libxfs/xfs_btree.c
3239
level = cur->bc_nlevels - 1;
fs/xfs/libxfs/xfs_btree.c
3241
block = xfs_btree_get_iroot(cur);
fs/xfs/libxfs/xfs_btree.c
3242
ASSERT(level > 0 || (cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS));
fs/xfs/libxfs/xfs_btree.c
3244
aptr = *xfs_btree_ptr_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
3246
aptr.l = cpu_to_be64(XFS_INO_TO_FSB(cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
3247
cur->bc_ino.ip->i_ino));
fs/xfs/libxfs/xfs_btree.c
3250
error = xfs_btree_alloc_block(cur, &aptr, &nptr, stat);
fs/xfs/libxfs/xfs_btree.c
3256
XFS_BTREE_STATS_INC(cur, alloc);
fs/xfs/libxfs/xfs_btree.c
3259
error = xfs_btree_get_buf_block(cur, &nptr, &cblock, &cbp);
fs/xfs/libxfs/xfs_btree.c
3267
memcpy(cblock, block, xfs_btree_block_len(cur));
fs/xfs/libxfs/xfs_btree.c
3268
if (xfs_has_crc(cur->bc_mp)) {
fs/xfs/libxfs/xfs_btree.c
3270
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/libxfs/xfs_btree.c
3277
error = xfs_btree_promote_node_iroot(cur, block, level, cbp,
fs/xfs/libxfs/xfs_btree.c
328
switch (cur->bc_ops->type) {
fs/xfs/libxfs/xfs_btree.c
3282
xfs_btree_promote_leaf_iroot(cur, block, cbp, &nptr, cblock);
fs/xfs/libxfs/xfs_btree.c
3285
*logflags |= XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_btree.c
3294
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
3298
if (cur->bc_flags & XFS_BTREE_STAGING) {
fs/xfs/libxfs/xfs_btree.c
330
if (!xfbtree_verify_bno(cur->bc_mem.xfbtree,
fs/xfs/libxfs/xfs_btree.c
3300
cur->bc_ag.afake->af_root = be32_to_cpu(ptr->s);
fs/xfs/libxfs/xfs_btree.c
3301
cur->bc_ag.afake->af_levels += inc;
fs/xfs/libxfs/xfs_btree.c
3303
cur->bc_ops->set_root(cur, ptr, inc);
fs/xfs/libxfs/xfs_btree.c
3312
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
3328
XFS_BTREE_STATS_INC(cur, newroot);
fs/xfs/libxfs/xfs_btree.c
3331
xfs_btree_init_ptr_from_cur(cur, &rptr);
fs/xfs/libxfs/xfs_btree.c
3334
error = xfs_btree_alloc_block(cur, &rptr, &lptr, stat);
fs/xfs/libxfs/xfs_btree.c
3339
XFS_BTREE_STATS_INC(cur, alloc);
fs/xfs/libxfs/xfs_btree.c
3342
error = xfs_btree_get_buf_block(cur, &lptr, &new, &nbp);
fs/xfs/libxfs/xfs_btree.c
3347
xfs_btree_set_root(cur, &lptr, 1);
fs/xfs/libxfs/xfs_btree.c
335
if (!xfs_verify_fsbno(cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
3355
block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);
fs/xfs/libxfs/xfs_btree.c
3358
error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
fs/xfs/libxfs/xfs_btree.c
3363
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
3364
if (!xfs_btree_ptr_is_null(cur, &rptr)) {
fs/xfs/libxfs/xfs_btree.c
3367
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
fs/xfs/libxfs/xfs_btree.c
3369
error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
fs/xfs/libxfs/xfs_btree.c
3377
xfs_btree_buf_to_ptr(cur, rbp, &rptr);
fs/xfs/libxfs/xfs_btree.c
3379
xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
3380
error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
fs/xfs/libxfs/xfs_btree.c
3388
xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
fs/xfs/libxfs/xfs_btree.c
3389
xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
fs/xfs/libxfs/xfs_btree.c
3390
ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
fs/xfs/libxfs/xfs_btree.c
3391
!xfs_btree_ptr_is_null(cur, &rptr));
fs/xfs/libxfs/xfs_btree.c
3399
xfs_btree_get_node_keys(cur, left,
fs/xfs/libxfs/xfs_btree.c
340
if (!xfs_verify_agbno(to_perag(cur->bc_group),
fs/xfs/libxfs/xfs_btree.c
3400
xfs_btree_key_addr(cur, 1, new));
fs/xfs/libxfs/xfs_btree.c
3401
xfs_btree_get_node_keys(cur, right,
fs/xfs/libxfs/xfs_btree.c
3402
xfs_btree_key_addr(cur, 2, new));
fs/xfs/libxfs/xfs_btree.c
3409
xfs_btree_get_leaf_keys(cur, left,
fs/xfs/libxfs/xfs_btree.c
3410
xfs_btree_key_addr(cur, 1, new));
fs/xfs/libxfs/xfs_btree.c
3411
xfs_btree_get_leaf_keys(cur, right,
fs/xfs/libxfs/xfs_btree.c
3412
xfs_btree_key_addr(cur, 2, new));
fs/xfs/libxfs/xfs_btree.c
3414
xfs_btree_log_keys(cur, nbp, 1, 2);
fs/xfs/libxfs/xfs_btree.c
3417
xfs_btree_copy_ptrs(cur,
fs/xfs/libxfs/xfs_btree.c
3418
xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
fs/xfs/libxfs/xfs_btree.c
3419
xfs_btree_copy_ptrs(cur,
fs/xfs/libxfs/xfs_btree.c
3420
xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
fs/xfs/libxfs/xfs_btree.c
3421
xfs_btree_log_ptrs(cur, nbp, 1, 2);
fs/xfs/libxfs/xfs_btree.c
3424
xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
fs/xfs/libxfs/xfs_btree.c
3425
cur->bc_levels[cur->bc_nlevels].ptr = nptr;
fs/xfs/libxfs/xfs_btree.c
3426
cur->bc_nlevels++;
fs/xfs/libxfs/xfs_btree.c
3427
ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
fs/xfs/libxfs/xfs_btree.c
3439
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
3451
if (xfs_btree_at_iroot(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
3452
struct xfs_inode *ip = cur->bc_ino.ip;
fs/xfs/libxfs/xfs_btree.c
3454
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
3456
cur->bc_ops->broot_realloc(cur, numrecs + 1);
fs/xfs/libxfs/xfs_btree.c
3462
error = xfs_btree_new_iroot(cur, &logflags, stat);
fs/xfs/libxfs/xfs_btree.c
3466
xfs_trans_log_inode(cur->bc_tp, ip, logflags);
fs/xfs/libxfs/xfs_btree.c
3473
error = xfs_btree_rshift(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
3478
error = xfs_btree_lshift(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
3483
*oindex = *index = cur->bc_levels[level].ptr;
fs/xfs/libxfs/xfs_btree.c
3493
error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
fs/xfs/libxfs/xfs_btree.c
3498
*index = cur->bc_levels[level].ptr;
fs/xfs/libxfs/xfs_btree.c
3508
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
3536
if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE &&
fs/xfs/libxfs/xfs_btree.c
3537
level >= cur->bc_nlevels) {
fs/xfs/libxfs/xfs_btree.c
3538
error = xfs_btree_new_root(cur, stat);
fs/xfs/libxfs/xfs_btree.c
3539
xfs_btree_set_ptr_null(cur, ptrp);
fs/xfs/libxfs/xfs_btree.c
3545
ptr = cur->bc_levels[level].ptr;
fs/xfs/libxfs/xfs_btree.c
355
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
3553
XFS_BTREE_STATS_INC(cur, insrec);
fs/xfs/libxfs/xfs_btree.c
3556
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
3561
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
3568
ASSERT(cur->bc_ops->recs_inorder(cur, rec,
fs/xfs/libxfs/xfs_btree.c
3569
xfs_btree_rec_addr(cur, ptr, block)));
fs/xfs/libxfs/xfs_btree.c
3571
ASSERT(cur->bc_ops->keys_inorder(cur, key,
fs/xfs/libxfs/xfs_btree.c
3572
xfs_btree_key_addr(cur, ptr, block)));
fs/xfs/libxfs/xfs_btree.c
3581
xfs_btree_set_ptr_null(cur, &nptr);
fs/xfs/libxfs/xfs_btree.c
3582
if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
3583
error = xfs_btree_make_block_unfull(cur, level, numrecs,
fs/xfs/libxfs/xfs_btree.c
3593
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
3597
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
3606
XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
fs/xfs/libxfs/xfs_btree.c
3613
kp = xfs_btree_key_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
3614
pp = xfs_btree_ptr_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
3617
error = xfs_btree_debug_check_ptr(cur, pp, i, level);
fs/xfs/libxfs/xfs_btree.c
362
error = __xfs_btree_check_ptr(cur, ptr, index, level);
fs/xfs/libxfs/xfs_btree.c
3622
xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
fs/xfs/libxfs/xfs_btree.c
3623
xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
fs/xfs/libxfs/xfs_btree.c
3625
error = xfs_btree_debug_check_ptr(cur, ptrp, 0, level);
fs/xfs/libxfs/xfs_btree.c
3630
xfs_btree_copy_keys(cur, kp, key, 1);
fs/xfs/libxfs/xfs_btree.c
3631
xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
fs/xfs/libxfs/xfs_btree.c
3634
xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
fs/xfs/libxfs/xfs_btree.c
3635
xfs_btree_log_keys(cur, bp, ptr, numrecs);
fs/xfs/libxfs/xfs_btree.c
3638
ASSERT(cur->bc_ops->keys_inorder(cur, kp,
fs/xfs/libxfs/xfs_btree.c
3639
xfs_btree_key_addr(cur, ptr + 1, block)));
fs/xfs/libxfs/xfs_btree.c
364
switch (cur->bc_ops->type) {
fs/xfs/libxfs/xfs_btree.c
3646
rp = xfs_btree_rec_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
3648
xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
fs/xfs/libxfs/xfs_btree.c
3651
xfs_btree_copy_recs(cur, rp, rec, 1);
fs/xfs/libxfs/xfs_btree.c
3653
xfs_btree_log_recs(cur, bp, ptr, numrecs);
fs/xfs/libxfs/xfs_btree.c
3656
ASSERT(cur->bc_ops->recs_inorder(cur, rp,
fs/xfs/libxfs/xfs_btree.c
3657
xfs_btree_rec_addr(cur, ptr + 1, block)));
fs/xfs/libxfs/xfs_btree.c
366
xfs_err(cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
3663
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
fs/xfs/libxfs/xfs_btree.c
368
cur->bc_ops->name, cur->bc_flags, level, index,
fs/xfs/libxfs/xfs_btree.c
3689
if (!xfs_btree_ptr_is_null(cur, &nptr) &&
fs/xfs/libxfs/xfs_btree.c
3691
xfs_btree_get_keys(cur, block, lkey);
fs/xfs/libxfs/xfs_btree.c
3692
} else if (xfs_btree_needs_key_update(cur, optr)) {
fs/xfs/libxfs/xfs_btree.c
3693
error = xfs_btree_update_keys(cur, level);
fs/xfs/libxfs/xfs_btree.c
3703
if (!xfs_btree_ptr_is_null(cur, &nptr)) {
fs/xfs/libxfs/xfs_btree.c
3704
xfs_btree_copy_keys(cur, key, lkey, 1);
fs/xfs/libxfs/xfs_btree.c
372
xfs_err(cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
3726
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
374
cur->bc_ino.ip->i_ino,
fs/xfs/libxfs/xfs_btree.c
3741
pcur = cur;
fs/xfs/libxfs/xfs_btree.c
3744
xfs_btree_set_ptr_null(cur, &nptr);
fs/xfs/libxfs/xfs_btree.c
3747
cur->bc_ops->init_rec_from_cur(cur, &rec);
fs/xfs/libxfs/xfs_btree.c
3748
cur->bc_ops->init_key_from_rec(key, &rec);
fs/xfs/libxfs/xfs_btree.c
375
cur->bc_ino.whichfork, cur->bc_ops->name,
fs/xfs/libxfs/xfs_btree.c
3763
if (pcur != cur)
fs/xfs/libxfs/xfs_btree.c
3768
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
3769
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
3780
if (pcur != cur &&
fs/xfs/libxfs/xfs_btree.c
3781
(ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
fs/xfs/libxfs/xfs_btree.c
3783
if (cur->bc_ops->update_cursor &&
fs/xfs/libxfs/xfs_btree.c
3784
!(cur->bc_flags & XFS_BTREE_STAGING))
fs/xfs/libxfs/xfs_btree.c
3785
cur->bc_ops->update_cursor(pcur, cur);
fs/xfs/libxfs/xfs_btree.c
3786
cur->bc_nlevels = pcur->bc_nlevels;
fs/xfs/libxfs/xfs_btree.c
379
xfs_err(cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
3794
} while (!xfs_btree_ptr_is_null(cur, &nptr));
fs/xfs/libxfs/xfs_btree.c
3805
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
381
cur->bc_group->xg_gno, cur->bc_ops->name,
fs/xfs/libxfs/xfs_btree.c
3821
cur->bc_ops->broot_realloc(cur, 0);
fs/xfs/libxfs/xfs_btree.c
3822
cur->bc_nlevels--;
fs/xfs/libxfs/xfs_btree.c
3828
broot = cur->bc_ops->broot_realloc(cur, numrecs);
fs/xfs/libxfs/xfs_btree.c
3829
xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, 0, numrecs,
fs/xfs/libxfs/xfs_btree.c
3830
cur->bc_ino.ip->i_ino);
fs/xfs/libxfs/xfs_btree.c
3832
rp = xfs_btree_rec_addr(cur, 1, broot);
fs/xfs/libxfs/xfs_btree.c
3833
crp = xfs_btree_rec_addr(cur, 1, cblock);
fs/xfs/libxfs/xfs_btree.c
3834
xfs_btree_copy_recs(cur, rp, crp, numrecs);
fs/xfs/libxfs/xfs_btree.c
3836
cur->bc_levels[0].bp = NULL;
fs/xfs/libxfs/xfs_btree.c
3848
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
385
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
3866
block = cur->bc_ops->broot_realloc(cur, numrecs);
fs/xfs/libxfs/xfs_btree.c
3872
kp = xfs_btree_key_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
3873
ckp = xfs_btree_key_addr(cur, 1, cblock);
fs/xfs/libxfs/xfs_btree.c
3874
xfs_btree_copy_keys(cur, kp, ckp, numrecs);
fs/xfs/libxfs/xfs_btree.c
3877
pp = xfs_btree_ptr_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
3878
cpp = xfs_btree_ptr_addr(cur, 1, cblock);
fs/xfs/libxfs/xfs_btree.c
3880
error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1);
fs/xfs/libxfs/xfs_btree.c
3884
xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
fs/xfs/libxfs/xfs_btree.c
3887
cur->bc_levels[level - 1].bp = NULL;
fs/xfs/libxfs/xfs_btree.c
3889
cur->bc_nlevels--;
fs/xfs/libxfs/xfs_btree.c
3903
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
3905
struct xfs_inode *ip = cur->bc_ino.ip;
fs/xfs/libxfs/xfs_btree.c
3916
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
fs/xfs/libxfs/xfs_btree.c
3917
ASSERT((cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS) ||
fs/xfs/libxfs/xfs_btree.c
3918
cur->bc_nlevels > 1);
fs/xfs/libxfs/xfs_btree.c
3924
level = cur->bc_nlevels - 1;
fs/xfs/libxfs/xfs_btree.c
3925
if (level == 1 && !(cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS))
fs/xfs/libxfs/xfs_btree.c
3935
block = xfs_btree_get_iroot(cur);
fs/xfs/libxfs/xfs_btree.c
3939
cblock = xfs_btree_get_block(cur, level - 1, &cbp);
fs/xfs/libxfs/xfs_btree.c
3947
if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
fs/xfs/libxfs/xfs_btree.c
3950
XFS_BTREE_STATS_INC(cur, killroot);
fs/xfs/libxfs/xfs_btree.c
3953
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
3954
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
fs/xfs/libxfs/xfs_btree.c
3955
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
3956
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
fs/xfs/libxfs/xfs_btree.c
3960
error = xfs_btree_demote_node_child(cur, cblock, level,
fs/xfs/libxfs/xfs_btree.c
3965
xfs_btree_demote_leaf_child(cur, cblock, numrecs);
fs/xfs/libxfs/xfs_btree.c
3967
error = xfs_btree_free_block(cur, cbp);
fs/xfs/libxfs/xfs_btree.c
3971
xfs_trans_log_inode(cur->bc_tp, ip,
fs/xfs/libxfs/xfs_btree.c
3972
XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork));
fs/xfs/libxfs/xfs_btree.c
3982
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
3989
XFS_BTREE_STATS_INC(cur, killroot);
fs/xfs/libxfs/xfs_btree.c
3995
xfs_btree_set_root(cur, newroot, -1);
fs/xfs/libxfs/xfs_btree.c
3997
error = xfs_btree_free_block(cur, bp);
fs/xfs/libxfs/xfs_btree.c
4001
cur->bc_levels[level].bp = NULL;
fs/xfs/libxfs/xfs_btree.c
4002
cur->bc_levels[level].ra = 0;
fs/xfs/libxfs/xfs_btree.c
4003
cur->bc_nlevels--;
fs/xfs/libxfs/xfs_btree.c
4010
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
4018
error = xfs_btree_decrement(cur, level, &i);
fs/xfs/libxfs/xfs_btree.c
4035
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
4061
ptr = cur->bc_levels[level].ptr;
fs/xfs/libxfs/xfs_btree.c
4068
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
4072
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
4083
XFS_BTREE_STATS_INC(cur, delrec);
fs/xfs/libxfs/xfs_btree.c
4084
XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);
fs/xfs/libxfs/xfs_btree.c
4092
lkp = xfs_btree_key_addr(cur, ptr + 1, block);
fs/xfs/libxfs/xfs_btree.c
4093
lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);
fs/xfs/libxfs/xfs_btree.c
4096
error = xfs_btree_debug_check_ptr(cur, lpp, i, level);
fs/xfs/libxfs/xfs_btree.c
4102
xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
fs/xfs/libxfs/xfs_btree.c
4103
xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
fs/xfs/libxfs/xfs_btree.c
4104
xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
fs/xfs/libxfs/xfs_btree.c
4105
xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
fs/xfs/libxfs/xfs_btree.c
4110
xfs_btree_shift_recs(cur,
fs/xfs/libxfs/xfs_btree.c
4111
xfs_btree_rec_addr(cur, ptr + 1, block),
fs/xfs/libxfs/xfs_btree.c
4113
xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
fs/xfs/libxfs/xfs_btree.c
4121
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
fs/xfs/libxfs/xfs_btree.c
4128
if (xfs_btree_at_iroot(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
4129
cur->bc_ops->broot_realloc(cur, numrecs);
fs/xfs/libxfs/xfs_btree.c
4131
error = xfs_btree_kill_iroot(cur);
fs/xfs/libxfs/xfs_btree.c
4135
error = xfs_btree_dec_cursor(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
4146
if (level == cur->bc_nlevels - 1) {
fs/xfs/libxfs/xfs_btree.c
4153
pp = xfs_btree_ptr_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
4154
error = xfs_btree_kill_root(cur, bp, level, pp);
fs/xfs/libxfs/xfs_btree.c
4158
error = xfs_btree_dec_cursor(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
4170
if (xfs_btree_needs_key_update(cur, ptr)) {
fs/xfs/libxfs/xfs_btree.c
4171
error = xfs_btree_update_keys(cur, level);
fs/xfs/libxfs/xfs_btree.c
4180
if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
4181
error = xfs_btree_dec_cursor(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
4192
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
4193
xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
4195
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
fs/xfs/libxfs/xfs_btree.c
4201
if (xfs_btree_ptr_is_null(cur, &rptr) &&
fs/xfs/libxfs/xfs_btree.c
4202
xfs_btree_ptr_is_null(cur, &lptr) &&
fs/xfs/libxfs/xfs_btree.c
4203
level == cur->bc_nlevels - 2) {
fs/xfs/libxfs/xfs_btree.c
4204
error = xfs_btree_kill_iroot(cur);
fs/xfs/libxfs/xfs_btree.c
4206
error = xfs_btree_dec_cursor(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
4213
ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
fs/xfs/libxfs/xfs_btree.c
4214
!xfs_btree_ptr_is_null(cur, &lptr));
fs/xfs/libxfs/xfs_btree.c
4220
error = xfs_btree_dup_cursor(cur, &tcur);
fs/xfs/libxfs/xfs_btree.c
4228
if (!xfs_btree_ptr_is_null(cur, &rptr)) {
fs/xfs/libxfs/xfs_btree.c
4234
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
4235
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4243
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
4244
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4250
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
4251
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4272
cur->bc_ops->get_minrecs(tcur, level)) {
fs/xfs/libxfs/xfs_btree.c
4278
cur->bc_ops->get_minrecs(tcur, level));
fs/xfs/libxfs/xfs_btree.c
4283
error = xfs_btree_dec_cursor(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
4296
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
fs/xfs/libxfs/xfs_btree.c
4298
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
4299
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4307
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
4308
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4319
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
fs/xfs/libxfs/xfs_btree.c
4325
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
4326
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4335
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_btree.c
4336
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4344
error = xfs_btree_check_block(cur, left, level, lbp);
fs/xfs/libxfs/xfs_btree.c
4357
cur->bc_ops->get_minrecs(tcur, level)) {
fs/xfs/libxfs/xfs_btree.c
4363
cur->bc_ops->get_minrecs(tcur, level));
fs/xfs/libxfs/xfs_btree.c
4367
cur->bc_levels[0].ptr++;
fs/xfs/libxfs/xfs_btree.c
4386
ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));
fs/xfs/libxfs/xfs_btree.c
4388
if (!xfs_btree_ptr_is_null(cur, &lptr) &&
fs/xfs/libxfs/xfs_btree.c
4390
cur->bc_ops->get_maxrecs(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
4398
error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
fs/xfs/libxfs/xfs_btree.c
4405
} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
fs/xfs/libxfs/xfs_btree.c
4407
cur->bc_ops->get_maxrecs(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
4415
error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
fs/xfs/libxfs/xfs_btree.c
4424
error = xfs_btree_dec_cursor(cur, level, stat);
fs/xfs/libxfs/xfs_btree.c
4437
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
fs/xfs/libxfs/xfs_btree.c
4445
lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
fs/xfs/libxfs/xfs_btree.c
4446
lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
fs/xfs/libxfs/xfs_btree.c
4447
rkp = xfs_btree_key_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
4448
rpp = xfs_btree_ptr_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
4451
error = xfs_btree_debug_check_ptr(cur, rpp, i, level);
fs/xfs/libxfs/xfs_btree.c
4456
xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
fs/xfs/libxfs/xfs_btree.c
4457
xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);
fs/xfs/libxfs/xfs_btree.c
4459
xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
fs/xfs/libxfs/xfs_btree.c
4460
xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
fs/xfs/libxfs/xfs_btree.c
4466
lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
fs/xfs/libxfs/xfs_btree.c
4467
rrp = xfs_btree_rec_addr(cur, 1, right);
fs/xfs/libxfs/xfs_btree.c
4469
xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
fs/xfs/libxfs/xfs_btree.c
4470
xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
fs/xfs/libxfs/xfs_btree.c
4473
XFS_BTREE_STATS_INC(cur, join);
fs/xfs/libxfs/xfs_btree.c
4480
xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
4481
xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
4482
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
4485
xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
4486
if (!xfs_btree_ptr_is_null(cur, &cptr)) {
fs/xfs/libxfs/xfs_btree.c
4487
error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
fs/xfs/libxfs/xfs_btree.c
4490
xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
4491
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree.c
4495
error = xfs_btree_free_block(cur, rbp);
fs/xfs/libxfs/xfs_btree.c
4504
cur->bc_levels[level].bp = lbp;
fs/xfs/libxfs/xfs_btree.c
4505
cur->bc_levels[level].ptr += lrecs;
fs/xfs/libxfs/xfs_btree.c
4506
cur->bc_levels[level].ra = 0;
fs/xfs/libxfs/xfs_btree.c
4512
else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE ||
fs/xfs/libxfs/xfs_btree.c
4513
level + 1 < cur->bc_nlevels) {
fs/xfs/libxfs/xfs_btree.c
4514
error = xfs_btree_increment(cur, level + 1, &i);
fs/xfs/libxfs/xfs_btree.c
4526
cur->bc_levels[level].ptr--;
fs/xfs/libxfs/xfs_btree.c
4555
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
4570
error = xfs_btree_delrec(cur, level, &i);
fs/xfs/libxfs/xfs_btree.c
4581
if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) {
fs/xfs/libxfs/xfs_btree.c
4582
error = xfs_btree_updkeys_force(cur, 0);
fs/xfs/libxfs/xfs_btree.c
4588
for (level = 1; level < cur->bc_nlevels; level++) {
fs/xfs/libxfs/xfs_btree.c
4589
if (cur->bc_levels[level].ptr == 0) {
fs/xfs/libxfs/xfs_btree.c
4590
error = xfs_btree_decrement(cur, level, &i);
fs/xfs/libxfs/xfs_btree.c
4609
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
4620
ptr = cur->bc_levels[0].ptr;
fs/xfs/libxfs/xfs_btree.c
4621
block = xfs_btree_get_block(cur, 0, &bp);
fs/xfs/libxfs/xfs_btree.c
4624
error = xfs_btree_check_block(cur, block, 0, bp);
fs/xfs/libxfs/xfs_btree.c
4640
*recp = xfs_btree_rec_addr(cur, ptr, block);
fs/xfs/libxfs/xfs_btree.c
4648
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
4659
xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
fs/xfs/libxfs/xfs_btree.c
4660
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
4663
error = fn(cur, level, data);
fs/xfs/libxfs/xfs_btree.c
4668
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree.c
4669
if (xfs_btree_ptr_is_null(cur, &rptr))
fs/xfs/libxfs/xfs_btree.c
4678
xfs_btree_buf_to_ptr(cur, bp, &bufptr);
fs/xfs/libxfs/xfs_btree.c
4679
if (xfs_btree_ptrs_equal(cur, &rptr, &bufptr)) {
fs/xfs/libxfs/xfs_btree.c
4680
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
4684
return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
fs/xfs/libxfs/xfs_btree.c
4691
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
4701
xfs_btree_init_ptr_from_cur(cur, &lptr);
fs/xfs/libxfs/xfs_btree.c
4704
for (level = cur->bc_nlevels - 1; level >= 0; level--) {
fs/xfs/libxfs/xfs_btree.c
4706
error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
fs/xfs/libxfs/xfs_btree.c
4714
ptr = xfs_btree_ptr_addr(cur, 1, block);
fs/xfs/libxfs/xfs_btree.c
4715
xfs_btree_readahead_ptr(cur, ptr, 1);
fs/xfs/libxfs/xfs_btree.c
4718
xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
fs/xfs/libxfs/xfs_btree.c
4728
error = xfs_btree_visit_block(cur, level, fn, data);
fs/xfs/libxfs/xfs_btree.c
475
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
4769
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
4778
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
4779
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/libxfs/xfs_btree.c
4797
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
fs/xfs/libxfs/xfs_btree.c
4798
ASSERT(level == cur->bc_nlevels - 1);
fs/xfs/libxfs/xfs_btree.c
480
trace_xfs_btree_free_block(cur, bp);
fs/xfs/libxfs/xfs_btree.c
4802
if (cur->bc_tp) {
fs/xfs/libxfs/xfs_btree.c
4803
if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
fs/xfs/libxfs/xfs_btree.c
4804
xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
fs/xfs/libxfs/xfs_btree.c
4816
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
4825
return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
fs/xfs/libxfs/xfs_btree.c
486
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
fs/xfs/libxfs/xfs_btree.c
491
error = cur->bc_ops->free_block(cur, bp);
fs/xfs/libxfs/xfs_btree.c
493
xfs_trans_binval(cur->bc_tp, bp);
fs/xfs/libxfs/xfs_btree.c
494
XFS_BTREE_STATS_INC(cur, free);
fs/xfs/libxfs/xfs_btree.c
504
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
5049
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5061
ASSERT(cur->bc_ops->init_high_key_from_rec);
fs/xfs/libxfs/xfs_btree.c
5062
ASSERT(cur->bc_ops->cmp_two_keys);
fs/xfs/libxfs/xfs_btree.c
5069
error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
fs/xfs/libxfs/xfs_btree.c
5075
error = xfs_btree_increment(cur, 0, &stat);
fs/xfs/libxfs/xfs_btree.c
5082
error = xfs_btree_get_rec(cur, &recp, &stat);
fs/xfs/libxfs/xfs_btree.c
5088
cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
fs/xfs/libxfs/xfs_btree.c
5090
if (xfs_btree_keycmp_gt(cur, low_key, &rec_key))
fs/xfs/libxfs/xfs_btree.c
5095
cur->bc_ops->init_key_from_rec(&rec_key, recp);
fs/xfs/libxfs/xfs_btree.c
5096
if (xfs_btree_keycmp_gt(cur, &rec_key, high_key))
fs/xfs/libxfs/xfs_btree.c
5100
error = fn(cur, recp, priv);
fs/xfs/libxfs/xfs_btree.c
5106
error = xfs_btree_increment(cur, 0, &stat);
fs/xfs/libxfs/xfs_btree.c
5136
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5156
level = cur->bc_nlevels - 1;
fs/xfs/libxfs/xfs_btree.c
5157
xfs_btree_init_ptr_from_cur(cur, &ptr);
fs/xfs/libxfs/xfs_btree.c
5158
error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
fs/xfs/libxfs/xfs_btree.c
516
for (i = 0; i < cur->bc_nlevels; i++) {
fs/xfs/libxfs/xfs_btree.c
5161
xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
5162
trace_xfs_btree_overlapped_query_range(cur, level, bp);
fs/xfs/libxfs/xfs_btree.c
5164
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
5168
cur->bc_levels[level].ptr = 1;
fs/xfs/libxfs/xfs_btree.c
517
if (cur->bc_levels[i].bp)
fs/xfs/libxfs/xfs_btree.c
5170
while (level < cur->bc_nlevels) {
fs/xfs/libxfs/xfs_btree.c
5171
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
5174
if (cur->bc_levels[level].ptr >
fs/xfs/libxfs/xfs_btree.c
5177
if (level < cur->bc_nlevels - 1)
fs/xfs/libxfs/xfs_btree.c
5178
cur->bc_levels[level + 1].ptr++;
fs/xfs/libxfs/xfs_btree.c
518
xfs_trans_brelse(cur->bc_tp, cur->bc_levels[i].bp);
fs/xfs/libxfs/xfs_btree.c
5185
recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr,
fs/xfs/libxfs/xfs_btree.c
5188
cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
fs/xfs/libxfs/xfs_btree.c
5189
cur->bc_ops->init_key_from_rec(&rec_key, recp);
fs/xfs/libxfs/xfs_btree.c
5200
if (xfs_btree_keycmp_lt(cur, high_key, &rec_key))
fs/xfs/libxfs/xfs_btree.c
5202
if (xfs_btree_keycmp_ge(cur, &rec_hkey, low_key)) {
fs/xfs/libxfs/xfs_btree.c
5203
error = fn(cur, recp, priv);
fs/xfs/libxfs/xfs_btree.c
5207
cur->bc_levels[level].ptr++;
fs/xfs/libxfs/xfs_btree.c
5212
lkp = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block);
fs/xfs/libxfs/xfs_btree.c
5213
hkp = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr,
fs/xfs/libxfs/xfs_btree.c
5215
pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block);
fs/xfs/libxfs/xfs_btree.c
5226
if (xfs_btree_keycmp_lt(cur, high_key, lkp))
fs/xfs/libxfs/xfs_btree.c
5228
if (xfs_btree_keycmp_ge(cur, hkp, low_key)) {
fs/xfs/libxfs/xfs_btree.c
5230
error = xfs_btree_lookup_get_block(cur, level, pp,
fs/xfs/libxfs/xfs_btree.c
5234
xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
5235
trace_xfs_btree_overlapped_query_range(cur, level, bp);
fs/xfs/libxfs/xfs_btree.c
5237
error = xfs_btree_check_block(cur, block, level, bp);
fs/xfs/libxfs/xfs_btree.c
5241
cur->bc_levels[level].ptr = 1;
fs/xfs/libxfs/xfs_btree.c
5244
cur->bc_levels[level].ptr++;
fs/xfs/libxfs/xfs_btree.c
5255
if (cur->bc_levels[0].bp == NULL) {
fs/xfs/libxfs/xfs_btree.c
5256
for (i = 0; i < cur->bc_nlevels; i++) {
fs/xfs/libxfs/xfs_btree.c
5257
if (cur->bc_levels[i].bp) {
fs/xfs/libxfs/xfs_btree.c
5258
xfs_trans_brelse(cur->bc_tp,
fs/xfs/libxfs/xfs_btree.c
5259
cur->bc_levels[i].bp);
fs/xfs/libxfs/xfs_btree.c
5260
cur->bc_levels[i].bp = NULL;
fs/xfs/libxfs/xfs_btree.c
5261
cur->bc_levels[i].ptr = 0;
fs/xfs/libxfs/xfs_btree.c
5262
cur->bc_levels[i].ra = 0;
fs/xfs/libxfs/xfs_btree.c
5272
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5278
cur->bc_rec = *irec;
fs/xfs/libxfs/xfs_btree.c
5279
cur->bc_ops->init_rec_from_cur(cur, &rec);
fs/xfs/libxfs/xfs_btree.c
5280
cur->bc_ops->init_key_from_rec(key, &rec);
fs/xfs/libxfs/xfs_btree.c
529
ASSERT(!xfs_btree_is_bmap(cur->bc_ops) || cur->bc_bmap.allocated == 0 ||
fs/xfs/libxfs/xfs_btree.c
5291
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
530
xfs_is_shutdown(cur->bc_mp) || error != 0);
fs/xfs/libxfs/xfs_btree.c
5301
xfs_btree_key_from_irec(cur, &high_key, high_rec);
fs/xfs/libxfs/xfs_btree.c
5302
xfs_btree_key_from_irec(cur, &low_key, low_rec);
fs/xfs/libxfs/xfs_btree.c
5305
if (!xfs_btree_keycmp_le(cur, &low_key, &high_key))
fs/xfs/libxfs/xfs_btree.c
5308
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
fs/xfs/libxfs/xfs_btree.c
5309
return xfs_btree_simple_query_range(cur, &low_key,
fs/xfs/libxfs/xfs_btree.c
5311
return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
fs/xfs/libxfs/xfs_btree.c
5318
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
532
if (cur->bc_group)
fs/xfs/libxfs/xfs_btree.c
5325
memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
fs/xfs/libxfs/xfs_btree.c
5329
return xfs_btree_simple_query_range(cur, &low_key, &high_key, fn, priv);
fs/xfs/libxfs/xfs_btree.c
533
xfs_group_put(cur->bc_group);
fs/xfs/libxfs/xfs_btree.c
5334
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
534
kmem_cache_free(cur->bc_cache, cur);
fs/xfs/libxfs/xfs_btree.c
5347
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5351
return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
fs/xfs/libxfs/xfs_btree.c
5358
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5362
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/libxfs/xfs_btree.c
5383
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5392
cur->bc_ops->init_key_from_rec(&rec_key, rec);
fs/xfs/libxfs/xfs_btree.c
540
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
5402
if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key,
fs/xfs/libxfs/xfs_btree.c
5413
key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key,
fs/xfs/libxfs/xfs_btree.c
5416
!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
fs/xfs/libxfs/xfs_btree.c
542
if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM)
fs/xfs/libxfs/xfs_btree.c
5426
cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec);
fs/xfs/libxfs/xfs_btree.c
5427
if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key,
fs/xfs/libxfs/xfs_btree.c
543
return cur->bc_mem.xfbtree->target;
fs/xfs/libxfs/xfs_btree.c
544
return cur->bc_mp->m_ddev_targp;
fs/xfs/libxfs/xfs_btree.c
5450
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5463
if (!cur->bc_ops->keys_contiguous) {
fs/xfs/libxfs/xfs_btree.c
5468
xfs_btree_key_from_irec(cur, &info.start_key, low);
fs/xfs/libxfs/xfs_btree.c
5469
xfs_btree_key_from_irec(cur, &info.end_key, high);
fs/xfs/libxfs/xfs_btree.c
5471
error = xfs_btree_query_range(cur, low, high,
fs/xfs/libxfs/xfs_btree.c
5486
if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key,
fs/xfs/libxfs/xfs_btree.c
5498
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
550
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
5503
block = xfs_btree_get_block(cur, 0, &bp);
fs/xfs/libxfs/xfs_btree.c
5506
if (cur->bc_levels[0].ptr < xfs_btree_get_numrecs(block))
fs/xfs/libxfs/xfs_btree.c
5510
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/libxfs/xfs_btree.c
552
if (cur->bc_ops->type == XFS_BTREE_TYPE_MEM)
fs/xfs/libxfs/xfs_btree.c
554
return cur->bc_mp->m_bsize;
fs/xfs/libxfs/xfs_btree.c
5566
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
5571
memset(&cur->bc_rec, 0, sizeof(cur->bc_rec));
fs/xfs/libxfs/xfs_btree.c
5572
error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
fs/xfs/libxfs/xfs_btree.c
5578
error = xfs_btree_decrement(cur, 0, &stat);
fs/xfs/libxfs/xfs_btree.c
5583
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_btree.c
5593
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5599
.mp = cur->bc_mp,
fs/xfs/libxfs/xfs_btree.c
5600
.tp = cur->bc_tp,
fs/xfs/libxfs/xfs_btree.c
5606
struct xfs_inode *ip = cur->bc_ino.ip;
fs/xfs/libxfs/xfs_btree.c
5611
xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_btree.c
5613
XFS_INO_TO_FSB(cur->bc_mp, ip->i_ino));
fs/xfs/libxfs/xfs_btree.c
563
struct xfs_btree_cur *cur, /* input cursor */
fs/xfs/libxfs/xfs_btree.c
5632
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
5636
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
5637
struct xfs_inode *ip = cur->bc_ino.ip;
fs/xfs/libxfs/xfs_btree.c
5638
struct xfs_trans *tp = cur->bc_tp;
fs/xfs/libxfs/xfs_btree.c
5644
xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_btree.c
566
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
567
struct xfs_trans *tp = cur->bc_tp;
fs/xfs/libxfs/xfs_btree.c
577
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING)) {
fs/xfs/libxfs/xfs_btree.c
585
new = cur->bc_ops->dup_cursor(cur);
fs/xfs/libxfs/xfs_btree.c
590
new->bc_rec = cur->bc_rec;
fs/xfs/libxfs/xfs_btree.c
596
new->bc_levels[i].ptr = cur->bc_levels[i].ptr;
fs/xfs/libxfs/xfs_btree.c
597
new->bc_levels[i].ra = cur->bc_levels[i].ra;
fs/xfs/libxfs/xfs_btree.c
598
bp = cur->bc_levels[i].bp;
fs/xfs/libxfs/xfs_btree.c
601
xfs_btree_buftarg(cur),
fs/xfs/libxfs/xfs_btree.c
603
xfs_btree_bbsize(cur), 0, &bp,
fs/xfs/libxfs/xfs_btree.c
604
cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
696
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
698
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/libxfs/xfs_btree.c
699
if (xfs_has_crc(cur->bc_mp))
fs/xfs/libxfs/xfs_btree.c
703
if (xfs_has_crc(cur->bc_mp))
fs/xfs/libxfs/xfs_btree.c
713
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
716
return xfs_btree_block_len(cur) +
fs/xfs/libxfs/xfs_btree.c
717
(n - 1) * cur->bc_ops->rec_len;
fs/xfs/libxfs/xfs_btree.c
725
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
728
return xfs_btree_block_len(cur) +
fs/xfs/libxfs/xfs_btree.c
729
(n - 1) * cur->bc_ops->key_len;
fs/xfs/libxfs/xfs_btree.c
737
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
740
return xfs_btree_block_len(cur) +
fs/xfs/libxfs/xfs_btree.c
741
(n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
fs/xfs/libxfs/xfs_btree.c
749
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
753
return xfs_btree_block_len(cur) +
fs/xfs/libxfs/xfs_btree.c
754
cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
fs/xfs/libxfs/xfs_btree.c
755
(n - 1) * cur->bc_ops->ptr_len;
fs/xfs/libxfs/xfs_btree.c
763
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
768
((char *)block + xfs_btree_rec_offset(cur, n));
fs/xfs/libxfs/xfs_btree.c
776
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
781
((char *)block + xfs_btree_key_offset(cur, n));
fs/xfs/libxfs/xfs_btree.c
789
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
794
((char *)block + xfs_btree_high_key_offset(cur, n));
fs/xfs/libxfs/xfs_btree.c
802
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
811
((char *)block + xfs_btree_ptr_offset(cur, n, level));
fs/xfs/libxfs/xfs_btree.c
816
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
818
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
fs/xfs/libxfs/xfs_btree.c
820
if (cur->bc_flags & XFS_BTREE_STAGING)
fs/xfs/libxfs/xfs_btree.c
821
return cur->bc_ino.ifake->if_fork;
fs/xfs/libxfs/xfs_btree.c
822
return xfs_ifork_ptr(cur->bc_ino.ip, cur->bc_ino.whichfork);
fs/xfs/libxfs/xfs_btree.c
833
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree.c
835
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_btree.c
846
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
850
if (xfs_btree_at_iroot(cur, level)) {
fs/xfs/libxfs/xfs_btree.c
852
return xfs_btree_get_iroot(cur);
fs/xfs/libxfs/xfs_btree.c
855
*bpp = cur->bc_levels[level].bp;
fs/xfs/libxfs/xfs_btree.c
865
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
874
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
875
if (xfs_btree_check_block(cur, block, level, bp))
fs/xfs/libxfs/xfs_btree.c
885
cur->bc_levels[level].ptr = 1;
fs/xfs/libxfs/xfs_btree.c
895
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.c
904
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.c
905
if (xfs_btree_check_block(cur, block, level, bp))
fs/xfs/libxfs/xfs_btree.c
915
cur->bc_levels[level].ptr = be16_to_cpu(block->bb_numrecs);
fs/xfs/libxfs/xfs_btree.c
957
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
961
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_btree.c
968
mp->m_bsize, cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
974
mp->m_bsize, cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.c
983
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.c
987
struct xfs_buftarg *btp = cur->bc_mem.xfbtree->target;
fs/xfs/libxfs/xfs_btree.c
994
cur->bc_ops->buf_ops);
fs/xfs/libxfs/xfs_btree.h
147
void (*set_root)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
151
int (*alloc_block)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
155
int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp);
fs/xfs/libxfs/xfs_btree.h
158
int (*get_minrecs)(struct xfs_btree_cur *cur, int level);
fs/xfs/libxfs/xfs_btree.h
159
int (*get_maxrecs)(struct xfs_btree_cur *cur, int level);
fs/xfs/libxfs/xfs_btree.h
162
int (*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);
fs/xfs/libxfs/xfs_btree.h
167
void (*init_rec_from_cur)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
169
void (*init_ptr_from_cur)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
178
int (*cmp_key_with_cur)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
187
int (*cmp_two_keys)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
195
int (*keys_inorder)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
200
int (*recs_inorder)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
215
enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
233
struct xfs_btree_block *(*broot_realloc)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
346
xfs_failaddr_t __xfs_btree_check_block(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
348
int __xfs_btree_check_ptr(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
356
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.h
366
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_btree.h
375
struct xfs_btree_cur *cur, /* input cursor */
fs/xfs/libxfs/xfs_btree.h
411
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
fs/xfs/libxfs/xfs_btree.h
484
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
487
int xfs_btree_query_range(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
491
int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
fs/xfs/libxfs/xfs_btree.h
494
typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
fs/xfs/libxfs/xfs_btree.h
503
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
506
int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_filblks_t *blocks);
fs/xfs/libxfs/xfs_btree.h
508
union xfs_btree_rec *xfs_btree_rec_addr(struct xfs_btree_cur *cur, int n,
fs/xfs/libxfs/xfs_btree.h
510
union xfs_btree_key *xfs_btree_key_addr(struct xfs_btree_cur *cur, int n,
fs/xfs/libxfs/xfs_btree.h
512
union xfs_btree_key *xfs_btree_high_key_addr(struct xfs_btree_cur *cur, int n,
fs/xfs/libxfs/xfs_btree.h
514
union xfs_btree_ptr *xfs_btree_ptr_addr(struct xfs_btree_cur *cur, int n,
fs/xfs/libxfs/xfs_btree.h
516
int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
fs/xfs/libxfs/xfs_btree.h
518
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
520
bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
522
int xfs_btree_cmp_two_ptrs(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
525
void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
528
void xfs_btree_get_keys(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
530
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
532
typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
536
int xfs_btree_has_records(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
542
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
fs/xfs/libxfs/xfs_btree.h
543
struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);
fs/xfs/libxfs/xfs_btree.h
548
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
552
return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) < 0;
fs/xfs/libxfs/xfs_btree.h
557
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
561
return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) > 0;
fs/xfs/libxfs/xfs_btree.h
566
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
570
return cur->bc_ops->cmp_two_keys(cur, key1, key2, NULL) == 0;
fs/xfs/libxfs/xfs_btree.h
575
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
579
return !xfs_btree_keycmp_gt(cur, key1, key2);
fs/xfs/libxfs/xfs_btree.h
584
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
588
return !xfs_btree_keycmp_lt(cur, key1, key2);
fs/xfs/libxfs/xfs_btree.h
593
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
597
return !xfs_btree_keycmp_eq(cur, key1, key2);
fs/xfs/libxfs/xfs_btree.h
603
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
608
return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) < 0;
fs/xfs/libxfs/xfs_btree.h
613
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
618
return cur->bc_ops->cmp_two_keys(cur, key1, key2, mask) > 0;
fs/xfs/libxfs/xfs_btree.h
623
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
628
return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
fs/xfs/libxfs/xfs_btree.h
634
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
640
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/libxfs/xfs_btree.h
642
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/libxfs/xfs_btree.h
647
void xfs_btree_set_ptr_null(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
649
int xfs_btree_get_buf_block(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
652
int xfs_btree_read_buf_block(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
655
void xfs_btree_set_sibling(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
658
void xfs_btree_init_block_cur(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
660
void xfs_btree_copy_ptrs(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
663
void xfs_btree_copy_keys(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
666
void xfs_btree_init_ptr_from_cur(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
677
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_btree.h
683
cur = kmem_cache_zalloc(cache,
fs/xfs/libxfs/xfs_btree.h
685
cur->bc_ops = ops;
fs/xfs/libxfs/xfs_btree.h
686
cur->bc_tp = tp;
fs/xfs/libxfs/xfs_btree.h
687
cur->bc_mp = mp;
fs/xfs/libxfs/xfs_btree.h
688
cur->bc_maxlevels = maxlevels;
fs/xfs/libxfs/xfs_btree.h
689
cur->bc_cache = cache;
fs/xfs/libxfs/xfs_btree.h
691
return cur;
fs/xfs/libxfs/xfs_btree.h
697
int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);
fs/xfs/libxfs/xfs_btree.h
702
const struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
705
return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
fs/xfs/libxfs/xfs_btree.h
706
level == cur->bc_nlevels - 1;
fs/xfs/libxfs/xfs_btree.h
709
int xfs_btree_alloc_metafile_block(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
712
int xfs_btree_free_metafile_block(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree.h
81
#define XFS_BTREE_STATS_INC(cur, stat) \
fs/xfs/libxfs/xfs_btree.h
82
XFS_STATS_INC_OFF((cur)->bc_mp, \
fs/xfs/libxfs/xfs_btree.h
83
(cur)->bc_ops->statoff + __XBTS_ ## stat)
fs/xfs/libxfs/xfs_btree.h
84
#define XFS_BTREE_STATS_ADD(cur, stat, val) \
fs/xfs/libxfs/xfs_btree.h
85
XFS_STATS_ADD_OFF((cur)->bc_mp, \
fs/xfs/libxfs/xfs_btree.h
86
(cur)->bc_ops->statoff + __XBTS_ ## stat, val)
fs/xfs/libxfs/xfs_btree_mem.c
163
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.c
168
struct xfbtree *xfbt = cur->bc_mem.xfbtree;
fs/xfs/libxfs/xfs_btree_mem.c
171
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);
fs/xfs/libxfs/xfs_btree_mem.c
173
trace_xfbtree_alloc_block(xfbt, cur, bno);
fs/xfs/libxfs/xfs_btree_mem.c
190
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.c
193
struct xfbtree *xfbt = cur->bc_mem.xfbtree;
fs/xfs/libxfs/xfs_btree_mem.c
197
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);
fs/xfs/libxfs/xfs_btree_mem.c
199
trace_xfbtree_free_block(xfbt, cur, bno);
fs/xfs/libxfs/xfs_btree_mem.c
210
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.c
213
struct xfbtree *xfbt = cur->bc_mem.xfbtree;
fs/xfs/libxfs/xfs_btree_mem.c
221
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.c
224
struct xfbtree *xfbt = cur->bc_mem.xfbtree;
fs/xfs/libxfs/xfs_btree_mem.c
26
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.c
30
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);
fs/xfs/libxfs/xfs_btree_mem.c
32
cur->bc_mem.xfbtree->root = *ptr;
fs/xfs/libxfs/xfs_btree_mem.c
33
cur->bc_mem.xfbtree->nlevels += inc;
fs/xfs/libxfs/xfs_btree_mem.c
39
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.c
42
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);
fs/xfs/libxfs/xfs_btree_mem.c
44
*ptr = cur->bc_mem.xfbtree->root;
fs/xfs/libxfs/xfs_btree_mem.c
50
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_btree_mem.c
54
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);
fs/xfs/libxfs/xfs_btree_mem.c
56
ncur = xfs_btree_alloc_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ops,
fs/xfs/libxfs/xfs_btree_mem.c
57
cur->bc_maxlevels, cur->bc_cache);
fs/xfs/libxfs/xfs_btree_mem.c
58
ncur->bc_flags = cur->bc_flags;
fs/xfs/libxfs/xfs_btree_mem.c
59
ncur->bc_nlevels = cur->bc_nlevels;
fs/xfs/libxfs/xfs_btree_mem.c
60
ncur->bc_mem.xfbtree = cur->bc_mem.xfbtree;
fs/xfs/libxfs/xfs_btree_mem.c
61
if (cur->bc_group)
fs/xfs/libxfs/xfs_btree_mem.c
62
ncur->bc_group = xfs_group_hold(cur->bc_group);
fs/xfs/libxfs/xfs_btree_mem.h
50
void xfbtree_set_root(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.h
52
void xfbtree_init_ptr_from_cur(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.h
54
struct xfs_btree_cur *xfbtree_dup_cursor(struct xfs_btree_cur *cur);
fs/xfs/libxfs/xfs_btree_mem.h
56
int xfbtree_get_minrecs(struct xfs_btree_cur *cur, int level);
fs/xfs/libxfs/xfs_btree_mem.h
57
int xfbtree_get_maxrecs(struct xfs_btree_cur *cur, int level);
fs/xfs/libxfs/xfs_btree_mem.h
59
int xfbtree_alloc_block(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_mem.h
62
int xfbtree_free_block(struct xfs_btree_cur *cur, struct xfs_buf *bp);
fs/xfs/libxfs/xfs_btree_staging.c
127
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
130
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
fs/xfs/libxfs/xfs_btree_staging.c
131
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
fs/xfs/libxfs/xfs_btree_staging.c
132
ASSERT(cur->bc_tp == NULL);
fs/xfs/libxfs/xfs_btree_staging.c
134
cur->bc_ino.ifake = ifake;
fs/xfs/libxfs/xfs_btree_staging.c
135
cur->bc_nlevels = ifake->if_levels;
fs/xfs/libxfs/xfs_btree_staging.c
136
cur->bc_ino.forksize = ifake->if_fork_size;
fs/xfs/libxfs/xfs_btree_staging.c
137
cur->bc_ino.whichfork = XFS_STAGING_FORK;
fs/xfs/libxfs/xfs_btree_staging.c
138
cur->bc_flags |= XFS_BTREE_STAGING;
fs/xfs/libxfs/xfs_btree_staging.c
149
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
153
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_btree_staging.c
154
ASSERT(cur->bc_tp == NULL);
fs/xfs/libxfs/xfs_btree_staging.c
156
trace_xfs_btree_commit_ifakeroot(cur);
fs/xfs/libxfs/xfs_btree_staging.c
158
cur->bc_ino.ifake = NULL;
fs/xfs/libxfs/xfs_btree_staging.c
159
cur->bc_ino.whichfork = whichfork;
fs/xfs/libxfs/xfs_btree_staging.c
160
cur->bc_flags &= ~XFS_BTREE_STAGING;
fs/xfs/libxfs/xfs_btree_staging.c
161
cur->bc_tp = tp;
fs/xfs/libxfs/xfs_btree_staging.c
284
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
299
if (xfs_btree_at_iroot(cur, level)) {
fs/xfs/libxfs/xfs_btree_staging.c
300
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_btree_staging.c
306
new_size = bbl->iroot_size(cur, level, nr_this_block, priv);
fs/xfs/libxfs/xfs_btree_staging.c
311
xfs_btree_init_block(cur->bc_mp, ifp->if_broot, cur->bc_ops,
fs/xfs/libxfs/xfs_btree_staging.c
312
level, nr_this_block, cur->bc_ino.ip->i_ino);
fs/xfs/libxfs/xfs_btree_staging.c
316
xfs_btree_set_ptr_null(cur, ptrp);
fs/xfs/libxfs/xfs_btree_staging.c
321
xfs_btree_set_ptr_null(cur, &new_ptr);
fs/xfs/libxfs/xfs_btree_staging.c
322
ret = bbl->claim_block(cur, &new_ptr, priv);
fs/xfs/libxfs/xfs_btree_staging.c
326
ASSERT(!xfs_btree_ptr_is_null(cur, &new_ptr));
fs/xfs/libxfs/xfs_btree_staging.c
328
ret = xfs_btree_get_buf_block(cur, &new_ptr, &new_block, &new_bp);
fs/xfs/libxfs/xfs_btree_staging.c
337
xfs_btree_set_sibling(cur, *blockp, &new_ptr, XFS_BB_RIGHTSIB);
fs/xfs/libxfs/xfs_btree_staging.c
344
xfs_btree_init_block_cur(cur, new_bp, level, nr_this_block);
fs/xfs/libxfs/xfs_btree_staging.c
345
xfs_btree_set_sibling(cur, new_block, ptrp, XFS_BB_LEFTSIB);
fs/xfs/libxfs/xfs_btree_staging.c
350
xfs_btree_copy_ptrs(cur, ptrp, &new_ptr, 1);
fs/xfs/libxfs/xfs_btree_staging.c
357
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
368
ret = get_records(cur, j, block, recs_this_block - j + 1, priv);
fs/xfs/libxfs/xfs_btree_staging.c
387
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
403
ASSERT(!xfs_btree_ptr_is_null(cur, child_ptr));
fs/xfs/libxfs/xfs_btree_staging.c
410
ret = xfs_btree_read_buf_block(cur, child_ptr, 0, &child_block,
fs/xfs/libxfs/xfs_btree_staging.c
415
block_ptr = xfs_btree_ptr_addr(cur, j, block);
fs/xfs/libxfs/xfs_btree_staging.c
416
xfs_btree_copy_ptrs(cur, block_ptr, child_ptr, 1);
fs/xfs/libxfs/xfs_btree_staging.c
418
block_key = xfs_btree_key_addr(cur, j, block);
fs/xfs/libxfs/xfs_btree_staging.c
419
xfs_btree_get_keys(cur, child_block, &child_key);
fs/xfs/libxfs/xfs_btree_staging.c
420
xfs_btree_copy_keys(cur, block_key, &child_key, 1);
fs/xfs/libxfs/xfs_btree_staging.c
422
xfs_btree_get_sibling(cur, child_block, child_ptr,
fs/xfs/libxfs/xfs_btree_staging.c
437
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
443
if (level == cur->bc_nlevels - 1 && cur->bc_ops->get_dmaxrecs)
fs/xfs/libxfs/xfs_btree_staging.c
444
return cur->bc_ops->get_dmaxrecs(cur, level);
fs/xfs/libxfs/xfs_btree_staging.c
446
ret = cur->bc_ops->get_maxrecs(cur, level);
fs/xfs/libxfs/xfs_btree_staging.c
461
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
465
unsigned int npb = xfs_btree_bload_max_npb(cur, bbl, level);
fs/xfs/libxfs/xfs_btree_staging.c
468
if (level == cur->bc_nlevels - 1)
fs/xfs/libxfs/xfs_btree_staging.c
471
return max_t(unsigned int, cur->bc_ops->get_minrecs(cur, level), npb);
fs/xfs/libxfs/xfs_btree_staging.c
482
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
499
if (cur->bc_ops->get_dmaxrecs)
fs/xfs/libxfs/xfs_btree_staging.c
500
maxnr = cur->bc_ops->get_dmaxrecs(cur, level);
fs/xfs/libxfs/xfs_btree_staging.c
502
maxnr = cur->bc_ops->get_maxrecs(cur, level);
fs/xfs/libxfs/xfs_btree_staging.c
511
desired_npb = xfs_btree_bload_desired_npb(cur, bbl, level);
fs/xfs/libxfs/xfs_btree_staging.c
531
trace_xfs_btree_bload_level_geometry(cur, level, nr_this_level,
fs/xfs/libxfs/xfs_btree_staging.c
545
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
552
maxr = cur->bc_ops->get_maxrecs(cur, level);
fs/xfs/libxfs/xfs_btree_staging.c
553
minr = cur->bc_ops->get_minrecs(cur, level);
fs/xfs/libxfs/xfs_btree_staging.c
573
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
577
const struct xfs_btree_ops *ops = cur->bc_ops;
fs/xfs/libxfs/xfs_btree_staging.c
581
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_btree_staging.c
590
cur->bc_nlevels = cur->bc_maxlevels - 1;
fs/xfs/libxfs/xfs_btree_staging.c
591
xfs_btree_bload_ensure_slack(cur, &bbl->leaf_slack, 0);
fs/xfs/libxfs/xfs_btree_staging.c
592
xfs_btree_bload_ensure_slack(cur, &bbl->node_slack, 1);
fs/xfs/libxfs/xfs_btree_staging.c
595
for (cur->bc_nlevels = 1; cur->bc_nlevels <= cur->bc_maxlevels;) {
fs/xfs/libxfs/xfs_btree_staging.c
598
unsigned int level = cur->bc_nlevels - 1;
fs/xfs/libxfs/xfs_btree_staging.c
60
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
601
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
fs/xfs/libxfs/xfs_btree_staging.c
63
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
fs/xfs/libxfs/xfs_btree_staging.c
637
cur->bc_nlevels++;
fs/xfs/libxfs/xfs_btree_staging.c
638
ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
fs/xfs/libxfs/xfs_btree_staging.c
639
xfs_btree_bload_level_geometry(cur, bbl, level,
fs/xfs/libxfs/xfs_btree_staging.c
64
ASSERT(cur->bc_ops->type != XFS_BTREE_TYPE_INODE);
fs/xfs/libxfs/xfs_btree_staging.c
65
ASSERT(cur->bc_tp == NULL);
fs/xfs/libxfs/xfs_btree_staging.c
653
cur->bc_nlevels++;
fs/xfs/libxfs/xfs_btree_staging.c
654
ASSERT(cur->bc_nlevels <= cur->bc_maxlevels);
fs/xfs/libxfs/xfs_btree_staging.c
661
if (cur->bc_nlevels > cur->bc_maxlevels)
fs/xfs/libxfs/xfs_btree_staging.c
664
bbl->btree_height = cur->bc_nlevels;
fs/xfs/libxfs/xfs_btree_staging.c
67
cur->bc_ag.afake = afake;
fs/xfs/libxfs/xfs_btree_staging.c
675
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
68
cur->bc_nlevels = afake->af_levels;
fs/xfs/libxfs/xfs_btree_staging.c
69
cur->bc_flags |= XFS_BTREE_STAGING;
fs/xfs/libxfs/xfs_btree_staging.c
693
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_btree_staging.c
696
cur->bc_nlevels = bbl->btree_height;
fs/xfs/libxfs/xfs_btree_staging.c
697
xfs_btree_set_ptr_null(cur, &child_ptr);
fs/xfs/libxfs/xfs_btree_staging.c
698
xfs_btree_set_ptr_null(cur, &ptr);
fs/xfs/libxfs/xfs_btree_staging.c
701
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
fs/xfs/libxfs/xfs_btree_staging.c
717
ret = xfs_btree_bload_prep_block(cur, bbl, &buffers_list, level,
fs/xfs/libxfs/xfs_btree_staging.c
722
trace_xfs_btree_bload_block(cur, level, i, blocks, &ptr,
fs/xfs/libxfs/xfs_btree_staging.c
725
ret = xfs_btree_bload_leaf(cur, nr_this_block, bbl->get_records,
fs/xfs/libxfs/xfs_btree_staging.c
735
xfs_btree_copy_ptrs(cur, &child_ptr, &ptr, 1);
fs/xfs/libxfs/xfs_btree_staging.c
744
for (level = 1; level < cur->bc_nlevels; level++) {
fs/xfs/libxfs/xfs_btree_staging.c
749
xfs_btree_set_ptr_null(cur, &ptr);
fs/xfs/libxfs/xfs_btree_staging.c
751
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
fs/xfs/libxfs/xfs_btree_staging.c
761
ret = xfs_btree_bload_prep_block(cur, bbl,
fs/xfs/libxfs/xfs_btree_staging.c
767
trace_xfs_btree_bload_block(cur, level, i, blocks,
fs/xfs/libxfs/xfs_btree_staging.c
770
ret = xfs_btree_bload_node(cur, nr_this_block,
fs/xfs/libxfs/xfs_btree_staging.c
780
xfs_btree_copy_ptrs(cur, &first_ptr, &ptr, 1);
fs/xfs/libxfs/xfs_btree_staging.c
788
xfs_btree_copy_ptrs(cur, &child_ptr, &first_ptr, 1);
fs/xfs/libxfs/xfs_btree_staging.c
792
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
fs/xfs/libxfs/xfs_btree_staging.c
793
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
fs/xfs/libxfs/xfs_btree_staging.c
794
cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
fs/xfs/libxfs/xfs_btree_staging.c
795
cur->bc_ino.ifake->if_blocks = total_blocks - 1;
fs/xfs/libxfs/xfs_btree_staging.c
797
cur->bc_ag.afake->af_root = be32_to_cpu(ptr.s);
fs/xfs/libxfs/xfs_btree_staging.c
798
cur->bc_ag.afake->af_levels = cur->bc_nlevels;
fs/xfs/libxfs/xfs_btree_staging.c
799
cur->bc_ag.afake->af_blocks = total_blocks;
fs/xfs/libxfs/xfs_btree_staging.c
80
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.c
84
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_btree_staging.c
85
ASSERT(cur->bc_tp == NULL);
fs/xfs/libxfs/xfs_btree_staging.c
87
trace_xfs_btree_commit_afakeroot(cur);
fs/xfs/libxfs/xfs_btree_staging.c
89
cur->bc_ag.afake = NULL;
fs/xfs/libxfs/xfs_btree_staging.c
90
cur->bc_ag.agbp = agbp;
fs/xfs/libxfs/xfs_btree_staging.c
91
cur->bc_flags &= ~XFS_BTREE_STAGING;
fs/xfs/libxfs/xfs_btree_staging.c
92
cur->bc_tp = tp;
fs/xfs/libxfs/xfs_btree_staging.h
125
int xfs_btree_bload_compute_geometry(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.h
127
int xfs_btree_bload(struct xfs_btree_cur *cur, struct xfs_btree_bload *bbl,
fs/xfs/libxfs/xfs_btree_staging.h
22
void xfs_btree_stage_afakeroot(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.h
24
void xfs_btree_commit_afakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
fs/xfs/libxfs/xfs_btree_staging.h
43
void xfs_btree_stage_ifakeroot(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.h
45
void xfs_btree_commit_ifakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp,
fs/xfs/libxfs/xfs_btree_staging.h
49
typedef int (*xfs_btree_bload_get_records_fn)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.h
52
typedef int (*xfs_btree_bload_claim_block_fn)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_btree_staging.h
54
typedef size_t (*xfs_btree_bload_iroot_size_fn)(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_health.h
226
void xfs_btree_mark_sick(struct xfs_btree_cur *cur);
fs/xfs/libxfs/xfs_ialloc.c
1002
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_ialloc.c
1003
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1013
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
1021
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
fs/xfs/libxfs/xfs_ialloc.c
1026
error = xfs_inobt_get_rec(cur, rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
1029
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_ialloc.c
1030
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1101
struct xfs_btree_cur *cur, *tcur;
fs/xfs/libxfs/xfs_ialloc.c
1114
cur = xfs_inobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
1122
error = xfs_check_agi_freecount(cur);
fs/xfs/libxfs/xfs_ialloc.c
1133
error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
fs/xfs/libxfs/xfs_ialloc.c
1137
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1142
error = xfs_inobt_get_rec(cur, &rec, &j);
fs/xfs/libxfs/xfs_ialloc.c
1146
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1165
error = xfs_btree_dup_cursor(cur, &tcur);
fs/xfs/libxfs/xfs_ialloc.c
1181
error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
fs/xfs/libxfs/xfs_ialloc.c
1192
error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
fs/xfs/libxfs/xfs_ialloc.c
1214
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
1215
cur = tcur;
fs/xfs/libxfs/xfs_ialloc.c
1239
error = xfs_ialloc_next_rec(cur, &rec,
fs/xfs/libxfs/xfs_ialloc.c
1268
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
1278
error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
fs/xfs/libxfs/xfs_ialloc.c
1284
error = xfs_inobt_get_rec(cur, &rec, &j);
fs/xfs/libxfs/xfs_ialloc.c
1301
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
fs/xfs/libxfs/xfs_ialloc.c
1305
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1311
error = xfs_inobt_get_rec(cur, &rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
1315
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1321
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_ialloc.c
1325
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1347
error = xfs_inobt_update(cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
1354
error = xfs_check_agi_freecount(cur);
fs/xfs/libxfs/xfs_ialloc.c
1358
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
1365
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
137
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
141
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_ialloc.c
145
cur->bc_ops->name, cur->bc_group->xg_gno, fa);
fs/xfs/libxfs/xfs_ialloc.c
1468
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
1475
error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
fs/xfs/libxfs/xfs_ialloc.c
1480
error = xfs_inobt_get_rec(cur, rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
1483
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_ialloc.c
1484
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1494
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
fs/xfs/libxfs/xfs_ialloc.c
1497
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_ialloc.c
1498
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
150
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1502
error = xfs_inobt_get_rec(cur, rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
1505
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_ialloc.c
1506
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1519
struct xfs_btree_cur *cur, /* inobt cursor */
fs/xfs/libxfs/xfs_ialloc.c
1527
error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
fs/xfs/libxfs/xfs_ialloc.c
1530
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_ialloc.c
1531
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1535
error = xfs_inobt_get_rec(cur, &rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
1538
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_ialloc.c
1539
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1542
ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
fs/xfs/libxfs/xfs_ialloc.c
1548
if (XFS_IS_CORRUPT(cur->bc_mp,
fs/xfs/libxfs/xfs_ialloc.c
1551
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
1555
return xfs_inobt_update(cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
1577
struct xfs_btree_cur *cur; /* finobt cursor */
fs/xfs/libxfs/xfs_ialloc.c
159
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
1595
cur = xfs_finobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
1597
error = xfs_check_agi_freecount(cur);
fs/xfs/libxfs/xfs_ialloc.c
1607
error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
1609
error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
163
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_ialloc.c
1632
error = xfs_inobt_update(cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
1634
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_ialloc.c
1667
error = xfs_check_agi_freecount(cur);
fs/xfs/libxfs/xfs_ialloc.c
1672
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
1679
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
168
error = xfs_btree_get_rec(cur, &rec, stat);
fs/xfs/libxfs/xfs_ialloc.c
173
fa = xfs_inobt_check_irec(to_perag(cur->bc_group), irec);
fs/xfs/libxfs/xfs_ialloc.c
175
return xfs_inobt_complain_bad_rec(cur, fa, irec);
fs/xfs/libxfs/xfs_ialloc.c
185
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
192
cur->bc_rec.i.ir_holemask = holemask;
fs/xfs/libxfs/xfs_ialloc.c
193
cur->bc_rec.i.ir_count = count;
fs/xfs/libxfs/xfs_ialloc.c
194
cur->bc_rec.i.ir_freecount = freecount;
fs/xfs/libxfs/xfs_ialloc.c
195
cur->bc_rec.i.ir_free = free;
fs/xfs/libxfs/xfs_ialloc.c
196
return xfs_btree_insert(cur, stat);
fs/xfs/libxfs/xfs_ialloc.c
2084
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc.c
2097
cur = xfs_inobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
2099
error = xfs_check_agi_freecount(cur);
fs/xfs/libxfs/xfs_ialloc.c
2106
if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
fs/xfs/libxfs/xfs_ialloc.c
211
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc.c
2112
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
2116
error = xfs_inobt_get_rec(cur, &rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
2123
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
2164
if ((error = xfs_btree_delete(cur, &i))) {
fs/xfs/libxfs/xfs_ialloc.c
217
cur = xfs_finobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
2176
error = xfs_inobt_update(cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
219
cur = xfs_inobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
2192
error = xfs_check_agi_freecount(cur);
fs/xfs/libxfs/xfs_ialloc.c
2197
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
2201
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
2217
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc.c
2223
cur = xfs_finobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
2225
error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
fs/xfs/libxfs/xfs_ialloc.c
2235
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
224
error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
fs/xfs/libxfs/xfs_ialloc.c
2240
error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
fs/xfs/libxfs/xfs_ialloc.c
2258
error = xfs_inobt_get_rec(cur, &rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
226
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
2262
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
2273
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
2292
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_ialloc.c
2297
error = xfs_inobt_update(cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
2303
error = xfs_check_agi_freecount(cur);
fs/xfs/libxfs/xfs_ialloc.c
2307
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
231
error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
fs/xfs/libxfs/xfs_ialloc.c
2311
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
236
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
2404
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc.c
242
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
2423
cur = xfs_inobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
2424
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
fs/xfs/libxfs/xfs_ialloc.c
2427
error = xfs_inobt_get_rec(cur, &rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
2433
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_ialloc.c
253
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_ialloc.c
255
if (cur->bc_nlevels == 1) {
fs/xfs/libxfs/xfs_ialloc.c
261
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
fs/xfs/libxfs/xfs_ialloc.c
266
error = xfs_inobt_get_rec(cur, &rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
272
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_ialloc.c
278
if (!xfs_is_shutdown(cur->bc_mp)) {
fs/xfs/libxfs/xfs_ialloc.c
280
to_perag(cur->bc_group)->pagi_freecount);
fs/xfs/libxfs/xfs_ialloc.c
2844
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
2854
error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
fs/xfs/libxfs/xfs_ialloc.c
286
#define xfs_check_agi_freecount(cur) 0
fs/xfs/libxfs/xfs_ialloc.c
2861
error = xfs_inobt_get_rec(cur, &irec, &has_record);
fs/xfs/libxfs/xfs_ialloc.c
2878
error = xfs_btree_increment(cur, 0, &has_record);
fs/xfs/libxfs/xfs_ialloc.c
2890
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
2900
agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
fs/xfs/libxfs/xfs_ialloc.c
2901
last_agino = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
fs/xfs/libxfs/xfs_ialloc.c
2903
error = xfs_ialloc_count_ondisk(cur, agino, last_agino, &allocated);
fs/xfs/libxfs/xfs_ialloc.c
2924
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
2932
xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
fs/xfs/libxfs/xfs_ialloc.c
2933
fa = xfs_inobt_check_irec(to_perag(cur->bc_group), &irec);
fs/xfs/libxfs/xfs_ialloc.c
2935
return xfs_inobt_complain_bad_rec(cur, fa, &irec);
fs/xfs/libxfs/xfs_ialloc.c
2946
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
2953
ASSERT(xfs_btree_is_ino(cur->bc_ops));
fs/xfs/libxfs/xfs_ialloc.c
2954
error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
fs/xfs/libxfs/xfs_ialloc.c
3167
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc.c
3175
cur = xfs_inobt_init_cursor(pag, tp, agibp);
fs/xfs/libxfs/xfs_ialloc.c
3179
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has);
fs/xfs/libxfs/xfs_ialloc.c
3183
error = xfs_inobt_get_rec(cur, &rec, &has);
fs/xfs/libxfs/xfs_ialloc.c
3199
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_ialloc.c
37
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_ialloc.c
42
cur->bc_rec.i.ir_startino = ino;
fs/xfs/libxfs/xfs_ialloc.c
43
cur->bc_rec.i.ir_holemask = 0;
fs/xfs/libxfs/xfs_ialloc.c
44
cur->bc_rec.i.ir_count = 0;
fs/xfs/libxfs/xfs_ialloc.c
45
cur->bc_rec.i.ir_freecount = 0;
fs/xfs/libxfs/xfs_ialloc.c
46
cur->bc_rec.i.ir_free = 0;
fs/xfs/libxfs/xfs_ialloc.c
47
return xfs_btree_lookup(cur, dir, stat);
fs/xfs/libxfs/xfs_ialloc.c
557
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc.c
56
struct xfs_btree_cur *cur, /* btree cursor */
fs/xfs/libxfs/xfs_ialloc.c
562
cur = xfs_inobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
565
error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
fs/xfs/libxfs/xfs_ialloc.c
570
error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
fs/xfs/libxfs/xfs_ialloc.c
576
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
587
error = xfs_inobt_get_rec(cur, &rec, &i);
fs/xfs/libxfs/xfs_ialloc.c
591
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
596
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
606
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
62
if (xfs_has_sparseinodes(cur->bc_mp)) {
fs/xfs/libxfs/xfs_ialloc.c
622
error = xfs_inobt_update(cur, nrec);
fs/xfs/libxfs/xfs_ialloc.c
627
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
630
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
651
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc.c
655
cur = xfs_finobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc.c
658
error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
fs/xfs/libxfs/xfs_ialloc.c
663
error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
fs/xfs/libxfs/xfs_ialloc.c
669
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_ialloc.c
674
error = xfs_inobt_update(cur, nrec);
fs/xfs/libxfs/xfs_ialloc.c
679
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_ialloc.c
682
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
fs/xfs/libxfs/xfs_ialloc.c
71
return xfs_btree_update(cur, &rec);
fs/xfs/libxfs/xfs_ialloc.c
982
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.c
991
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_ialloc.c
993
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_ialloc.c
999
error = xfs_inobt_get_rec(cur, rec, &i);
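
Note: the xfs_ialloc.c hits above repeat one canonical cursor lifecycle: init the cursor, position it with a lookup, fetch the record, cross-check the stat value, and tear the cursor down with the final error code. A minimal sketch of that shape, assembled only from calls visible in the hits; the wrapper name is hypothetical, and this builds only inside the kernel tree (fs/xfs):

static int
xfs_example_inobt_peek(		/* hypothetical helper */
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agino_t		agino)
{
	struct xfs_inobt_rec_incore	rec;
	struct xfs_btree_cur	*cur;
	int			i;
	int			error;

	cur = xfs_inobt_init_cursor(pag, tp, agbp);

	/* Find the inode chunk record at or before agino, if any. */
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
	if (error || i == 0)
		goto out;

	error = xfs_inobt_get_rec(cur, &rec, &i);
	if (error)
		goto out;
	if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
		/* lookup said a record exists but get_rec disagreed */
		xfs_btree_mark_sick(cur);
		error = -EFSCORRUPTED;
	}
	/* on success, rec describes the chunk covering agino */
out:
	xfs_btree_del_cursor(cur, error);
	return error;
}
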
fs/xfs/libxfs/xfs_ialloc.h
102
int xfs_ialloc_has_inodes_at_extent(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc.h
105
int xfs_ialloc_count_inodes(struct xfs_btree_cur *cur, xfs_agino_t *count,
fs/xfs/libxfs/xfs_ialloc.h
107
int xfs_inobt_insert_rec(struct xfs_btree_cur *cur, uint16_t holemask,
fs/xfs/libxfs/xfs_ialloc.h
77
int xfs_inobt_lookup(struct xfs_btree_cur *cur, xfs_agino_t ino,
fs/xfs/libxfs/xfs_ialloc.h
83
int xfs_inobt_get_rec(struct xfs_btree_cur *cur,
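
Note: the xfs_ialloc.c:37-47 hits show how every inobt lookup works: the search key is staged in cur->bc_rec.i and the generic btree code compares against it. A reconstruction from those hits (the two middle parameter names are assumptions):

int					/* error */
xfs_inobt_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agino_t		ino,	/* starting inode of chunk */
	xfs_lookup_t		dir,	/* <=, >=, == */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.i.ir_startino = ino;
	cur->bc_rec.i.ir_holemask = 0;
	cur->bc_rec.i.ir_count = 0;
	cur->bc_rec.i.ir_freecount = 0;
	cur->bc_rec.i.ir_free = 0;
	return xfs_btree_lookup(cur, dir, stat);
}

Only ir_startino participates in the key comparison; the remaining fields are zeroed so the staged record is fully initialized.
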
fs/xfs/libxfs/xfs_ialloc_btree.c
102
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
113
args.tp = cur->bc_tp;
fs/xfs/libxfs/xfs_ialloc_btree.c
114
args.mp = cur->bc_mp;
fs/xfs/libxfs/xfs_ialloc_btree.c
115
args.pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_ialloc_btree.c
135
xfs_inobt_mod_blockcount(cur, 1);
fs/xfs/libxfs/xfs_ialloc_btree.c
141
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
146
return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
fs/xfs/libxfs/xfs_ialloc_btree.c
151
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
156
if (cur->bc_mp->m_finobt_nores)
fs/xfs/libxfs/xfs_ialloc_btree.c
157
return xfs_inobt_alloc_block(cur, start, new, stat);
fs/xfs/libxfs/xfs_ialloc_btree.c
158
return __xfs_inobt_alloc_block(cur, start, new, stat,
fs/xfs/libxfs/xfs_ialloc_btree.c
164
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
170
xfs_inobt_mod_blockcount(cur, -1);
fs/xfs/libxfs/xfs_ialloc_btree.c
171
fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
fs/xfs/libxfs/xfs_ialloc_btree.c
172
return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
fs/xfs/libxfs/xfs_ialloc_btree.c
178
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
181
return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
fs/xfs/libxfs/xfs_ialloc_btree.c
186
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
189
if (cur->bc_mp->m_finobt_nores)
fs/xfs/libxfs/xfs_ialloc_btree.c
190
return xfs_inobt_free_block(cur, bp);
fs/xfs/libxfs/xfs_ialloc_btree.c
191
return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
fs/xfs/libxfs/xfs_ialloc_btree.c
196
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
199
return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
fs/xfs/libxfs/xfs_ialloc_btree.c
224
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
227
rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
fs/xfs/libxfs/xfs_ialloc_btree.c
228
if (xfs_has_sparseinodes(cur->bc_mp)) {
fs/xfs/libxfs/xfs_ialloc_btree.c
230
cpu_to_be16(cur->bc_rec.i.ir_holemask);
fs/xfs/libxfs/xfs_ialloc_btree.c
231
rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
fs/xfs/libxfs/xfs_ialloc_btree.c
232
rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
fs/xfs/libxfs/xfs_ialloc_btree.c
236
cpu_to_be32(cur->bc_rec.i.ir_freecount);
fs/xfs/libxfs/xfs_ialloc_btree.c
238
rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
fs/xfs/libxfs/xfs_ialloc_btree.c
246
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
249
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
fs/xfs/libxfs/xfs_ialloc_btree.c
251
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));
fs/xfs/libxfs/xfs_ialloc_btree.c
258
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
261
struct xfs_agi *agi = cur->bc_ag.agbp->b_addr;
fs/xfs/libxfs/xfs_ialloc_btree.c
263
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agi->agi_seqno));
fs/xfs/libxfs/xfs_ialloc_btree.c
270
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
274
cur->bc_rec.i.ir_startino);
fs/xfs/libxfs/xfs_ialloc_btree.c
279
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
30
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
33
return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
fs/xfs/libxfs/xfs_ialloc_btree.c
38
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_ialloc_btree.c
380
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
390
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
40
return xfs_inobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
fs/xfs/libxfs/xfs_ialloc_btree.c
400
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
41
cur->bc_ag.agbp);
fs/xfs/libxfs/xfs_ialloc_btree.c
46
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_ialloc_btree.c
48
return xfs_finobt_init_cursor(to_perag(cur->bc_group), cur->bc_tp,
fs/xfs/libxfs/xfs_ialloc_btree.c
483
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc_btree.c
485
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_inobt_ops,
fs/xfs/libxfs/xfs_ialloc_btree.c
487
cur->bc_group = xfs_group_hold(pag_group(pag));
fs/xfs/libxfs/xfs_ialloc_btree.c
488
cur->bc_ag.agbp = agbp;
fs/xfs/libxfs/xfs_ialloc_btree.c
49
cur->bc_ag.agbp);
fs/xfs/libxfs/xfs_ialloc_btree.c
492
cur->bc_nlevels = be32_to_cpu(agi->agi_level);
fs/xfs/libxfs/xfs_ialloc_btree.c
494
return cur;
fs/xfs/libxfs/xfs_ialloc_btree.c
509
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc_btree.c
511
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_finobt_ops,
fs/xfs/libxfs/xfs_ialloc_btree.c
513
cur->bc_group = xfs_group_hold(pag_group(pag));
fs/xfs/libxfs/xfs_ialloc_btree.c
514
cur->bc_ag.agbp = agbp;
fs/xfs/libxfs/xfs_ialloc_btree.c
518
cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
fs/xfs/libxfs/xfs_ialloc_btree.c
520
return cur;
fs/xfs/libxfs/xfs_ialloc_btree.c
529
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
534
struct xbtree_afakeroot *afake = cur->bc_ag.afake;
fs/xfs/libxfs/xfs_ialloc_btree.c
537
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_ialloc_btree.c
539
if (xfs_btree_is_ino(cur->bc_ops)) {
fs/xfs/libxfs/xfs_ialloc_btree.c
54
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
543
if (xfs_has_inobtcounts(cur->bc_mp)) {
fs/xfs/libxfs/xfs_ialloc_btree.c
548
xfs_btree_commit_afakeroot(cur, tp, agbp);
fs/xfs/libxfs/xfs_ialloc_btree.c
553
if (xfs_has_inobtcounts(cur->bc_mp)) {
fs/xfs/libxfs/xfs_ialloc_btree.c
558
xfs_btree_commit_afakeroot(cur, tp, agbp);
fs/xfs/libxfs/xfs_ialloc_btree.c
58
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_ialloc_btree.c
63
xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
fs/xfs/libxfs/xfs_ialloc_btree.c
68
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
72
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_ialloc_btree.c
746
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_ialloc_btree.c
754
cur = xfs_finobt_init_cursor(pag, tp, agbp);
fs/xfs/libxfs/xfs_ialloc_btree.c
755
error = xfs_btree_count_blocks(cur, &blocks);
fs/xfs/libxfs/xfs_ialloc_btree.c
756
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_ialloc_btree.c
77
xfs_ialloc_log_agi(cur->bc_tp, agbp,
fs/xfs/libxfs/xfs_ialloc_btree.c
84
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_ialloc_btree.c
87
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_ialloc_btree.c
90
if (!xfs_has_inobtcounts(cur->bc_mp))
fs/xfs/libxfs/xfs_ialloc_btree.c
93
if (xfs_btree_is_fino(cur->bc_ops))
fs/xfs/libxfs/xfs_ialloc_btree.c
97
xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
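
Note: the xfs_ialloc_btree.c hits at lines 151-158 and 186-191 show the same dispatch on both the alloc and free paths: when the finobt has no per-AG metadata reservation (m_finobt_nores), it falls back to the plain inobt behavior. The free side, reassembled from those hits (the function name follows the alloc-side naming and is an assumption):

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	/* without a metadata reservation, free like the inobt does */
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}
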
fs/xfs/libxfs/xfs_ialloc_btree.h
71
void xfs_inobt_commit_staged_btree(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_iext_tree.c
1005
if (cur->pos == 0) {
fs/xfs/libxfs/xfs_iext_tree.c
1008
xfs_iext_get(&old, cur_rec(cur));
fs/xfs/libxfs/xfs_iext_tree.c
1011
new->br_startoff, 1, cur->leaf);
fs/xfs/libxfs/xfs_iext_tree.c
1015
trace_xfs_bmap_pre_update(ip, cur, state, _RET_IP_);
fs/xfs/libxfs/xfs_iext_tree.c
1016
xfs_iext_set(cur_rec(cur), new);
fs/xfs/libxfs/xfs_iext_tree.c
1017
trace_xfs_bmap_post_update(ip, cur, state, _RET_IP_);
fs/xfs/libxfs/xfs_iext_tree.c
1027
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
1030
if (!xfs_iext_valid(ifp, cur))
fs/xfs/libxfs/xfs_iext_tree.c
1032
xfs_iext_get(gotp, cur_rec(cur));
fs/xfs/libxfs/xfs_iext_tree.c
140
static inline struct xfs_iext_rec *cur_rec(struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
142
return &cur->leaf->recs[cur->pos];
fs/xfs/libxfs/xfs_iext_tree.c
146
struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
148
if (!cur->leaf)
fs/xfs/libxfs/xfs_iext_tree.c
150
if (cur->pos < 0 || cur->pos >= xfs_iext_max_recs(ifp))
fs/xfs/libxfs/xfs_iext_tree.c
152
if (xfs_iext_rec_is_empty(cur_rec(cur)))
fs/xfs/libxfs/xfs_iext_tree.c
199
struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
201
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
202
cur->leaf = xfs_iext_find_first_leaf(ifp);
fs/xfs/libxfs/xfs_iext_tree.c
208
struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
212
cur->leaf = xfs_iext_find_last_leaf(ifp);
fs/xfs/libxfs/xfs_iext_tree.c
213
if (!cur->leaf) {
fs/xfs/libxfs/xfs_iext_tree.c
214
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
219
if (xfs_iext_rec_is_empty(&cur->leaf->recs[i]))
fs/xfs/libxfs/xfs_iext_tree.c
222
cur->pos = i - 1;
fs/xfs/libxfs/xfs_iext_tree.c
228
struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
230
if (!cur->leaf) {
fs/xfs/libxfs/xfs_iext_tree.c
231
ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
fs/xfs/libxfs/xfs_iext_tree.c
232
xfs_iext_first(ifp, cur);
fs/xfs/libxfs/xfs_iext_tree.c
236
ASSERT(cur->pos >= 0);
fs/xfs/libxfs/xfs_iext_tree.c
237
ASSERT(cur->pos < xfs_iext_max_recs(ifp));
fs/xfs/libxfs/xfs_iext_tree.c
239
cur->pos++;
fs/xfs/libxfs/xfs_iext_tree.c
240
if (ifp->if_height > 1 && !xfs_iext_valid(ifp, cur) &&
fs/xfs/libxfs/xfs_iext_tree.c
241
cur->leaf->next) {
fs/xfs/libxfs/xfs_iext_tree.c
242
cur->leaf = cur->leaf->next;
fs/xfs/libxfs/xfs_iext_tree.c
243
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
250
struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
252
if (!cur->leaf) {
fs/xfs/libxfs/xfs_iext_tree.c
253
ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
fs/xfs/libxfs/xfs_iext_tree.c
254
xfs_iext_last(ifp, cur);
fs/xfs/libxfs/xfs_iext_tree.c
258
ASSERT(cur->pos >= 0);
fs/xfs/libxfs/xfs_iext_tree.c
259
ASSERT(cur->pos <= RECS_PER_LEAF);
fs/xfs/libxfs/xfs_iext_tree.c
263
cur->pos--;
fs/xfs/libxfs/xfs_iext_tree.c
264
if (xfs_iext_valid(ifp, cur))
fs/xfs/libxfs/xfs_iext_tree.c
266
} while (cur->pos > 0);
fs/xfs/libxfs/xfs_iext_tree.c
268
if (ifp->if_height > 1 && cur->leaf->prev) {
fs/xfs/libxfs/xfs_iext_tree.c
269
cur->leaf = cur->leaf->prev;
fs/xfs/libxfs/xfs_iext_tree.c
270
cur->pos = RECS_PER_LEAF;
fs/xfs/libxfs/xfs_iext_tree.c
548
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
551
struct xfs_iext_leaf *leaf = cur->leaf;
fs/xfs/libxfs/xfs_iext_tree.c
558
if (cur->pos == RECS_PER_LEAF) {
fs/xfs/libxfs/xfs_iext_tree.c
559
cur->leaf = new;
fs/xfs/libxfs/xfs_iext_tree.c
560
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
570
if (cur->pos >= nr_keep) {
fs/xfs/libxfs/xfs_iext_tree.c
571
cur->leaf = new;
fs/xfs/libxfs/xfs_iext_tree.c
572
cur->pos -= nr_keep;
fs/xfs/libxfs/xfs_iext_tree.c
589
struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
597
cur->leaf = ifp->if_data;
fs/xfs/libxfs/xfs_iext_tree.c
598
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
604
struct xfs_iext_cursor *cur)
fs/xfs/libxfs/xfs_iext_tree.c
617
cur->leaf = new;
fs/xfs/libxfs/xfs_iext_tree.c
635
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
645
xfs_iext_alloc_root(ifp, cur);
fs/xfs/libxfs/xfs_iext_tree.c
647
xfs_iext_realloc_root(ifp, cur);
fs/xfs/libxfs/xfs_iext_tree.c
649
nr_entries = xfs_iext_leaf_nr_entries(ifp, cur->leaf, cur->pos);
fs/xfs/libxfs/xfs_iext_tree.c
651
ASSERT(cur->pos >= nr_entries ||
fs/xfs/libxfs/xfs_iext_tree.c
652
xfs_iext_rec_cmp(cur_rec(cur), irec->br_startoff) != 0);
fs/xfs/libxfs/xfs_iext_tree.c
655
new = xfs_iext_split_leaf(cur, &nr_entries);
fs/xfs/libxfs/xfs_iext_tree.c
661
if (cur->leaf != new && cur->pos == 0 && nr_entries > 0) {
fs/xfs/libxfs/xfs_iext_tree.c
662
xfs_iext_update_node(ifp, xfs_iext_leaf_key(cur->leaf, 0),
fs/xfs/libxfs/xfs_iext_tree.c
663
offset, 1, cur->leaf);
fs/xfs/libxfs/xfs_iext_tree.c
666
for (i = nr_entries; i > cur->pos; i--)
fs/xfs/libxfs/xfs_iext_tree.c
667
cur->leaf->recs[i] = cur->leaf->recs[i - 1];
fs/xfs/libxfs/xfs_iext_tree.c
668
xfs_iext_set(cur_rec(cur), irec);
fs/xfs/libxfs/xfs_iext_tree.c
678
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
684
xfs_iext_insert_raw(ifp, cur, irec);
fs/xfs/libxfs/xfs_iext_tree.c
685
trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
fs/xfs/libxfs/xfs_iext_tree.c
807
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
827
if (cur->leaf == leaf) {
fs/xfs/libxfs/xfs_iext_tree.c
828
cur->leaf = leaf->prev;
fs/xfs/libxfs/xfs_iext_tree.c
829
cur->pos += nr_prev;
fs/xfs/libxfs/xfs_iext_tree.c
849
if (cur->leaf == leaf->next) {
fs/xfs/libxfs/xfs_iext_tree.c
850
cur->leaf = leaf;
fs/xfs/libxfs/xfs_iext_tree.c
851
cur->pos += nr_entries;
fs/xfs/libxfs/xfs_iext_tree.c
881
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
885
struct xfs_iext_leaf *leaf = cur->leaf;
fs/xfs/libxfs/xfs_iext_tree.c
889
trace_xfs_iext_remove(ip, cur, state, _RET_IP_);
fs/xfs/libxfs/xfs_iext_tree.c
893
ASSERT(xfs_iext_valid(ifp, cur));
fs/xfs/libxfs/xfs_iext_tree.c
897
nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
fs/xfs/libxfs/xfs_iext_tree.c
898
for (i = cur->pos; i < nr_entries; i++)
fs/xfs/libxfs/xfs_iext_tree.c
903
if (cur->pos == 0 && nr_entries > 0) {
fs/xfs/libxfs/xfs_iext_tree.c
907
} else if (cur->pos == nr_entries) {
fs/xfs/libxfs/xfs_iext_tree.c
909
cur->leaf = leaf->next;
fs/xfs/libxfs/xfs_iext_tree.c
911
cur->leaf = NULL;
fs/xfs/libxfs/xfs_iext_tree.c
912
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
919
xfs_iext_rebalance_leaf(ifp, cur, leaf, offset, nr_entries);
fs/xfs/libxfs/xfs_iext_tree.c
940
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
945
cur->leaf = xfs_iext_find_level(ifp, offset, 1);
fs/xfs/libxfs/xfs_iext_tree.c
946
if (!cur->leaf) {
fs/xfs/libxfs/xfs_iext_tree.c
947
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
951
for (cur->pos = 0; cur->pos < xfs_iext_max_recs(ifp); cur->pos++) {
fs/xfs/libxfs/xfs_iext_tree.c
952
struct xfs_iext_rec *rec = cur_rec(cur);
fs/xfs/libxfs/xfs_iext_tree.c
961
if (ifp->if_height == 1 || !cur->leaf->next)
fs/xfs/libxfs/xfs_iext_tree.c
963
cur->leaf = cur->leaf->next;
fs/xfs/libxfs/xfs_iext_tree.c
964
cur->pos = 0;
fs/xfs/libxfs/xfs_iext_tree.c
965
if (!xfs_iext_valid(ifp, cur))
fs/xfs/libxfs/xfs_iext_tree.c
968
xfs_iext_get(gotp, cur_rec(cur));
fs/xfs/libxfs/xfs_iext_tree.c
981
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_iext_tree.c
985
if (xfs_iext_lookup_extent(ip, ifp, *end - 1, cur, gotp) &&
fs/xfs/libxfs/xfs_iext_tree.c
988
if (!xfs_iext_prev_extent(ifp, cur, gotp))
fs/xfs/libxfs/xfs_iext_tree.c
998
struct xfs_iext_cursor *cur,
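
Note: unlike the on-disk btrees above, the xfs_iext_tree.c hits use struct xfs_iext_cursor, a position into the incore extent tree. cur_rec() at lines 140-142 shows the whole representation, verbatim from the hits:

static inline struct xfs_iext_rec *cur_rec(struct xfs_iext_cursor *cur)
{
	return &cur->leaf->recs[cur->pos];
}

The first/next hits (lines 199-243) combine into the usual walk. A minimal sketch, assuming the for_each_xfs_iext() helper from xfs_inode_fork.h and a hypothetical caller name:

static void
xfs_example_walk_extents(	/* hypothetical */
	struct xfs_ifork	*ifp)
{
	struct xfs_iext_cursor	cur;
	struct xfs_bmbt_irec	got;

	for_each_xfs_iext(ifp, &cur, &got) {
		/* got.br_startoff/br_startblock/br_blockcount are valid here */
	}
}
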
fs/xfs/libxfs/xfs_inode_fork.h
186
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_inode_fork.h
188
void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_inode_fork.h
196
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_inode_fork.h
200
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_inode_fork.h
203
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_inode_fork.h
206
struct xfs_iext_cursor *cur,
fs/xfs/libxfs/xfs_inode_fork.h
215
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
fs/xfs/libxfs/xfs_inode_fork.h
217
xfs_iext_next(ifp, cur);
fs/xfs/libxfs/xfs_inode_fork.h
218
return xfs_iext_get_extent(ifp, cur, gotp);
fs/xfs/libxfs/xfs_inode_fork.h
222
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
fs/xfs/libxfs/xfs_inode_fork.h
224
xfs_iext_prev(ifp, cur);
fs/xfs/libxfs/xfs_inode_fork.h
225
return xfs_iext_get_extent(ifp, cur, gotp);
fs/xfs/libxfs/xfs_inode_fork.h
232
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
fs/xfs/libxfs/xfs_inode_fork.h
234
struct xfs_iext_cursor ncur = *cur;
fs/xfs/libxfs/xfs_inode_fork.h
244
struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
fs/xfs/libxfs/xfs_inode_fork.h
246
struct xfs_iext_cursor ncur = *cur;
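
Note: the xfs_inode_fork.h hits at lines 215-246 pair two idioms: the next/prev helpers advance the caller's cursor in place, while the peek variants copy it into a local ncur first so the caller's position is preserved. A sketch of the peek-next shape; the name and the body past the ncur copy are assumptions layered on the visible hits:

static inline bool
xfs_iext_peek_next_extent(	/* name assumed from the idiom */
	struct xfs_ifork	*ifp,
	struct xfs_iext_cursor	*cur,
	struct xfs_bmbt_irec	*gotp)
{
	struct xfs_iext_cursor	ncur = *cur;	/* don't move the caller */

	xfs_iext_next(ifp, &ncur);
	return xfs_iext_get_extent(ifp, &ncur, gotp);
}
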
fs/xfs/libxfs/xfs_refcount.c
101
cur->bc_rec.rc.rc_startblock = bno;
fs/xfs/libxfs/xfs_refcount.c
102
cur->bc_rec.rc.rc_blockcount = 0;
fs/xfs/libxfs/xfs_refcount.c
1021
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
103
cur->bc_rec.rc.rc_domain = domain;
fs/xfs/libxfs/xfs_refcount.c
104
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
fs/xfs/libxfs/xfs_refcount.c
1040
error = xfs_refcount_find_left_extents(cur, &left, &cleft, domain,
fs/xfs/libxfs/xfs_refcount.c
1044
error = xfs_refcount_find_right_extents(cur, &right, &cright, domain,
fs/xfs/libxfs/xfs_refcount.c
1060
return xfs_refcount_merge_center_extents(cur, &left, &cleft,
fs/xfs/libxfs/xfs_refcount.c
1067
error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
fs/xfs/libxfs/xfs_refcount.c
1083
return xfs_refcount_merge_right_extent(cur, &right, &cright,
fs/xfs/libxfs/xfs_refcount.c
1098
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_refcount.c
1106
overhead = xfs_allocfree_block_count(cur->bc_mp,
fs/xfs/libxfs/xfs_refcount.c
1107
cur->bc_refc.shape_changes);
fs/xfs/libxfs/xfs_refcount.c
1108
overhead += cur->bc_maxlevels;
fs/xfs/libxfs/xfs_refcount.c
1109
overhead *= cur->bc_mp->m_sb.sb_blocksize;
fs/xfs/libxfs/xfs_refcount.c
1115
if (cur->bc_refc.nr_ops > 2 &&
fs/xfs/libxfs/xfs_refcount.c
1116
XFS_TEST_ERROR(cur->bc_mp, XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
fs/xfs/libxfs/xfs_refcount.c
1119
if (cur->bc_refc.nr_ops == 0)
fs/xfs/libxfs/xfs_refcount.c
1121
else if (overhead > cur->bc_tp->t_log_res)
fs/xfs/libxfs/xfs_refcount.c
1123
return cur->bc_tp->t_log_res - overhead >
fs/xfs/libxfs/xfs_refcount.c
1124
cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
fs/xfs/libxfs/xfs_refcount.c
1130
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1135
if (xfs_btree_is_rtrefcount(cur->bc_ops))
fs/xfs/libxfs/xfs_refcount.c
1138
return xfs_free_extent_later(cur->bc_tp,
fs/xfs/libxfs/xfs_refcount.c
1139
xfs_gbno_to_fsb(cur->bc_group, rec->rc_startblock),
fs/xfs/libxfs/xfs_refcount.c
1151
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1164
error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_SHARED, *agbno,
fs/xfs/libxfs/xfs_refcount.c
1169
while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
fs/xfs/libxfs/xfs_refcount.c
1170
error = xfs_refcount_get_rec(cur, &ext, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
1174
ext.rc_startblock = xfs_group_max_blocks(cur->bc_group);
fs/xfs/libxfs/xfs_refcount.c
1192
trace_xfs_refcount_modify_extent(cur, &tmp);
fs/xfs/libxfs/xfs_refcount.c
1198
cur->bc_refc.nr_ops++;
fs/xfs/libxfs/xfs_refcount.c
1200
error = xfs_refcount_insert(cur, &tmp,
fs/xfs/libxfs/xfs_refcount.c
1204
if (XFS_IS_CORRUPT(cur->bc_mp,
fs/xfs/libxfs/xfs_refcount.c
1206
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1211
error = xrefc_free_extent(cur, &tmp);
fs/xfs/libxfs/xfs_refcount.c
1220
if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
fs/xfs/libxfs/xfs_refcount.c
1224
error = xfs_refcount_lookup_ge(cur,
fs/xfs/libxfs/xfs_refcount.c
1238
if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount == 0) ||
fs/xfs/libxfs/xfs_refcount.c
1239
XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount > *aglen)) {
fs/xfs/libxfs/xfs_refcount.c
1240
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1252
trace_xfs_refcount_modify_extent(cur, &ext);
fs/xfs/libxfs/xfs_refcount.c
1253
cur->bc_refc.nr_ops++;
fs/xfs/libxfs/xfs_refcount.c
1255
error = xfs_refcount_update(cur, &ext);
fs/xfs/libxfs/xfs_refcount.c
1259
error = xfs_refcount_delete(cur, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
1262
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
1263
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1269
error = xrefc_free_extent(cur, &ext);
fs/xfs/libxfs/xfs_refcount.c
1275
error = xfs_btree_increment(cur, 0, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
1286
trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
1293
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1303
trace_xfs_refcount_increase(cur, *agbno, *aglen);
fs/xfs/libxfs/xfs_refcount.c
1305
trace_xfs_refcount_decrease(cur, *agbno, *aglen);
fs/xfs/libxfs/xfs_refcount.c
1310
error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
fs/xfs/libxfs/xfs_refcount.c
1317
error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED,
fs/xfs/libxfs/xfs_refcount.c
1327
error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_SHARED,
fs/xfs/libxfs/xfs_refcount.c
1334
cur->bc_refc.shape_changes++;
fs/xfs/libxfs/xfs_refcount.c
1337
error = xfs_refcount_adjust_extents(cur, agbno, aglen, adj);
fs/xfs/libxfs/xfs_refcount.c
1344
trace_xfs_refcount_adjust_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
1354
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1358
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_refcount.c
1359
struct xfs_perag *pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_refcount.c
1363
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1472
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1476
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_refcount.c
1481
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1640
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1652
trace_xfs_refcount_find_shared(cur, agbno, aglen);
fs/xfs/libxfs/xfs_refcount.c
1659
error = xfs_refcount_lookup_le(cur, XFS_REFC_DOMAIN_SHARED, agbno,
fs/xfs/libxfs/xfs_refcount.c
1665
error = xfs_btree_increment(cur, 0, &have);
fs/xfs/libxfs/xfs_refcount.c
1671
error = xfs_refcount_get_rec(cur, &tmp, &i);
fs/xfs/libxfs/xfs_refcount.c
1674
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_refcount.c
1675
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1684
error = xfs_btree_increment(cur, 0, &have);
fs/xfs/libxfs/xfs_refcount.c
1689
error = xfs_refcount_get_rec(cur, &tmp, &i);
fs/xfs/libxfs/xfs_refcount.c
1692
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_refcount.c
1693
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1718
error = xfs_btree_increment(cur, 0, &have);
fs/xfs/libxfs/xfs_refcount.c
1723
error = xfs_refcount_get_rec(cur, &tmp, &i);
fs/xfs/libxfs/xfs_refcount.c
1726
if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
fs/xfs/libxfs/xfs_refcount.c
1727
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
173
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1739
trace_xfs_refcount_find_shared_result(cur, *fbno, *flen);
fs/xfs/libxfs/xfs_refcount.c
1743
trace_xfs_refcount_find_shared_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
176
if (xfs_btree_is_rtrefcount(cur->bc_ops))
fs/xfs/libxfs/xfs_refcount.c
177
return xfs_rtrefcount_check_irec(to_rtg(cur->bc_group), irec);
fs/xfs/libxfs/xfs_refcount.c
178
return xfs_refcount_check_irec(to_perag(cur->bc_group), irec);
fs/xfs/libxfs/xfs_refcount.c
1801
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1814
error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_COW, agbno,
fs/xfs/libxfs/xfs_refcount.c
1818
error = xfs_refcount_get_rec(cur, &ext, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
1821
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec &&
fs/xfs/libxfs/xfs_refcount.c
1823
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1828
ext.rc_startblock = xfs_group_max_blocks(cur->bc_group);
fs/xfs/libxfs/xfs_refcount.c
183
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1837
if (XFS_IS_CORRUPT(cur->bc_mp,
fs/xfs/libxfs/xfs_refcount.c
1839
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1849
trace_xfs_refcount_modify_extent(cur, &tmp);
fs/xfs/libxfs/xfs_refcount.c
1851
error = xfs_refcount_insert(cur, &tmp,
fs/xfs/libxfs/xfs_refcount.c
1855
if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
fs/xfs/libxfs/xfs_refcount.c
1856
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1863
if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
fs/xfs/libxfs/xfs_refcount.c
1864
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1868
if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
fs/xfs/libxfs/xfs_refcount.c
1869
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
187
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_refcount.c
1873
if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
fs/xfs/libxfs/xfs_refcount.c
1874
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
1880
trace_xfs_refcount_modify_extent(cur, &ext);
fs/xfs/libxfs/xfs_refcount.c
1881
error = xfs_refcount_delete(cur, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
1884
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
1885
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
189
if (xfs_btree_is_rtrefcount(cur->bc_ops)) {
fs/xfs/libxfs/xfs_refcount.c
1896
trace_xfs_refcount_modify_extent_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
1905
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
1916
error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
fs/xfs/libxfs/xfs_refcount.c
192
cur->bc_group->xg_gno, fa);
fs/xfs/libxfs/xfs_refcount.c
1921
error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW,
fs/xfs/libxfs/xfs_refcount.c
1929
error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_COW, &agbno,
fs/xfs/libxfs/xfs_refcount.c
1935
error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
fs/xfs/libxfs/xfs_refcount.c
1942
trace_xfs_refcount_adjust_cow_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
196
cur->bc_group->xg_gno, fa);
fs/xfs/libxfs/xfs_refcount.c
201
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
2023
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
2030
if (XFS_IS_CORRUPT(cur->bc_mp,
fs/xfs/libxfs/xfs_refcount.c
2032
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
2041
if (xfs_refcount_check_btrec(cur, &rr->rr_rrec) != NULL ||
fs/xfs/libxfs/xfs_refcount.c
2042
XFS_IS_CORRUPT(cur->bc_mp,
fs/xfs/libxfs/xfs_refcount.c
2044
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
2061
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_refcount.c
210
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
2105
cur = xfs_rtrefcountbt_init_cursor(tp, to_rtg(xg));
fs/xfs/libxfs/xfs_refcount.c
2110
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, to_perag(xg));
fs/xfs/libxfs/xfs_refcount.c
2114
error = xfs_btree_query_range(cur, &low, &high,
fs/xfs/libxfs/xfs_refcount.c
2116
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_refcount.c
2171
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
218
error = xfs_btree_get_rec(cur, &rec, stat);
fs/xfs/libxfs/xfs_refcount.c
2186
return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
fs/xfs/libxfs/xfs_refcount.c
2197
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
2206
fa = xfs_refcount_check_btrec(cur, &irec);
fs/xfs/libxfs/xfs_refcount.c
2208
return xfs_refcount_complain_bad_rec(cur, fa, &irec);
fs/xfs/libxfs/xfs_refcount.c
2210
return query->fn(cur, &irec, query->priv);
fs/xfs/libxfs/xfs_refcount.c
2216
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
2226
return xfs_btree_query_range(cur, &low_brec, &high_brec,
fs/xfs/libxfs/xfs_refcount.c
223
fa = xfs_refcount_check_btrec(cur, irec);
fs/xfs/libxfs/xfs_refcount.c
225
return xfs_refcount_complain_bad_rec(cur, fa, irec);
fs/xfs/libxfs/xfs_refcount.c
227
trace_xfs_refcount_get(cur, irec);
fs/xfs/libxfs/xfs_refcount.c
238
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
245
trace_xfs_refcount_update(cur, irec);
fs/xfs/libxfs/xfs_refcount.c
253
error = xfs_btree_update(cur, &rec);
fs/xfs/libxfs/xfs_refcount.c
255
trace_xfs_refcount_update_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
266
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
272
trace_xfs_refcount_insert(cur, irec);
fs/xfs/libxfs/xfs_refcount.c
274
cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
fs/xfs/libxfs/xfs_refcount.c
275
cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
fs/xfs/libxfs/xfs_refcount.c
276
cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
fs/xfs/libxfs/xfs_refcount.c
277
cur->bc_rec.rc.rc_domain = irec->rc_domain;
fs/xfs/libxfs/xfs_refcount.c
279
error = xfs_btree_insert(cur, i);
fs/xfs/libxfs/xfs_refcount.c
282
if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
fs/xfs/libxfs/xfs_refcount.c
283
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
290
trace_xfs_refcount_insert_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
302
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
309
error = xfs_refcount_get_rec(cur, &irec, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
312
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
313
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
317
trace_xfs_refcount_delete(cur, &irec);
fs/xfs/libxfs/xfs_refcount.c
318
error = xfs_btree_delete(cur, i);
fs/xfs/libxfs/xfs_refcount.c
319
if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
fs/xfs/libxfs/xfs_refcount.c
320
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
326
error = xfs_refcount_lookup_ge(cur, irec.rc_domain, irec.rc_startblock,
fs/xfs/libxfs/xfs_refcount.c
330
trace_xfs_refcount_delete_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
424
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
434
error = xfs_refcount_lookup_le(cur, domain, agbno, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
440
error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
443
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
444
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
454
trace_xfs_refcount_split_extent(cur, &rcext, agbno);
fs/xfs/libxfs/xfs_refcount.c
460
error = xfs_refcount_update(cur, &tmp);
fs/xfs/libxfs/xfs_refcount.c
467
error = xfs_refcount_insert(cur, &tmp, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
470
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
471
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
478
trace_xfs_refcount_split_extent_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
487
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
497
trace_xfs_refcount_merge_center_extents(cur, left, center, right);
fs/xfs/libxfs/xfs_refcount.c
510
error = xfs_refcount_lookup_ge(cur, center->rc_domain,
fs/xfs/libxfs/xfs_refcount.c
514
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
515
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
520
error = xfs_refcount_delete(cur, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
523
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
524
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
53
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
530
error = xfs_refcount_delete(cur, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
533
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
534
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
541
error = xfs_refcount_lookup_le(cur, left->rc_domain,
fs/xfs/libxfs/xfs_refcount.c
545
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
546
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
552
error = xfs_refcount_update(cur, left);
fs/xfs/libxfs/xfs_refcount.c
560
trace_xfs_refcount_merge_center_extents_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
569
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
578
trace_xfs_refcount_merge_left_extent(cur, left, cleft);
fs/xfs/libxfs/xfs_refcount.c
58
trace_xfs_refcount_lookup(cur,
fs/xfs/libxfs/xfs_refcount.c
584
error = xfs_refcount_lookup_le(cur, cleft->rc_domain,
fs/xfs/libxfs/xfs_refcount.c
588
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
589
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
594
error = xfs_refcount_delete(cur, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
597
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
598
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
605
error = xfs_refcount_lookup_le(cur, left->rc_domain,
fs/xfs/libxfs/xfs_refcount.c
609
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
61
cur->bc_rec.rc.rc_startblock = bno;
fs/xfs/libxfs/xfs_refcount.c
610
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
616
error = xfs_refcount_update(cur, left);
fs/xfs/libxfs/xfs_refcount.c
62
cur->bc_rec.rc.rc_blockcount = 0;
fs/xfs/libxfs/xfs_refcount.c
625
trace_xfs_refcount_merge_left_extent_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
63
cur->bc_rec.rc.rc_domain = domain;
fs/xfs/libxfs/xfs_refcount.c
634
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
64
return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
fs/xfs/libxfs/xfs_refcount.c
642
trace_xfs_refcount_merge_right_extent(cur, cright, right);
fs/xfs/libxfs/xfs_refcount.c
651
error = xfs_refcount_lookup_le(cur, cright->rc_domain,
fs/xfs/libxfs/xfs_refcount.c
655
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
656
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
661
error = xfs_refcount_delete(cur, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
664
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
665
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
672
error = xfs_refcount_lookup_le(cur, right->rc_domain,
fs/xfs/libxfs/xfs_refcount.c
676
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
677
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
684
error = xfs_refcount_update(cur, right);
fs/xfs/libxfs/xfs_refcount.c
692
trace_xfs_refcount_merge_right_extent_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
702
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
714
error = xfs_refcount_lookup_le(cur, domain, agbno - 1, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
720
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
723
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
724
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
73
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
736
error = xfs_btree_increment(cur, 0, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
740
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
743
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
744
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
78
trace_xfs_refcount_lookup(cur,
fs/xfs/libxfs/xfs_refcount.c
781
trace_xfs_refcount_find_left_extent(cur, left, cleft, agbno);
fs/xfs/libxfs/xfs_refcount.c
785
trace_xfs_refcount_find_left_extent_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
795
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
807
error = xfs_refcount_lookup_ge(cur, domain, agbno + aglen, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
81
cur->bc_rec.rc.rc_startblock = bno;
fs/xfs/libxfs/xfs_refcount.c
813
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
816
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
817
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
82
cur->bc_rec.rc.rc_blockcount = 0;
fs/xfs/libxfs/xfs_refcount.c
829
error = xfs_btree_decrement(cur, 0, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
83
cur->bc_rec.rc.rc_domain = domain;
fs/xfs/libxfs/xfs_refcount.c
833
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
fs/xfs/libxfs/xfs_refcount.c
836
if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
fs/xfs/libxfs/xfs_refcount.c
837
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_refcount.c
84
return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
fs/xfs/libxfs/xfs_refcount.c
874
trace_xfs_refcount_find_right_extent(cur, cright, right,
fs/xfs/libxfs/xfs_refcount.c
879
trace_xfs_refcount_find_right_extent_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_refcount.c
93
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.c
98
trace_xfs_refcount_lookup(cur,
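
Note: the xfs_refcount.c:1098-1124 hits spell out the throttle that decides whether an adjust loop may keep going: estimate the worst-case log overhead of the shape changes so far and compare it against the transaction's remaining reservation. Reassembled from those hits (the local's type and the bool return are assumptions):

static bool
xfs_refcount_still_have_space(
	struct xfs_btree_cur	*cur)
{
	unsigned long		overhead;

	/* worst-case blocks touched by the shape changes so far */
	overhead = xfs_allocfree_block_count(cur->bc_mp,
			cur->bc_refc.shape_changes);
	overhead += cur->bc_maxlevels;
	overhead *= cur->bc_mp->m_sb.sb_blocksize;

	/* error injection: force frequent restarts after two updates */
	if (cur->bc_refc.nr_ops > 2 &&
	    XFS_TEST_ERROR(cur->bc_mp, XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
		return false;

	if (cur->bc_refc.nr_ops == 0)
		return true;
	else if (overhead > cur->bc_tp->t_log_res)
		return false;
	return cur->bc_tp->t_log_res - overhead >
	       cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}

This is why the adjust loop at line 1169 tests xfs_refcount_still_have_space() on every iteration and bails out early, presumably so the remaining range can be picked up again in a fresh transaction.
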
fs/xfs/libxfs/xfs_refcount.h
118
extern int xfs_refcount_has_records(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
128
extern int xfs_refcount_insert(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
137
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
141
int xfs_refcount_query_range(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
17
extern int xfs_refcount_lookup_le(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
19
extern int xfs_refcount_lookup_ge(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
21
extern int xfs_refcount_lookup_eq(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
23
extern int xfs_refcount_get_rec(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount.h
89
extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
100
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
103
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_refcount_btree.c
104
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_refcount_btree.c
109
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
fs/xfs/libxfs/xfs_refcount_btree.c
110
return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
fs/xfs/libxfs/xfs_refcount_btree.c
116
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
119
return cur->bc_mp->m_refc_mnr[level != 0];
fs/xfs/libxfs/xfs_refcount_btree.c
124
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
127
return cur->bc_mp->m_refc_mxr[level != 0];
fs/xfs/libxfs/xfs_refcount_btree.c
152
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
155
const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
fs/xfs/libxfs/xfs_refcount_btree.c
161
rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
fs/xfs/libxfs/xfs_refcount_btree.c
162
rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
fs/xfs/libxfs/xfs_refcount_btree.c
167
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
170
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
fs/xfs/libxfs/xfs_refcount_btree.c
172
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno));
fs/xfs/libxfs/xfs_refcount_btree.c
179
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
183
const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
fs/xfs/libxfs/xfs_refcount_btree.c
193
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
288
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
298
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
30
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_refcount_btree.c
309
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
32
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
fs/xfs/libxfs/xfs_refcount_btree.c
33
cur->bc_ag.agbp, to_perag(cur->bc_group));
fs/xfs/libxfs/xfs_refcount_btree.c
362
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_refcount_btree.c
366
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_refcountbt_ops,
fs/xfs/libxfs/xfs_refcount_btree.c
368
cur->bc_group = xfs_group_hold(pag_group(pag));
fs/xfs/libxfs/xfs_refcount_btree.c
369
cur->bc_refc.nr_ops = 0;
fs/xfs/libxfs/xfs_refcount_btree.c
370
cur->bc_refc.shape_changes = 0;
fs/xfs/libxfs/xfs_refcount_btree.c
371
cur->bc_ag.agbp = agbp;
fs/xfs/libxfs/xfs_refcount_btree.c
375
cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
fs/xfs/libxfs/xfs_refcount_btree.c
377
return cur;
fs/xfs/libxfs/xfs_refcount_btree.c
38
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
386
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
391
struct xbtree_afakeroot *afake = cur->bc_ag.afake;
fs/xfs/libxfs/xfs_refcount_btree.c
393
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_refcount_btree.c
401
xfs_btree_commit_afakeroot(cur, tp, agbp);
fs/xfs/libxfs/xfs_refcount_btree.c
42
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_refcount_btree.c
52
xfs_alloc_log_agf(cur->bc_tp, agbp,
fs/xfs/libxfs/xfs_refcount_btree.c
58
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_refcount_btree.c
63
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_refcount_btree.c
69
args.tp = cur->bc_tp;
fs/xfs/libxfs/xfs_refcount_btree.c
70
args.mp = cur->bc_mp;
fs/xfs/libxfs/xfs_refcount_btree.c
71
args.pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_refcount_btree.c
84
ASSERT(args.agno == cur->bc_group->xg_gno);
fs/xfs/libxfs/xfs_refcount_btree.c
89
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
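
Note: the xfs_refcount_btree.c:30-33 hits show the dup_cursor callback: when the generic btree code needs a second, independently positioned cursor, it gets one by re-running the constructor with the same mount, transaction, AGF buffer, and group. Reassembled from those hits (the callback name is assumed from the ops-table idiom):

STATIC struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, to_perag(cur->bc_group));
}
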
fs/xfs/libxfs/xfs_refcount_btree.h
64
void xfs_refcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
100
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
1021
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
1027
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
1045
trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
1053
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, &ltrec,
fs/xfs/libxfs/xfs_rmap.c
1058
trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
106
trace_xfs_rmap_update(cur, irec->rm_startblock, irec->rm_blockcount,
fs/xfs/libxfs/xfs_rmap.c
1069
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1079
error = xfs_btree_increment(cur, 0, &have_gt);
fs/xfs/libxfs/xfs_rmap.c
1083
error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
fs/xfs/libxfs/xfs_rmap.c
1087
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1092
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1096
trace_xfs_rmap_find_right_neighbor_result(cur,
fs/xfs/libxfs/xfs_rmap.c
1136
trace_xfs_rmap_delete(cur, gtrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1139
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
114
error = xfs_btree_update(cur, &rec);
fs/xfs/libxfs/xfs_rmap.c
1143
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1150
error = xfs_btree_decrement(cur, 0, &have_gt);
fs/xfs/libxfs/xfs_rmap.c
1153
error = xfs_rmap_update(cur, &ltrec);
fs/xfs/libxfs/xfs_rmap.c
116
trace_xfs_rmap_update_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
1172
error = xfs_rmap_update(cur, &gtrec);
fs/xfs/libxfs/xfs_rmap.c
1180
cur->bc_rec.r.rm_startblock = bno;
fs/xfs/libxfs/xfs_rmap.c
1181
cur->bc_rec.r.rm_blockcount = len;
fs/xfs/libxfs/xfs_rmap.c
1182
cur->bc_rec.r.rm_owner = owner;
fs/xfs/libxfs/xfs_rmap.c
1183
cur->bc_rec.r.rm_offset = offset;
fs/xfs/libxfs/xfs_rmap.c
1184
cur->bc_rec.r.rm_flags = flags;
fs/xfs/libxfs/xfs_rmap.c
1185
trace_xfs_rmap_insert(cur, bno, len, owner, offset, flags);
fs/xfs/libxfs/xfs_rmap.c
1186
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1190
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1196
trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
1199
trace_xfs_rmap_map_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
1216
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rmap.c
1222
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
fs/xfs/libxfs/xfs_rmap.c
1225
error = xfs_rmap_map(cur, bno, len, false, oinfo);
fs/xfs/libxfs/xfs_rmap.c
1227
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_rmap.c
1249
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
1255
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
1274
trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
1281
error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, &PREV, &i);
fs/xfs/libxfs/xfs_rmap.c
1285
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1290
trace_xfs_rmap_lookup_le_range_result(cur, PREV.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1313
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1318
error = xfs_rmap_get_rec(cur, &LEFT, &i);
fs/xfs/libxfs/xfs_rmap.c
1322
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1329
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1333
trace_xfs_rmap_find_left_neighbor_result(cur,
fs/xfs/libxfs/xfs_rmap.c
1347
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1351
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1355
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1360
error = xfs_rmap_get_rec(cur, &RIGHT, &i);
fs/xfs/libxfs/xfs_rmap.c
1364
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1369
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1373
trace_xfs_rmap_find_right_neighbor_result(cur,
fs/xfs/libxfs/xfs_rmap.c
1392
trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
1395
error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, NULL, &i);
fs/xfs/libxfs/xfs_rmap.c
1399
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1415
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1419
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1423
trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1426
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1430
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1434
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1438
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1442
trace_xfs_rmap_delete(cur, PREV.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1445
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1449
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1453
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1457
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1463
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1473
trace_xfs_rmap_delete(cur, PREV.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1476
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1480
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1484
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1488
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1494
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1504
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1508
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1512
trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1515
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1519
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1523
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1527
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1534
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1547
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1561
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1564
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1569
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1583
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1591
cur->bc_rec.r = NEW;
fs/xfs/libxfs/xfs_rmap.c
1592
trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
fs/xfs/libxfs/xfs_rmap.c
1593
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1597
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1610
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1613
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
1620
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1632
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1635
error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
fs/xfs/libxfs/xfs_rmap.c
1640
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1649
cur->bc_rec.r = NEW;
fs/xfs/libxfs/xfs_rmap.c
1650
trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
fs/xfs/libxfs/xfs_rmap.c
1651
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1655
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1674
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1680
cur->bc_rec.r = NEW;
fs/xfs/libxfs/xfs_rmap.c
1681
trace_xfs_rmap_insert(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1684
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1688
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1697
error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
fs/xfs/libxfs/xfs_rmap.c
1702
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1707
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
fs/xfs/libxfs/xfs_rmap.c
1708
cur->bc_rec.r.rm_flags |= newext;
fs/xfs/libxfs/xfs_rmap.c
1709
trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
fs/xfs/libxfs/xfs_rmap.c
1710
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
1714
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1733
trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
1736
trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
1747
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
1753
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
1772
trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
1779
error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
fs/xfs/libxfs/xfs_rmap.c
1784
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1804
error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, newext,
fs/xfs/libxfs/xfs_rmap.c
1813
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1822
error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
fs/xfs/libxfs/xfs_rmap.c
1828
error = xfs_rmap_get_rec(cur, &RIGHT, &i);
fs/xfs/libxfs/xfs_rmap.c
1832
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1837
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1841
trace_xfs_rmap_find_right_neighbor_result(cur,
fs/xfs/libxfs/xfs_rmap.c
1858
trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
1870
error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1875
error = xfs_rmap_delete(cur, PREV.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1881
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1887
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1892
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1902
error = xfs_rmap_delete(cur, PREV.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1908
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1914
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1919
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1929
error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1935
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1941
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1947
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1959
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1965
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
1970
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
1981
error = xfs_rmap_delete(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1989
error = xfs_rmap_insert(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
1995
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2001
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2006
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
2017
error = xfs_rmap_delete(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2025
error = xfs_rmap_insert(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2030
error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
fs/xfs/libxfs/xfs_rmap.c
2041
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2047
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2052
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
2056
error = xfs_rmap_delete(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2064
error = xfs_rmap_insert(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2077
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2083
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2088
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
2091
error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
fs/xfs/libxfs/xfs_rmap.c
2109
error = xfs_rmap_insert(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2116
error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2122
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2127
error = xfs_rmap_update(cur, &NEW);
fs/xfs/libxfs/xfs_rmap.c
2136
error = xfs_rmap_insert(cur, NEW.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2156
trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
2159
trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
2179
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
2185
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
2197
trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
2204
error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
fs/xfs/libxfs/xfs_rmap.c
2209
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2220
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2227
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2236
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2243
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2248
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2255
error = xfs_rmap_delete(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2273
error = xfs_rmap_delete(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2283
error = xfs_rmap_insert(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2299
error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2305
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2310
error = xfs_rmap_update(cur, &ltrec);
fs/xfs/libxfs/xfs_rmap.c
2329
error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2335
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2340
error = xfs_rmap_update(cur, &ltrec);
fs/xfs/libxfs/xfs_rmap.c
2345
error = xfs_rmap_insert(cur, bno + len,
fs/xfs/libxfs/xfs_rmap.c
2353
trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
2356
trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
2371
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
2377
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
2391
trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
2394
error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, flags,
fs/xfs/libxfs/xfs_rmap.c
2403
error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
fs/xfs/libxfs/xfs_rmap.c
2408
error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
fs/xfs/libxfs/xfs_rmap.c
2412
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2416
trace_xfs_rmap_find_right_neighbor_result(cur,
fs/xfs/libxfs/xfs_rmap.c
2451
error = xfs_rmap_delete(cur, gtrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2459
error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2465
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
2470
error = xfs_rmap_update(cur, &ltrec);
fs/xfs/libxfs/xfs_rmap.c
2486
error = xfs_rmap_delete(cur, gtrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2496
error = xfs_rmap_insert(cur, gtrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2506
error = xfs_rmap_insert(cur, bno, len, owner, offset, flags);
fs/xfs/libxfs/xfs_rmap.c
2511
trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
2514
trace_xfs_rmap_map_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
2521
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
2532
return xfs_rmap_map(cur, rmap->rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2537
return xfs_rmap_map_shared(cur, rmap->rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
2551
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
2561
fa = xfs_rmap_check_btrec(cur, &irec);
fs/xfs/libxfs/xfs_rmap.c
2563
return xfs_rmap_complain_bad_rec(cur, fa, &irec);
fs/xfs/libxfs/xfs_rmap.c
2565
return query->fn(cur, &irec, query->priv);
fs/xfs/libxfs/xfs_rmap.c
2571
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
2581
return xfs_btree_query_range(cur, &low_brec, &high_brec,
fs/xfs/libxfs/xfs_rmap.c
2588
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
2596
return xfs_btree_query_all(cur, xfs_rmap_query_range_helper, &query);
fs/xfs/libxfs/xfs_rmap.c
2911
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
2927
return xfs_btree_has_records(cur, &low, &high, &mask, outcome);
fs/xfs/libxfs/xfs_rmap.c
2989
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
3028
if (xfs_rmap_shareable(cur->bc_mp, &roc->good) ^
fs/xfs/libxfs/xfs_rmap.c
3029
xfs_rmap_shareable(cur->bc_mp, &check))
fs/xfs/libxfs/xfs_rmap.c
3042
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
3052
error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
fs/xfs/libxfs/xfs_rmap.c
3073
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
3086
error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
fs/xfs/libxfs/xfs_rmap.c
334
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
337
if (xfs_btree_is_rtrmap(cur->bc_ops) ||
fs/xfs/libxfs/xfs_rmap.c
338
xfs_btree_is_mem_rtrmap(cur->bc_ops))
fs/xfs/libxfs/xfs_rmap.c
339
return xfs_rtrmap_check_irec(to_rtg(cur->bc_group), irec);
fs/xfs/libxfs/xfs_rmap.c
340
return xfs_rmap_check_irec(to_perag(cur->bc_group), irec);
fs/xfs/libxfs/xfs_rmap.c
345
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
349
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
351
if (xfs_btree_is_mem_rmap(cur->bc_ops))
fs/xfs/libxfs/xfs_rmap.c
354
else if (xfs_btree_is_rtrmap(cur->bc_ops))
fs/xfs/libxfs/xfs_rmap.c
357
cur->bc_group->xg_gno, fa);
fs/xfs/libxfs/xfs_rmap.c
361
cur->bc_group->xg_gno, fa);
fs/xfs/libxfs/xfs_rmap.c
366
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
375
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
383
error = xfs_btree_get_rec(cur, &rec, stat);
fs/xfs/libxfs/xfs_rmap.c
389
fa = xfs_rmap_check_btrec(cur, irec);
fs/xfs/libxfs/xfs_rmap.c
39
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
391
return xfs_rmap_complain_bad_rec(cur, fa, irec);
fs/xfs/libxfs/xfs_rmap.c
404
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
410
trace_xfs_rmap_find_left_neighbor_candidate(cur, rec->rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
432
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
460
trace_xfs_rmap_find_left_neighbor_query(cur, bno, 0, owner, offset,
fs/xfs/libxfs/xfs_rmap.c
479
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
fs/xfs/libxfs/xfs_rmap.c
484
error = xfs_rmap_find_left_neighbor_helper(cur, irec, &info);
fs/xfs/libxfs/xfs_rmap.c
486
error = xfs_rmap_query_range(cur, &info.high, &info.high,
fs/xfs/libxfs/xfs_rmap.c
492
trace_xfs_rmap_find_left_neighbor_result(cur, irec->rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
50
cur->bc_rec.r.rm_startblock = bno;
fs/xfs/libxfs/xfs_rmap.c
501
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
507
trace_xfs_rmap_lookup_le_range_candidate(cur, rec->rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
51
cur->bc_rec.r.rm_blockcount = 0;
fs/xfs/libxfs/xfs_rmap.c
52
cur->bc_rec.r.rm_owner = owner;
fs/xfs/libxfs/xfs_rmap.c
53
cur->bc_rec.r.rm_offset = offset;
fs/xfs/libxfs/xfs_rmap.c
531
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
54
cur->bc_rec.r.rm_flags = flags;
fs/xfs/libxfs/xfs_rmap.c
554
trace_xfs_rmap_lookup_le_range(cur, bno, 0, owner, offset, flags);
fs/xfs/libxfs/xfs_rmap.c
56
error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
fs/xfs/libxfs/xfs_rmap.c
572
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
fs/xfs/libxfs/xfs_rmap.c
577
error = xfs_rmap_lookup_le_range_helper(cur, irec, &info);
fs/xfs/libxfs/xfs_rmap.c
579
error = xfs_rmap_query_range(cur, &info.high, &info.high,
fs/xfs/libxfs/xfs_rmap.c
585
trace_xfs_rmap_lookup_le_range_result(cur, irec->rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
597
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
60
error = xfs_rmap_get_rec(cur, irec, &get_stat);
fs/xfs/libxfs/xfs_rmap.c
605
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
615
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
622
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
634
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
64
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
640
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
646
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
676
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
682
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rmap.c
697
trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
704
error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, &ltrec, &i);
fs/xfs/libxfs/xfs_rmap.c
708
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
713
trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
729
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
747
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
752
error = xfs_rmap_get_rec(cur, &rtrec, &i);
fs/xfs/libxfs/xfs_rmap.c
756
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
769
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
77
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.c
775
error = xfs_rmap_free_check_owner(cur, ltoff, &ltrec, len, owner,
fs/xfs/libxfs/xfs_rmap.c
782
trace_xfs_rmap_delete(cur, ltrec.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
785
error = xfs_btree_delete(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
789
xfs_btree_mark_sick(cur);
fs/xfs/libxfs/xfs_rmap.c
808
error = xfs_rmap_update(cur, &ltrec);
fs/xfs/libxfs/xfs_rmap.c
823
error = xfs_rmap_update(cur, &ltrec);
fs/xfs/libxfs/xfs_rmap.c
843
error = xfs_rmap_update(cur, &ltrec);
fs/xfs/libxfs/xfs_rmap.c
847
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/libxfs/xfs_rmap.c
85
cur->bc_rec.r.rm_startblock = bno;
fs/xfs/libxfs/xfs_rmap.c
851
cur->bc_rec.r.rm_startblock = bno + len;
fs/xfs/libxfs/xfs_rmap.c
852
cur->bc_rec.r.rm_blockcount = orig_len - len -
fs/xfs/libxfs/xfs_rmap.c
854
cur->bc_rec.r.rm_owner = ltrec.rm_owner;
fs/xfs/libxfs/xfs_rmap.c
856
cur->bc_rec.r.rm_offset = 0;
fs/xfs/libxfs/xfs_rmap.c
858
cur->bc_rec.r.rm_offset = offset + len;
fs/xfs/libxfs/xfs_rmap.c
859
cur->bc_rec.r.rm_flags = flags;
fs/xfs/libxfs/xfs_rmap.c
86
cur->bc_rec.r.rm_blockcount = len;
fs/xfs/libxfs/xfs_rmap.c
860
trace_xfs_rmap_insert(cur, cur->bc_rec.r.rm_startblock,
fs/xfs/libxfs/xfs_rmap.c
861
cur->bc_rec.r.rm_blockcount,
fs/xfs/libxfs/xfs_rmap.c
862
cur->bc_rec.r.rm_owner,
fs/xfs/libxfs/xfs_rmap.c
863
cur->bc_rec.r.rm_offset,
fs/xfs/libxfs/xfs_rmap.c
864
cur->bc_rec.r.rm_flags);
fs/xfs/libxfs/xfs_rmap.c
865
error = xfs_btree_insert(cur, &i);
fs/xfs/libxfs/xfs_rmap.c
87
cur->bc_rec.r.rm_owner = owner;
fs/xfs/libxfs/xfs_rmap.c
871
trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
fs/xfs/libxfs/xfs_rmap.c
874
trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
fs/xfs/libxfs/xfs_rmap.c
88
cur->bc_rec.r.rm_offset = offset;
fs/xfs/libxfs/xfs_rmap.c
89
cur->bc_rec.r.rm_flags = flags;
fs/xfs/libxfs/xfs_rmap.c
90
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
fs/xfs/libxfs/xfs_rmap.c
971
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rmap.c
977
cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
fs/xfs/libxfs/xfs_rmap.c
980
error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
fs/xfs/libxfs/xfs_rmap.c
982
xfs_btree_del_cursor(cur, error);
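
The xfs_rmap.c references above keep returning to one cursor discipline: build a cursor, run a single rmap operation, then hand the resulting error to xfs_btree_del_cursor so teardown happens on the success and failure paths alike (xfs_rmap.c:977-982 is the clearest instance). A minimal sketch of that pattern; the wrapper name is hypothetical, and every call uses the exact argument shape shown in the lines above:

STATIC int
xfs_rmap_unmap_one(			/* hypothetical wrapper name */
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	struct xfs_buf			*agbp,
	struct xfs_perag		*pag,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_btree_cur		*cur;
	int				error;

	cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
	error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
	/* del_cursor consumes the error, so teardown is unconditional. */
	xfs_btree_del_cursor(cur, error);
	return error;
}
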
fs/xfs/libxfs/xfs_rmap.h
126
int xfs_rmap_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/libxfs/xfs_rmap.h
129
int xfs_rmap_lookup_eq(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/libxfs/xfs_rmap.h
135
int xfs_rmap_get_rec(struct xfs_btree_cur *cur, struct xfs_rmap_irec *irec,
fs/xfs/libxfs/xfs_rmap.h
139
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.h
143
int xfs_rmap_query_range(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap.h
147
int xfs_rmap_query_all(struct xfs_btree_cur *cur, xfs_rmap_query_range_fn fn,
fs/xfs/libxfs/xfs_rmap.h
201
int xfs_rmap_lookup_le_range(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/libxfs/xfs_rmap.h
214
int xfs_rmap_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/libxfs/xfs_rmap.h
228
int xfs_rmap_count_owners(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/libxfs/xfs_rmap.h
231
int xfs_rmap_has_other_keys(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/libxfs/xfs_rmap.h
234
int xfs_rmap_map_raw(struct xfs_btree_cur *cur, struct xfs_rmap_irec *rmap);
fs/xfs/libxfs/xfs_rmap_btree.c
110
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
fs/xfs/libxfs/xfs_rmap_btree.c
124
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
127
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_rmap_btree.c
129
struct xfs_perag *pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_rmap_btree.c
133
bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
fs/xfs/libxfs/xfs_rmap_btree.c
135
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
fs/xfs/libxfs/xfs_rmap_btree.c
136
error = xfs_alloc_put_freelist(pag, cur->bc_tp, agbp, NULL, bno, 1);
fs/xfs/libxfs/xfs_rmap_btree.c
140
xfs_extent_busy_insert(cur->bc_tp, pag_group(pag), bno, 1,
fs/xfs/libxfs/xfs_rmap_btree.c
149
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
152
return cur->bc_mp->m_rmap_mnr[level != 0];
fs/xfs/libxfs/xfs_rmap_btree.c
157
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
160
return cur->bc_mp->m_rmap_mxr[level != 0];
fs/xfs/libxfs/xfs_rmap_btree.c
214
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
217
rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
fs/xfs/libxfs/xfs_rmap_btree.c
218
rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
fs/xfs/libxfs/xfs_rmap_btree.c
219
rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
fs/xfs/libxfs/xfs_rmap_btree.c
221
xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
fs/xfs/libxfs/xfs_rmap_btree.c
226
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
229
struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
fs/xfs/libxfs/xfs_rmap_btree.c
231
ASSERT(cur->bc_group->xg_gno == be32_to_cpu(agf->agf_seqno));
fs/xfs/libxfs/xfs_rmap_btree.c
248
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
251
struct xfs_rmap_irec *rec = &cur->bc_rec.r;
fs/xfs/libxfs/xfs_rmap_btree.c
262
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
395
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
425
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
455
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
517
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rmap_btree.c
519
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rmapbt_ops,
fs/xfs/libxfs/xfs_rmap_btree.c
521
cur->bc_group = xfs_group_hold(pag_group(pag));
fs/xfs/libxfs/xfs_rmap_btree.c
522
cur->bc_ag.agbp = agbp;
fs/xfs/libxfs/xfs_rmap_btree.c
526
cur->bc_nlevels = be32_to_cpu(agf->agf_rmap_level);
fs/xfs/libxfs/xfs_rmap_btree.c
528
return cur;
fs/xfs/libxfs/xfs_rmap_btree.c
57
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_rmap_btree.c
59
return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
fs/xfs/libxfs/xfs_rmap_btree.c
60
cur->bc_ag.agbp, to_perag(cur->bc_group));
fs/xfs/libxfs/xfs_rmap_btree.c
629
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rmap_btree.c
631
cur = xfs_btree_alloc_cursor(pag_mount(pag), tp, &xfs_rmapbt_mem_ops,
fs/xfs/libxfs/xfs_rmap_btree.c
633
cur->bc_mem.xfbtree = xfbt;
fs/xfs/libxfs/xfs_rmap_btree.c
634
cur->bc_nlevels = xfbt->nlevels;
fs/xfs/libxfs/xfs_rmap_btree.c
636
cur->bc_group = xfs_group_hold(pag_group(pag));
fs/xfs/libxfs/xfs_rmap_btree.c
637
return cur;
fs/xfs/libxfs/xfs_rmap_btree.c
65
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
681
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
686
struct xbtree_afakeroot *afake = cur->bc_ag.afake;
fs/xfs/libxfs/xfs_rmap_btree.c
688
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_rmap_btree.c
69
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_rmap_btree.c
695
xfs_btree_commit_afakeroot(cur, tp, agbp);
fs/xfs/libxfs/xfs_rmap_btree.c
71
struct xfs_perag *pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_rmap_btree.c
79
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
fs/xfs/libxfs/xfs_rmap_btree.c
84
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rmap_btree.c
89
struct xfs_buf *agbp = cur->bc_ag.agbp;
fs/xfs/libxfs/xfs_rmap_btree.c
91
struct xfs_perag *pag = to_perag(cur->bc_group);
fs/xfs/libxfs/xfs_rmap_btree.c
97
error = xfs_alloc_get_freelist(pag, cur->bc_tp, cur->bc_ag.agbp,
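
The fragments at xfs_rmap_btree.c:217-221 belong to the ->init_rec_from_cur hook, which packs the incore record staged in cur->bc_rec.r into the big-endian on-disk layout. Reassembled as a sketch; the function name and the union xfs_btree_rec parameter are reconstructed from the surrounding xfs_btree_ops convention:

STATIC void
xfs_rmapbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	/* Endian-convert each incore field into the on-disk record. */
	rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
	rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
	rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
	/* The offset field also carries the rmap flag bits, so pack both. */
	rec->rmap.rm_offset = cpu_to_be64(
			xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
}
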
fs/xfs/libxfs/xfs_rmap_btree.h
48
void xfs_rmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
108
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
111
if (level != cur->bc_nlevels - 1)
fs/xfs/libxfs/xfs_rtrefcount_btree.c
112
return cur->bc_mp->m_rtrefc_mxr[level != 0];
fs/xfs/libxfs/xfs_rtrefcount_btree.c
113
return xfs_rtrefcountbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
138
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
141
const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
147
rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
148
rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
153
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
161
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
165
const struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
175
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
254
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
264
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
275
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
304
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
307
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
308
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
312
const unsigned int level = cur->bc_nlevels - 1;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
363
xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork));
fs/xfs/libxfs/xfs_rtrefcount_btree.c
407
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
411
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrefcountbt_ops,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
414
cur->bc_ino.ip = ip;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
415
cur->bc_refc.nr_ops = 0;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
416
cur->bc_refc.shape_changes = 0;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
417
cur->bc_group = xfs_group_hold(rtg_group(rtg));
fs/xfs/libxfs/xfs_rtrefcount_btree.c
418
cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
419
cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
420
cur->bc_ino.whichfork = XFS_DATA_FORK;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
421
return cur;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
430
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
433
struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
437
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
445
ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
449
cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno;
fs/xfs/libxfs/xfs_rtrefcount_btree.c
450
xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
451
xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
46
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_rtrefcount_btree.c
48
return xfs_rtrefcountbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group));
fs/xfs/libxfs/xfs_rtrefcount_btree.c
53
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
56
if (level == cur->bc_nlevels - 1) {
fs/xfs/libxfs/xfs_rtrefcount_btree.c
57
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
59
return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
63
return cur->bc_mp->m_rtrefc_mnr[level != 0];
fs/xfs/libxfs/xfs_rtrefcount_btree.c
68
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
71
if (level == cur->bc_nlevels - 1) {
fs/xfs/libxfs/xfs_rtrefcount_btree.c
72
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_rtrefcount_btree.c
74
return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
fs/xfs/libxfs/xfs_rtrefcount_btree.c
78
return cur->bc_mp->m_rtrefc_mxr[level != 0];
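
xfs_rtrefcount_btree.c:108-113 above shows the level dispatch for an inode-rooted btree: only the root level (bc_nlevels - 1) lives in the inode fork and is sized from the fork, while every other level uses the precomputed per-mount limits, indexed by leaf (level 0) versus node. Stitched together as a sketch, with the enclosing function name reconstructed:

STATIC int
xfs_rtrefcountbt_get_dmaxrecs(		/* name reconstructed */
	struct xfs_btree_cur	*cur,
	int			level)
{
	/* Non-root levels use the per-mount limits: [0] leaf, [1] node. */
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_rtrefc_mxr[level != 0];
	/* The root is sized by the space available in the inode fork. */
	return xfs_rtrefcountbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
}
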
fs/xfs/libxfs/xfs_rtrefcount_btree.h
23
void xfs_rtrefcountbt_commit_staged_btree(struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
1004
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rtrmap_btree.c
1010
cur = xfs_rtrmapbt_init_cursor(tp, rtg);
fs/xfs/libxfs/xfs_rtrmap_btree.c
1011
error = xfs_rmap_map_raw(cur, &rmap);
fs/xfs/libxfs/xfs_rtrmap_btree.c
1012
xfs_btree_del_cursor(cur, error);
fs/xfs/libxfs/xfs_rtrmap_btree.c
1025
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rtrmap_btree.c
1029
cur = xfs_rtrmapbt_init_cursor(NULL, rtg);
fs/xfs/libxfs/xfs_rtrmap_btree.c
1030
xfs_btree_get_keys(cur, block, &key);
fs/xfs/libxfs/xfs_rtrmap_btree.c
1031
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
fs/xfs/libxfs/xfs_rtrmap_btree.c
108
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
111
if (level != cur->bc_nlevels - 1)
fs/xfs/libxfs/xfs_rtrmap_btree.c
112
return cur->bc_mp->m_rtrmap_mxr[level != 0];
fs/xfs/libxfs/xfs_rtrmap_btree.c
113
return xfs_rtrmapbt_droot_maxrecs(cur->bc_ino.forksize, level == 0);
fs/xfs/libxfs/xfs_rtrmap_btree.c
160
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
163
rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
fs/xfs/libxfs/xfs_rtrmap_btree.c
164
rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
fs/xfs/libxfs/xfs_rtrmap_btree.c
165
rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
fs/xfs/libxfs/xfs_rtrmap_btree.c
167
xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
fs/xfs/libxfs/xfs_rtrmap_btree.c
172
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
190
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
193
struct xfs_rmap_irec *rec = &cur->bc_rec.r;
fs/xfs/libxfs/xfs_rtrmap_btree.c
204
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
309
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
339
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
369
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
405
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
408
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/libxfs/xfs_rtrmap_btree.c
409
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_rtrmap_btree.c
413
const unsigned int level = cur->bc_nlevels - 1;
fs/xfs/libxfs/xfs_rtrmap_btree.c
464
xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork));
fs/xfs/libxfs/xfs_rtrmap_btree.c
48
struct xfs_btree_cur *cur)
fs/xfs/libxfs/xfs_rtrmap_btree.c
50
return xfs_rtrmapbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group));
fs/xfs/libxfs/xfs_rtrmap_btree.c
510
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rtrmap_btree.c
514
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_ops,
fs/xfs/libxfs/xfs_rtrmap_btree.c
517
cur->bc_ino.ip = ip;
fs/xfs/libxfs/xfs_rtrmap_btree.c
518
cur->bc_group = xfs_group_hold(rtg_group(rtg));
fs/xfs/libxfs/xfs_rtrmap_btree.c
519
cur->bc_ino.whichfork = XFS_DATA_FORK;
fs/xfs/libxfs/xfs_rtrmap_btree.c
520
cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1;
fs/xfs/libxfs/xfs_rtrmap_btree.c
521
cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK);
fs/xfs/libxfs/xfs_rtrmap_btree.c
523
return cur;
fs/xfs/libxfs/xfs_rtrmap_btree.c
55
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
58
if (level == cur->bc_nlevels - 1) {
fs/xfs/libxfs/xfs_rtrmap_btree.c
59
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_rtrmap_btree.c
61
return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
fs/xfs/libxfs/xfs_rtrmap_btree.c
618
struct xfs_btree_cur *cur;
fs/xfs/libxfs/xfs_rtrmap_btree.c
620
cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_mem_ops,
fs/xfs/libxfs/xfs_rtrmap_btree.c
622
cur->bc_mem.xfbtree = xfbt;
fs/xfs/libxfs/xfs_rtrmap_btree.c
623
cur->bc_nlevels = xfbt->nlevels;
fs/xfs/libxfs/xfs_rtrmap_btree.c
624
cur->bc_group = xfs_group_hold(rtg_group(rtg));
fs/xfs/libxfs/xfs_rtrmap_btree.c
625
return cur;
fs/xfs/libxfs/xfs_rtrmap_btree.c
647
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
65
return cur->bc_mp->m_rtrmap_mnr[level != 0];
fs/xfs/libxfs/xfs_rtrmap_btree.c
650
struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake;
fs/xfs/libxfs/xfs_rtrmap_btree.c
654
ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
fs/xfs/libxfs/xfs_rtrmap_btree.c
662
ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK);
fs/xfs/libxfs/xfs_rtrmap_btree.c
666
cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno;
fs/xfs/libxfs/xfs_rtrmap_btree.c
667
xfs_trans_log_inode(tp, cur->bc_ino.ip, flags);
fs/xfs/libxfs/xfs_rtrmap_btree.c
668
xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK);
fs/xfs/libxfs/xfs_rtrmap_btree.c
70
struct xfs_btree_cur *cur,
fs/xfs/libxfs/xfs_rtrmap_btree.c
73
if (level == cur->bc_nlevels - 1) {
fs/xfs/libxfs/xfs_rtrmap_btree.c
74
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
fs/xfs/libxfs/xfs_rtrmap_btree.c
76
return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes,
fs/xfs/libxfs/xfs_rtrmap_btree.c
80
return cur->bc_mp->m_rtrmap_mxr[level != 0];
fs/xfs/libxfs/xfs_rtrmap_btree.h
24
void xfs_rtrmapbt_commit_staged_btree(struct xfs_btree_cur *cur,
fs/xfs/scrub/agb_bitmap.c
54
struct xfs_btree_cur *cur,
fs/xfs/scrub/agb_bitmap.c
63
xfs_btree_get_block(cur, level, &bp);
fs/xfs/scrub/agb_bitmap.c
67
fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
fs/xfs/scrub/agb_bitmap.c
68
agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
fs/xfs/scrub/agb_bitmap.c
77
struct xfs_btree_cur *cur)
fs/xfs/scrub/agb_bitmap.c
79
return xfs_btree_visit_blocks(cur, xagb_bitmap_visit_btblock,
fs/xfs/scrub/agb_bitmap.c
91
struct xfs_btree_cur *cur)
fs/xfs/scrub/agb_bitmap.c
96
for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
fs/xfs/scrub/agb_bitmap.c
97
error = xagb_bitmap_visit_btblock(cur, i, bitmap);
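
agb_bitmap.c:96-97 above is the heart of xagb_bitmap_set_btcur_path (called at alloc_repair.c:241 and agheader_repair.c:485): it records every btree block on the cursor's current path, but only while the cursor sits on the leftmost (ptr == 1) entry of each level, because higher levels were already recorded when that path was first descended. A reconstruction of the body those lines suggest, not the verbatim in-tree text:

int
xagb_bitmap_set_btcur_path(
	struct xagb_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	int			i;
	int			error;

	/* Walk up from the leaf, stopping once we leave the left edge. */
	for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
		error = xagb_bitmap_visit_btblock(cur, i, bitmap);
		if (error)
			return error;
	}

	return 0;
}
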
fs/xfs/scrub/agb_bitmap.h
64
struct xfs_btree_cur *cur);
fs/xfs/scrub/agb_bitmap.h
66
struct xfs_btree_cur *cur);
fs/xfs/scrub/agheader.c
435
struct xfs_btree_cur *cur,
fs/xfs/scrub/agheader_repair.c
1241
struct xfs_btree_cur *cur,
fs/xfs/scrub/agheader_repair.c
1248
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/scrub/agheader_repair.c
1311
struct xfs_btree_cur *cur;
fs/xfs/scrub/agheader_repair.c
1314
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
fs/xfs/scrub/agheader_repair.c
1315
error = xfs_btree_query_all(cur, xrep_iunlink_mark_ondisk_rec, ragi);
fs/xfs/scrub/agheader_repair.c
1316
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
255
struct xfs_btree_cur *cur = NULL;
fs/xfs/scrub/agheader_repair.c
263
cur = xfs_bnobt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
fs/xfs/scrub/agheader_repair.c
264
error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
fs/xfs/scrub/agheader_repair.c
267
error = xfs_btree_count_blocks(cur, &blocks);
fs/xfs/scrub/agheader_repair.c
270
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
276
cur = xfs_cntbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
fs/xfs/scrub/agheader_repair.c
277
error = xfs_btree_count_blocks(cur, &blocks);
fs/xfs/scrub/agheader_repair.c
280
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
284
cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
fs/xfs/scrub/agheader_repair.c
285
error = xfs_btree_count_blocks(cur, &blocks);
fs/xfs/scrub/agheader_repair.c
288
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
296
cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
fs/xfs/scrub/agheader_repair.c
298
error = xfs_btree_count_blocks(cur, &blocks);
fs/xfs/scrub/agheader_repair.c
301
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
307
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
467
struct xfs_btree_cur *cur,
fs/xfs/scrub/agheader_repair.c
485
return xagb_bitmap_set_btcur_path(&ra->agmetablocks, cur);
fs/xfs/scrub/agheader_repair.c
539
struct xfs_btree_cur *cur;
fs/xfs/scrub/agheader_repair.c
548
cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
fs/xfs/scrub/agheader_repair.c
549
error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
fs/xfs/scrub/agheader_repair.c
550
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
555
cur = xfs_bnobt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
fs/xfs/scrub/agheader_repair.c
556
error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur);
fs/xfs/scrub/agheader_repair.c
557
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
562
cur = xfs_cntbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
fs/xfs/scrub/agheader_repair.c
563
error = xagb_bitmap_set_btblocks(&ra.agmetablocks, cur);
fs/xfs/scrub/agheader_repair.c
564
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
941
struct xfs_btree_cur *cur;
fs/xfs/scrub/agheader_repair.c
948
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
fs/xfs/scrub/agheader_repair.c
949
error = xfs_ialloc_count_inodes(cur, &count, &freecount);
fs/xfs/scrub/agheader_repair.c
95
struct xfs_btree_cur *cur,
fs/xfs/scrub/agheader_repair.c
955
error = xfs_btree_count_blocks(cur, &blocks);
fs/xfs/scrub/agheader_repair.c
960
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
968
cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp, agi_bp);
fs/xfs/scrub/agheader_repair.c
969
error = xfs_btree_count_blocks(cur, &blocks);
fs/xfs/scrub/agheader_repair.c
972
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/agheader_repair.c
978
xfs_btree_del_cursor(cur, error);
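
The agheader_repair.c references above repeat one measuring idiom for every AG btree (bnobt, cntbt, rmapbt, refcountbt, inobt, finobt): construct the cursor, let xfs_btree_count_blocks tally the tree, destroy the cursor with the resulting error. A sketch for the bnobt case; the helper name is hypothetical and the type of the blocks out-parameter is an assumption:

STATIC int
xrep_count_bnobt_blocks(		/* hypothetical helper */
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp,
	xfs_filblks_t		*blocks)	/* assumed out-param type */
{
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_btree_count_blocks(cur, blocks);
	xfs_btree_del_cursor(cur, error);
	return error;
}
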
fs/xfs/scrub/alloc.c
127
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/alloc.c
142
if (xfs_alloc_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {
fs/xfs/scrub/alloc.c
143
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/alloc.c
159
struct xfs_btree_cur *cur;
fs/xfs/scrub/alloc.c
163
cur = sc->sa.bno_cur;
fs/xfs/scrub/alloc.c
166
cur = sc->sa.cnt_cur;
fs/xfs/scrub/alloc.c
173
return xchk_btree(sc, cur, xchk_allocbt_rec, &XFS_RMAP_OINFO_AG, &ca);
fs/xfs/scrub/alloc_repair.c
225
struct xfs_btree_cur *cur,
fs/xfs/scrub/alloc_repair.c
241
error = xagb_bitmap_set_btcur_path(&ra->not_allocbt_blocks, cur);
fs/xfs/scrub/alloc_repair.c
302
xfarray_idx_t cur = XFARRAY_CURSOR_INIT;
fs/xfs/scrub/alloc_repair.c
310
while ((error = xfarray_iter(ra->free_records, &cur, &arec)) == 1) {
fs/xfs/scrub/alloc_repair.c
604
struct xfs_btree_cur *cur,
fs/xfs/scrub/alloc_repair.c
610
struct xfs_alloc_rec_incore *arec = &cur->bc_rec.a;
fs/xfs/scrub/alloc_repair.c
624
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/alloc_repair.c
625
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/alloc_repair.c
634
struct xfs_btree_cur *cur,
fs/xfs/scrub/alloc_repair.c
640
return xrep_newbt_claim_block(cur, &ra->new_bnobt, ptr);
fs/xfs/scrub/bmap.c
530
struct xfs_inode *ip = bs->cur->bc_ino.ip;
fs/xfs/scrub/bmap.c
541
if (xfs_has_crc(bs->cur->bc_mp) &&
fs/xfs/scrub/bmap.c
542
bs->cur->bc_levels[0].ptr == 1) {
fs/xfs/scrub/bmap.c
543
for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
fs/xfs/scrub/bmap.c
544
block = xfs_btree_get_block(bs->cur, i, &bp);
fs/xfs/scrub/bmap.c
591
struct xfs_btree_cur *cur;
fs/xfs/scrub/bmap.c
602
cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
fs/xfs/scrub/bmap.c
604
error = xchk_btree(sc, cur, xchk_bmapbt_rec, &oinfo, info);
fs/xfs/scrub/bmap.c
605
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/bmap.c
619
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap.c
664
xfs_gbno_to_fsb(cur->bc_group, check_rec.rm_startblock))
fs/xfs/scrub/bmap.c
697
struct xfs_btree_cur *cur;
fs/xfs/scrub/bmap.c
705
cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag);
fs/xfs/scrub/bmap.c
709
error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
fs/xfs/scrub/bmap.c
713
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/bmap.c
725
struct xfs_btree_cur *cur;
fs/xfs/scrub/bmap.c
729
cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg);
fs/xfs/scrub/bmap.c
733
error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri);
fs/xfs/scrub/bmap.c
737
xfs_btree_del_cursor(cur, error);
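
bmap.c:705-737 above shows the other common cursor shape: xfs_rmap_query_all drives a callback over every record in the tree (the dispatch through query->fn at xfs_rmap.c:2565 shows the callback's argument order). A sketch with a deliberately trivial callback; the callback name and its priv payload are illustrative only:

/* Illustrative callback: count every reverse mapping in the btree. */
STATIC int
xchk_example_count_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	unsigned long long		*nr = priv;

	(*nr)++;
	return 0;
}

The driver then matches the listed call sites exactly: cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, pag); error = xfs_rmap_query_all(cur, xchk_example_count_rmap, &nr); xfs_btree_del_cursor(cur, error);.
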
fs/xfs/scrub/bmap_repair.c
104
struct xfs_btree_cur *cur;
fs/xfs/scrub/bmap_repair.c
112
cur = sc->sr.refc_cur;
fs/xfs/scrub/bmap_repair.c
115
cur = sc->sa.refc_cur;
fs/xfs/scrub/bmap_repair.c
117
error = xfs_refcount_find_shared(cur, agbno, blockcount, &fbno, &flen,
fs/xfs/scrub/bmap_repair.c
192
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap_repair.c
208
if (!xfs_verify_agbext(to_perag(cur->bc_group), rec->rm_startblock,
fs/xfs/scrub/bmap_repair.c
244
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap_repair.c
258
error = xrep_bmap_check_fork_rmap(rb, cur, rec);
fs/xfs/scrub/bmap_repair.c
280
fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group), rec->rm_startblock);
fs/xfs/scrub/bmap_repair.c
376
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap_repair.c
396
if (!xfs_verify_rgbext(to_rtg(cur->bc_group), rec->rm_startblock,
fs/xfs/scrub/bmap_repair.c
408
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap_repair.c
422
error = xrep_bmap_check_rtfork_rmap(rb->sc, cur, rec);
fs/xfs/scrub/bmap_repair.c
441
xfs_rgbno_to_rtb(to_rtg(cur->bc_group),
fs/xfs/scrub/bmap_repair.c
561
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap_repair.c
568
struct xfs_bmbt_irec *irec = &cur->bc_rec.b;
fs/xfs/scrub/bmap_repair.c
584
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/bmap_repair.c
585
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/bmap_repair.c
594
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap_repair.c
600
return xrep_newbt_claim_block(cur, &rb->new_bmapbt, ptr);
fs/xfs/scrub/bmap_repair.c
606
struct xfs_btree_cur *cur,
fs/xfs/scrub/bmap_repair.c
613
return xfs_bmap_broot_space_calc(cur->bc_mp, nr_this_level);
fs/xfs/scrub/btree.c
100
trace_xchk_btree_error(sc, cur, level,
fs/xfs/scrub/btree.c
107
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
110
__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
fs/xfs/scrub/btree.c
117
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
120
__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
fs/xfs/scrub/btree.c
127
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
130
__xchk_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_PREEN,
fs/xfs/scrub/btree.c
142
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
151
block = xfs_btree_get_block(cur, 0, &bp);
fs/xfs/scrub/btree.c
152
rec = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, block);
fs/xfs/scrub/btree.c
154
trace_xchk_btree_rec(bs->sc, cur, 0);
fs/xfs/scrub/btree.c
158
!cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
fs/xfs/scrub/btree.c
159
xchk_btree_set_corrupt(bs->sc, cur, 0);
fs/xfs/scrub/btree.c
160
memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
fs/xfs/scrub/btree.c
163
if (cur->bc_nlevels == 1)
fs/xfs/scrub/btree.c
167
cur->bc_ops->init_key_from_rec(&key, rec);
fs/xfs/scrub/btree.c
168
keyblock = xfs_btree_get_block(cur, 1, &bp);
fs/xfs/scrub/btree.c
169
keyp = xfs_btree_key_addr(cur, cur->bc_levels[1].ptr, keyblock);
fs/xfs/scrub/btree.c
170
if (xfs_btree_keycmp_lt(cur, &key, keyp))
fs/xfs/scrub/btree.c
171
xchk_btree_set_corrupt(bs->sc, cur, 1);
fs/xfs/scrub/btree.c
173
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
fs/xfs/scrub/btree.c
177
cur->bc_ops->init_high_key_from_rec(&hkey, rec);
fs/xfs/scrub/btree.c
178
keyp = xfs_btree_high_key_addr(cur, cur->bc_levels[1].ptr, keyblock);
fs/xfs/scrub/btree.c
179
if (xfs_btree_keycmp_lt(cur, keyp, &hkey))
fs/xfs/scrub/btree.c
180
xchk_btree_set_corrupt(bs->sc, cur, 1);
fs/xfs/scrub/btree.c
192
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
199
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/scrub/btree.c
200
key = xfs_btree_key_addr(cur, cur->bc_levels[level].ptr, block);
fs/xfs/scrub/btree.c
202
trace_xchk_btree_key(bs->sc, cur, level);
fs/xfs/scrub/btree.c
206
!cur->bc_ops->keys_inorder(cur, &bs->lastkey[level - 1].key, key))
fs/xfs/scrub/btree.c
207
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
208
memcpy(&bs->lastkey[level - 1].key, key, cur->bc_ops->key_len);
fs/xfs/scrub/btree.c
211
if (level + 1 >= cur->bc_nlevels)
fs/xfs/scrub/btree.c
215
keyblock = xfs_btree_get_block(cur, level + 1, &bp);
fs/xfs/scrub/btree.c
216
keyp = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr, keyblock);
fs/xfs/scrub/btree.c
217
if (xfs_btree_keycmp_lt(cur, key, keyp))
fs/xfs/scrub/btree.c
218
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
220
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
fs/xfs/scrub/btree.c
224
key = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr, block);
fs/xfs/scrub/btree.c
225
keyp = xfs_btree_high_key_addr(cur, cur->bc_levels[level + 1].ptr,
fs/xfs/scrub/btree.c
227
if (xfs_btree_keycmp_lt(cur, keyp, key))
fs/xfs/scrub/btree.c
228
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
242
if (bs->cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
fs/xfs/scrub/btree.c
243
level == bs->cur->bc_nlevels)
fs/xfs/scrub/btree.c
247
if (__xfs_btree_check_ptr(bs->cur, ptr, 0, level)) {
fs/xfs/scrub/btree.c
248
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
fs/xfs/scrub/btree.c
263
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
271
error = xfs_btree_dup_cursor(cur, &ncur);
fs/xfs/scrub/btree.c
272
if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error) ||
fs/xfs/scrub/btree.c
28
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
280
if (xfs_btree_ptr_is_null(cur, sibling)) {
fs/xfs/scrub/btree.c
286
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
296
if (!xchk_btree_process_error(bs->sc, cur, level + 1, &error))
fs/xfs/scrub/btree.c
299
xchk_btree_set_corrupt(bs->sc, cur, level + 1);
fs/xfs/scrub/btree.c
311
if (xfs_btree_cmp_two_ptrs(cur, pp, sibling))
fs/xfs/scrub/btree.c
312
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
324
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
330
xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
fs/xfs/scrub/btree.c
331
xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
fs/xfs/scrub/btree.c
335
if (level == cur->bc_nlevels - 1) {
fs/xfs/scrub/btree.c
336
if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
fs/xfs/scrub/btree.c
337
!xfs_btree_ptr_is_null(cur, &rightsib))
fs/xfs/scrub/btree.c
338
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
379
if (!bs->cur)
fs/xfs/scrub/btree.c
382
is_bnobt = xfs_btree_is_bno(bs->cur->bc_ops);
fs/xfs/scrub/btree.c
383
is_rmapbt = xfs_btree_is_rmap(bs->cur->bc_ops);
fs/xfs/scrub/btree.c
384
agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
fs/xfs/scrub/btree.c
385
agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
fs/xfs/scrub/btree.c
392
init_sa = bs->cur->bc_ops->type != XFS_BTREE_TYPE_AG;
fs/xfs/scrub/btree.c
395
if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
fs/xfs/scrub/btree.c
407
bs->cur = NULL;
fs/xfs/scrub/btree.c
411
bs->cur = NULL;
fs/xfs/scrub/btree.c
427
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
436
if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE)
fs/xfs/scrub/btree.c
437
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
fs/xfs/scrub/btree.c
449
if (xfs_btree_is_bno(cur->bc_ops) || xfs_btree_is_rmap(cur->bc_ops)) {
fs/xfs/scrub/btree.c
482
if (xfs_btree_is_bmap(bs->cur->bc_ops) &&
fs/xfs/scrub/btree.c
483
bs->cur->bc_ino.whichfork == XFS_DATA_FORK &&
fs/xfs/scrub/btree.c
500
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
501
unsigned int root_level = cur->bc_nlevels - 1;
fs/xfs/scrub/btree.c
505
if (numrecs >= cur->bc_ops->get_minrecs(cur, level))
fs/xfs/scrub/btree.c
515
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
fs/xfs/scrub/btree.c
516
level == cur->bc_nlevels - 2) {
fs/xfs/scrub/btree.c
52
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
fs/xfs/scrub/btree.c
521
root_block = xfs_btree_get_block(cur, root_level, &root_bp);
fs/xfs/scrub/btree.c
522
root_maxrecs = cur->bc_ops->get_dmaxrecs(cur, root_level);
fs/xfs/scrub/btree.c
526
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
53
trace_xchk_ifork_btree_op_error(sc, cur, level,
fs/xfs/scrub/btree.c
535
xchk_btree_set_corrupt(bs->sc, cur, level);
fs/xfs/scrub/btree.c
551
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
555
if (level == cur->bc_nlevels - 1)
fs/xfs/scrub/btree.c
558
xfs_btree_get_keys(cur, block, &block_key);
fs/xfs/scrub/btree.c
56
trace_xchk_btree_op_error(sc, cur, level,
fs/xfs/scrub/btree.c
561
parent_block = xfs_btree_get_block(cur, level + 1, &bp);
fs/xfs/scrub/btree.c
562
parent_low_key = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr,
fs/xfs/scrub/btree.c
564
if (xfs_btree_keycmp_ne(cur, &block_key, parent_low_key)) {
fs/xfs/scrub/btree.c
565
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
fs/xfs/scrub/btree.c
569
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
fs/xfs/scrub/btree.c
573
parent_high_key = xfs_btree_high_key_addr(cur,
fs/xfs/scrub/btree.c
574
cur->bc_levels[level + 1].ptr, parent_block);
fs/xfs/scrub/btree.c
575
block_high_key = xfs_btree_high_key_from_key(cur, &block_key);
fs/xfs/scrub/btree.c
576
if (xfs_btree_keycmp_ne(cur, block_high_key, parent_high_key))
fs/xfs/scrub/btree.c
577
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
fs/xfs/scrub/btree.c
597
error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
fs/xfs/scrub/btree.c
598
if (!xchk_btree_process_error(bs->sc, bs->cur, level, &error) ||
fs/xfs/scrub/btree.c
602
xfs_btree_get_block(bs->cur, level, pbp);
fs/xfs/scrub/btree.c
603
if (__xfs_btree_check_block(bs->cur, *pblock, level, *pbp)) {
fs/xfs/scrub/btree.c
604
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
fs/xfs/scrub/btree.c
643
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/btree.c
650
if (level >= cur->bc_nlevels - 1)
fs/xfs/scrub/btree.c
654
xfs_btree_get_keys(cur, block, &block_keys);
fs/xfs/scrub/btree.c
657
parent_block = xfs_btree_get_block(cur, level + 1, &bp);
fs/xfs/scrub/btree.c
658
parent_keys = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr,
fs/xfs/scrub/btree.c
66
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
661
if (xfs_btree_keycmp_ne(cur, &block_keys, parent_keys))
fs/xfs/scrub/btree.c
662
xchk_btree_set_corrupt(bs->sc, cur, 1);
fs/xfs/scrub/btree.c
664
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
fs/xfs/scrub/btree.c
668
high_bk = xfs_btree_high_key_from_key(cur, &block_keys);
fs/xfs/scrub/btree.c
669
high_pk = xfs_btree_high_key_addr(cur, cur->bc_levels[level + 1].ptr,
fs/xfs/scrub/btree.c
672
if (xfs_btree_keycmp_ne(cur, high_bk, high_pk))
fs/xfs/scrub/btree.c
673
xchk_btree_set_corrupt(bs->sc, cur, 1);
fs/xfs/scrub/btree.c
684
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
70
return __xchk_btree_process_error(sc, cur, level, error,
fs/xfs/scrub/btree.c
706
cur_sz = xchk_btree_sizeof(cur->bc_nlevels);
fs/xfs/scrub/btree.c
708
xchk_btree_set_corrupt(sc, cur, 0);
fs/xfs/scrub/btree.c
714
bs->cur = cur;
fs/xfs/scrub/btree.c
727
level = cur->bc_nlevels - 1;
fs/xfs/scrub/btree.c
728
xfs_btree_init_ptr_from_cur(cur, &ptr);
fs/xfs/scrub/btree.c
729
if (!xchk_btree_ptr_ok(bs, cur->bc_nlevels, &ptr))
fs/xfs/scrub/btree.c
735
cur->bc_levels[level].ptr = 1;
fs/xfs/scrub/btree.c
737
while (level < cur->bc_nlevels) {
fs/xfs/scrub/btree.c
738
block = xfs_btree_get_block(cur, level, &bp);
fs/xfs/scrub/btree.c
742
if (cur->bc_levels[level].ptr >
fs/xfs/scrub/btree.c
745
if (level < cur->bc_nlevels - 1)
fs/xfs/scrub/btree.c
746
cur->bc_levels[level + 1].ptr++;
fs/xfs/scrub/btree.c
755
recp = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr,
fs/xfs/scrub/btree.c
764
cur->bc_levels[level].ptr++;
fs/xfs/scrub/btree.c
769
if (cur->bc_levels[level].ptr >
fs/xfs/scrub/btree.c
77
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
772
if (level < cur->bc_nlevels - 1)
fs/xfs/scrub/btree.c
773
cur->bc_levels[level + 1].ptr++;
fs/xfs/scrub/btree.c
782
pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block);
fs/xfs/scrub/btree.c
784
cur->bc_levels[level].ptr++;
fs/xfs/scrub/btree.c
792
cur->bc_levels[level].ptr = 1;
fs/xfs/scrub/btree.c
798
if (!error && bs->cur)
fs/xfs/scrub/btree.c
81
return __xchk_btree_process_error(sc, cur, level, error,
fs/xfs/scrub/btree.c
89
struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.c
96
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
fs/xfs/scrub/btree.c
97
trace_xchk_ifork_btree_error(sc, cur, level,
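
btree.c:151-160 above checks leaf records for ordering: fetch the record under the cursor, compare it against the previously seen record with the btree's recs_inorder method, and cache it for the next pass. A sketch of that step; the have_last guard is an assumption standing in for however the real code skips the very first record:

STATIC void
xchk_btree_rec_order(			/* hypothetical name */
	struct xchk_btree	*bs,
	bool			have_last)	/* assumed guard */
{
	struct xfs_btree_cur	*cur = bs->cur;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	union xfs_btree_rec	*rec;

	/* Point at the record the cursor currently sits on. */
	block = xfs_btree_get_block(cur, 0, &bp);
	rec = xfs_btree_rec_addr(cur, cur->bc_levels[0].ptr, block);

	/* Records must not sort below their predecessor. */
	if (have_last &&
	    !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
		xchk_btree_set_corrupt(bs->sc, cur, 0);

	/* Remember this record for the next comparison. */
	memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
}
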
fs/xfs/scrub/btree.h
13
struct xfs_btree_cur *cur, int level, int *error);
fs/xfs/scrub/btree.h
17
struct xfs_btree_cur *cur, int level, int *error);
fs/xfs/scrub/btree.h
21
struct xfs_btree_cur *cur, int level);
fs/xfs/scrub/btree.h
22
void xchk_btree_set_preen(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/btree.h
27
struct xfs_btree_cur *cur, int level);
fs/xfs/scrub/btree.h
42
struct xfs_btree_cur *cur;
fs/xfs/scrub/btree.h
66
int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/common.c
1693
struct xfs_btree_cur *cur;
fs/xfs/scrub/common.c
1703
cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
fs/xfs/scrub/common.c
1706
cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg);
fs/xfs/scrub/common.c
1713
error = xfs_btree_count_blocks(cur, count);
fs/xfs/scrub/common.c
1714
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/common.c
387
struct xfs_btree_cur *cur,
fs/xfs/scrub/common.c
414
struct xfs_btree_cur *cur,
fs/xfs/scrub/common.c
424
return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
fs/xfs/scrub/common.h
169
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/cow_repair.c
141
struct xfs_btree_cur *cur,
fs/xfs/scrub/cow_repair.c
155
xfs_gbno_to_fsb(cur->bc_group, rrec.rc_startblock),
fs/xfs/scrub/cow_repair.c
169
struct xfs_btree_cur *cur,
fs/xfs/scrub/cow_repair.c
187
xfs_gbno_to_fsb(cur->bc_group, xc->next_bno),
fs/xfs/scrub/cow_repair.c
203
struct xfs_btree_cur *cur,
fs/xfs/scrub/cow_repair.c
230
xfs_gbno_to_fsb(cur->bc_group, rec_bno), rec_len);
fs/xfs/scrub/ialloc.c
101
xchk_btree_xref_set_corrupt(sc, cur, 0);
fs/xfs/scrub/ialloc.c
103
xchk_btree_xref_set_corrupt(sc, cur, 0);
fs/xfs/scrub/ialloc.c
123
xchk_btree_xref_set_corrupt(sc, cur, 0);
fs/xfs/scrub/ialloc.c
176
struct xfs_btree_cur *cur = sc->sa.ino_cur;
fs/xfs/scrub/ialloc.c
182
ASSERT(xfs_btree_is_ino(cur->bc_ops));
fs/xfs/scrub/ialloc.c
184
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
fs/xfs/scrub/ialloc.c
190
error = xfs_inobt_get_rec(cur, &irec, &has_record);
fs/xfs/scrub/ialloc.c
204
xchk_btree_xref_set_corrupt(sc, cur, 0);
fs/xfs/scrub/ialloc.c
206
xchk_btree_xref_set_corrupt(sc, cur, 0);
fs/xfs/scrub/ialloc.c
211
xchk_btree_xref_set_corrupt(sc, cur, 0);
fs/xfs/scrub/ialloc.c
260
struct xfs_mount *mp = bs->cur->bc_mp;
fs/xfs/scrub/ialloc.c
261
struct xfs_perag *pag = to_perag(bs->cur->bc_group);
fs/xfs/scrub/ialloc.c
269
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
321
fsino = xfs_agino_to_ino(to_perag(bs->cur->bc_group), agino);
fs/xfs/scrub/ialloc.c
326
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
348
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
367
struct xfs_mount *mp = bs->cur->bc_mp;
fs/xfs/scrub/ialloc.c
397
imap.im_blkno = xfs_agbno_to_daddr(to_perag(bs->cur->bc_group), agbno);
fs/xfs/scrub/ialloc.c
404
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
408
trace_xchk_iallocbt_check_cluster(to_perag(bs->cur->bc_group),
fs/xfs/scrub/ialloc.c
416
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
432
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &cluster_bp);
fs/xfs/scrub/ialloc.c
433
if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
fs/xfs/scrub/ialloc.c
441
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
453
xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
fs/xfs/scrub/ialloc.c
515
if (xfs_btree_is_fino(bs->cur->bc_ops)) {
fs/xfs/scrub/ialloc.c
521
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
532
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
548
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
553
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
575
struct xfs_mount *mp = bs->cur->bc_mp;
fs/xfs/scrub/ialloc.c
586
if (xfs_inobt_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {
fs/xfs/scrub/ialloc.c
587
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
602
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
616
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
630
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/ialloc.c
712
struct xfs_btree_cur *cur;
fs/xfs/scrub/ialloc.c
722
cur = sc->sa.ino_cur;
fs/xfs/scrub/ialloc.c
725
cur = sc->sa.fino_cur;
fs/xfs/scrub/ialloc.c
73
struct xfs_btree_cur *cur = sc->sa.fino_cur;
fs/xfs/scrub/ialloc.c
732
error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
fs/xfs/scrub/ialloc.c
79
ASSERT(xfs_btree_is_fino(cur->bc_ops));
fs/xfs/scrub/ialloc.c
81
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_record);
fs/xfs/scrub/ialloc.c
87
error = xfs_inobt_get_rec(cur, &frec, &has_record);
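
ialloc.c:81-87 and 184-190 above share one lookup idiom: an LE lookup positions the cursor at the inobt record that could cover agino, and xfs_inobt_get_rec then materializes it; either step reporting no record means agino is not covered by any inode chunk. Condensed into a sketch (the helper name is hypothetical):

STATIC int
xchk_inobt_find_rec(			/* hypothetical helper */
	struct xfs_btree_cur		*cur,
	xfs_agino_t			agino,
	struct xfs_inobt_rec_incore	*irec,
	int				*has_record)
{
	int				error;

	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, has_record);
	if (error || !*has_record)
		return error;
	return xfs_inobt_get_rec(cur, irec, has_record);
}
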
fs/xfs/scrub/ialloc_repair.c
444
struct xfs_btree_cur *cur,
fs/xfs/scrub/ialloc_repair.c
520
struct xfs_btree_cur *cur,
fs/xfs/scrub/ialloc_repair.c
526
struct xfs_inobt_rec_incore *irec = &cur->bc_rec.i;
fs/xfs/scrub/ialloc_repair.c
540
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/ialloc_repair.c
541
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/ialloc_repair.c
550
struct xfs_btree_cur *cur,
fs/xfs/scrub/ialloc_repair.c
556
struct xfs_inobt_rec_incore *irec = &cur->bc_rec.i;
fs/xfs/scrub/ialloc_repair.c
567
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/ialloc_repair.c
568
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/ialloc_repair.c
577
struct xfs_btree_cur *cur,
fs/xfs/scrub/ialloc_repair.c
583
return xrep_newbt_claim_block(cur, &ri->new_inobt, ptr);
fs/xfs/scrub/ialloc_repair.c
589
struct xfs_btree_cur *cur,
fs/xfs/scrub/ialloc_repair.c
595
return xrep_newbt_claim_block(cur, &ri->new_finobt, ptr);
fs/xfs/scrub/ialloc_repair.c
604
xfarray_idx_t cur;
fs/xfs/scrub/ialloc_repair.c
608
foreach_xfarray_idx(ri->inode_records, cur) {
fs/xfs/scrub/ialloc_repair.c
612
error = xfarray_load(ri->inode_records, cur, &irec);
fs/xfs/scrub/inode_repair.c
729
struct xfs_btree_cur *cur,
fs/xfs/scrub/inode_repair.c
764
struct xfs_btree_cur *cur;
fs/xfs/scrub/inode_repair.c
772
cur = xfs_rmapbt_init_cursor(ri->sc->mp, ri->sc->tp, agf, pag);
fs/xfs/scrub/inode_repair.c
773
error = xfs_rmap_query_all(cur, xrep_dinode_walk_rmap, ri);
fs/xfs/scrub/inode_repair.c
774
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/inode_repair.c
782
struct xfs_btree_cur *cur,
fs/xfs/scrub/iscan.c
116
cur = xfs_inobt_init_cursor(pag, tp, agi_bp);
fs/xfs/scrub/iscan.c
117
error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_rec);
fs/xfs/scrub/iscan.c
119
error = xfs_btree_increment(cur, 0, &has_rec);
fs/xfs/scrub/iscan.c
120
for (; !error; error = xfs_btree_increment(cur, 0, &has_rec)) {
fs/xfs/scrub/iscan.c
133
error = xfs_inobt_get_rec(cur, &rec, &has_rec);
fs/xfs/scrub/iscan.c
185
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/iscan.c
95
struct xfs_btree_cur *cur;
fs/xfs/scrub/newbt.c
562
struct xfs_btree_cur *cur,
fs/xfs/scrub/newbt.c
593
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
fs/xfs/scrub/newbt.h
72
int xrep_newbt_claim_block(struct xfs_btree_cur *cur, struct xrep_newbt *xnr,
fs/xfs/scrub/nlinks.c
897
xfarray_idx_t cur = XFARRAY_CURSOR_INIT;
fs/xfs/scrub/nlinks.c
940
while ((error = xfarray_iter(xnc->nlinks, &cur, &nl)) == 1) {
fs/xfs/scrub/nlinks.c
941
xfs_ino_t ino = cur - 1;
fs/xfs/scrub/quotacheck.c
617
xfarray_idx_t cur = XFARRAY_CURSOR_INIT;
fs/xfs/scrub/quotacheck.c
621
while ((error = xfarray_iter(counts, &cur, &xcdq)) == 1) {
fs/xfs/scrub/quotacheck.c
622
xfs_dqid_t id = cur - 1;
fs/xfs/scrub/quotacheck_repair.c
137
xfarray_idx_t cur = XFARRAY_CURSOR_INIT;
fs/xfs/scrub/quotacheck_repair.c
158
while ((error = xfarray_iter(counts, &cur, &xcdq)) == 1) {
fs/xfs/scrub/quotacheck_repair.c
159
xfs_dqid_t id = cur - 1;
fs/xfs/scrub/rcbag.c
105
error = rcbagbt_insert(cur, &bagrec, &has);
fs/xfs/scrub/rcbag.c
114
xfs_btree_del_cursor(cur, 0);
fs/xfs/scrub/rcbag.c
124
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rcbag.c
156
struct xfs_btree_cur *cur;
fs/xfs/scrub/rcbag.c
164
cur = rcbagbt_mem_cursor(mp, tp, &bag->xfbtree);
fs/xfs/scrub/rcbag.c
165
error = xfs_btree_goto_left_edge(cur);
fs/xfs/scrub/rcbag.c
170
error = xfs_btree_increment(cur, 0, &has);
fs/xfs/scrub/rcbag.c
176
error = rcbagbt_get_rec(cur, &bagrec, &has);
fs/xfs/scrub/rcbag.c
198
xfs_btree_del_cursor(cur, 0);
fs/xfs/scrub/rcbag.c
204
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rcbag.c
217
struct xfs_btree_cur *cur;
fs/xfs/scrub/rcbag.c
222
cur = rcbagbt_mem_cursor(mp, tp, &bag->xfbtree);
fs/xfs/scrub/rcbag.c
223
memset(&cur->bc_rec, 0xFF, sizeof(cur->bc_rec));
fs/xfs/scrub/rcbag.c
224
error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &has);
fs/xfs/scrub/rcbag.c
229
error = xfs_btree_decrement(cur, 0, &has);
fs/xfs/scrub/rcbag.c
235
error = rcbagbt_get_rec(cur, &bagrec, &has);
fs/xfs/scrub/rcbag.c
246
error = xfs_btree_delete(cur, &has);
fs/xfs/scrub/rcbag.c
257
xfs_btree_del_cursor(cur, 0);
fs/xfs/scrub/rcbag.c
260
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rcbag.c
273
struct xfs_btree_cur *cur;
fs/xfs/scrub/rcbag.c
278
cur = rcbagbt_mem_cursor(mp, tp, &bag->xfbtree);
fs/xfs/scrub/rcbag.c
279
error = xfs_btree_goto_left_edge(cur);
fs/xfs/scrub/rcbag.c
284
error = xfs_btree_increment(cur, 0, &has);
fs/xfs/scrub/rcbag.c
290
error = rcbagbt_get_rec(cur, &bagrec, &has);
fs/xfs/scrub/rcbag.c
306
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rcbag.c
78
struct xfs_btree_cur *cur;
fs/xfs/scrub/rcbag.c
82
cur = rcbagbt_mem_cursor(mp, tp, &bag->xfbtree);
fs/xfs/scrub/rcbag.c
83
error = rcbagbt_lookup_eq(cur, rmap, &has);
fs/xfs/scrub/rcbag.c
88
error = rcbagbt_get_rec(cur, &bagrec, &has);
fs/xfs/scrub/rcbag.c
97
error = rcbagbt_update(cur, &bagrec);
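
rcbag.c:217-257 above extracts the largest record with a classic btree trick: saturate the cursor's scratch key with 0xFF bytes, run a GE lookup (which finds nothing and parks the cursor past the end), step back one record, then read and delete it. A sketch under an assumed function name, with the error labels simplified:

STATIC int
rcbag_remove_largest(			/* hypothetical name */
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct rcbag		*bag,
	struct rcbag_rec	*bagrec)
{
	struct xfs_btree_cur	*cur;
	int			has;
	int			error;

	cur = rcbagbt_mem_cursor(mp, tp, &bag->xfbtree);

	/* An all-ones key is >= every record, so LOOKUP_GE finds none... */
	memset(&cur->bc_rec, 0xFF, sizeof(cur->bc_rec));
	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &has);
	if (error)
		goto out_cur;

	/* ...and stepping back lands on the largest record present. */
	error = xfs_btree_decrement(cur, 0, &has);
	if (error || !has)
		goto out_cur;

	error = rcbagbt_get_rec(cur, bagrec, &has);
	if (error || !has)
		goto out_cur;

	error = xfs_btree_delete(cur, &has);

out_cur:
	xfs_btree_del_cursor(cur, error);
	return error;
}
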
fs/xfs/scrub/rcbag_btree.c
102
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
200
struct xfs_btree_cur *cur;
fs/xfs/scrub/rcbag_btree.c
202
cur = xfs_btree_alloc_cursor(mp, tp, &rcbagbt_mem_ops,
fs/xfs/scrub/rcbag_btree.c
205
cur->bc_mem.xfbtree = xfbtree;
fs/xfs/scrub/rcbag_btree.c
206
cur->bc_nlevels = xfbtree->nlevels;
fs/xfs/scrub/rcbag_btree.c
207
return cur;
fs/xfs/scrub/rcbag_btree.c
299
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
303
struct rcbag_rec *rec = (struct rcbag_rec *)&cur->bc_rec;
fs/xfs/scrub/rcbag_btree.c
308
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, success);
fs/xfs/scrub/rcbag_btree.c
314
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
321
error = xfs_btree_get_rec(cur, &btrec, has);
fs/xfs/scrub/rcbag_btree.c
332
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
338
return xfs_btree_update(cur, &btrec);
fs/xfs/scrub/rcbag_btree.c
344
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
348
struct rcbag_rec *btrec = (struct rcbag_rec *)&cur->bc_rec;
fs/xfs/scrub/rcbag_btree.c
351
return xfs_btree_insert(cur, success);
fs/xfs/scrub/rcbag_btree.c
39
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
43
struct rcbag_rec *bag_irec = (struct rcbag_rec *)&cur->bc_rec;
fs/xfs/scrub/rcbag_btree.c
52
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
55
struct rcbag_rec *rec = (struct rcbag_rec *)&cur->bc_rec;
fs/xfs/scrub/rcbag_btree.c
64
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.c
80
struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.h
69
int rcbagbt_lookup_eq(struct xfs_btree_cur *cur,
fs/xfs/scrub/rcbag_btree.h
71
int rcbagbt_get_rec(struct xfs_btree_cur *cur, struct rcbag_rec *rec, int *has);
fs/xfs/scrub/rcbag_btree.h
72
int rcbagbt_update(struct xfs_btree_cur *cur, const struct rcbag_rec *rec);
fs/xfs/scrub/rcbag_btree.h
73
int rcbagbt_insert(struct xfs_btree_cur *cur, const struct rcbag_rec *rec,
fs/xfs/scrub/reap.c
1239
struct xfs_btree_cur *cur;
fs/xfs/scrub/reap.c
1249
cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
fs/xfs/scrub/reap.c
1254
error = xfs_rmap_has_other_keys(cur, agbno, 1, &oinfo, crosslinked);
fs/xfs/scrub/reap.c
1263
error = xfs_rmap_has_other_keys(cur, bno, 1, &oinfo,
fs/xfs/scrub/reap.c
1279
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/reap.c
356
struct xfs_btree_cur *cur;
fs/xfs/scrub/reap.c
365
cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
fs/xfs/scrub/reap.c
367
error = xfs_rmap_has_other_keys(cur, agbno, 1, rs->oinfo,
fs/xfs/scrub/reap.c
383
error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo,
fs/xfs/scrub/reap.c
400
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/reap.c
912
struct xfs_btree_cur *cur;
fs/xfs/scrub/reap.c
921
cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
fs/xfs/scrub/reap.c
922
error = xfs_rmap_has_other_keys(cur, rgbno, 1, rs->oinfo,
fs/xfs/scrub/reap.c
934
error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo,
fs/xfs/scrub/reap.c
950
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/refcount.c
110
struct xfs_btree_cur *cur,
fs/xfs/scrub/refcount.c
128
xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
fs/xfs/scrub/refcount.c
360
struct xfs_btree_cur *cur,
fs/xfs/scrub/refcount.c
440
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/refcount.c
455
if (xfs_refcount_check_irec(to_perag(bs->cur->bc_group), &irec) !=
fs/xfs/scrub/refcount.c
457
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/refcount.c
467
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/refcount_repair.c
233
struct xfs_btree_cur *cur = rr->sc->sa.rmap_cur;
fs/xfs/scrub/refcount_repair.c
234
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/scrub/refcount_repair.c
250
error = xfs_btree_increment(cur, 0, &have_gt);
fs/xfs/scrub/refcount_repair.c
256
error = xfs_rmap_get_rec(cur, rmap, &have_gt);
fs/xfs/scrub/refcount_repair.c
260
xfs_btree_mark_sick(cur);
fs/xfs/scrub/refcount_repair.c
326
xfarray_idx_t cur;
fs/xfs/scrub/refcount_repair.c
336
foreach_xfarray_idx(rr->refcount_records, cur) {
fs/xfs/scrub/refcount_repair.c
340
error = xfarray_load(rr->refcount_records, cur, &irec);
fs/xfs/scrub/refcount_repair.c
513
struct xfs_btree_cur *cur,
fs/xfs/scrub/refcount_repair.c
519
struct xfs_refcount_irec *irec = &cur->bc_rec.rc;
fs/xfs/scrub/refcount_repair.c
531
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/refcount_repair.c
532
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/refcount_repair.c
541
struct xfs_btree_cur *cur,
fs/xfs/scrub/refcount_repair.c
547
return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
fs/xfs/scrub/repair.c
671
struct xfs_btree_cur *cur,
fs/xfs/scrub/repair.c
715
struct xfs_btree_cur *cur;
fs/xfs/scrub/repair.c
732
cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
fs/xfs/scrub/repair.c
733
error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
fs/xfs/scrub/repair.c
734
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rmap.c
150
struct xfs_btree_cur *cur = bs->cur;
fs/xfs/scrub/rmap.c
159
for (level = 1; level < cur->bc_nlevels; level++) {
fs/xfs/scrub/rmap.c
164
if (cur->bc_levels[level].ptr > 1)
fs/xfs/scrub/rmap.c
167
keyblock = xfs_btree_get_block(cur, level, &bp);
fs/xfs/scrub/rmap.c
169
lkey = xfs_btree_key_addr(cur, ptr, keyblock);
fs/xfs/scrub/rmap.c
172
xchk_btree_set_preen(sc, cur, level);
fs/xfs/scrub/rmap.c
176
hkey = xfs_btree_high_key_addr(cur, ptr, keyblock);
fs/xfs/scrub/rmap.c
178
xchk_btree_set_preen(sc, cur, level);
fs/xfs/scrub/rmap.c
224
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rmap.c
275
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rmap.c
361
xfs_rmap_check_irec(to_perag(bs->cur->bc_group), &irec) != NULL) {
fs/xfs/scrub/rmap.c
362
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rmap.c
403
struct xfs_btree_cur *cur;
fs/xfs/scrub/rmap.c
422
cur = sc->sa.bno_cur;
fs/xfs/scrub/rmap.c
423
if (!cur)
fs/xfs/scrub/rmap.c
424
cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
fs/xfs/scrub/rmap.c
426
error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
fs/xfs/scrub/rmap.c
427
if (cur != sc->sa.bno_cur)
fs/xfs/scrub/rmap.c
428
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rmap.c
432
cur = sc->sa.cnt_cur;
fs/xfs/scrub/rmap.c
433
if (!cur)
fs/xfs/scrub/rmap.c
434
cur = xfs_cntbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
fs/xfs/scrub/rmap.c
436
error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);
fs/xfs/scrub/rmap.c
437
if (cur != sc->sa.cnt_cur)
fs/xfs/scrub/rmap.c
438
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rmap.c
457
cur = sc->sa.ino_cur;
fs/xfs/scrub/rmap.c
458
if (!cur)
fs/xfs/scrub/rmap.c
459
cur = xfs_inobt_init_cursor(sc->sa.pag, sc->tp, sc->sa.agi_bp);
fs/xfs/scrub/rmap.c
460
error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
fs/xfs/scrub/rmap.c
461
if (cur != sc->sa.ino_cur)
fs/xfs/scrub/rmap.c
462
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rmap.c
467
cur = sc->sa.fino_cur;
fs/xfs/scrub/rmap.c
468
if (!cur)
fs/xfs/scrub/rmap.c
469
cur = xfs_finobt_init_cursor(sc->sa.pag, sc->tp,
fs/xfs/scrub/rmap.c
471
error = xagb_bitmap_set_btblocks(&cr->inobt_owned, cur);
fs/xfs/scrub/rmap.c
472
if (cur != sc->sa.fino_cur)
fs/xfs/scrub/rmap.c
473
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rmap.c
480
cur = sc->sa.refc_cur;
fs/xfs/scrub/rmap.c
481
if (!cur)
fs/xfs/scrub/rmap.c
482
cur = xfs_refcountbt_init_cursor(sc->mp, sc->tp,
fs/xfs/scrub/rmap.c
484
error = xagb_bitmap_set_btblocks(&cr->refcbt_owned, cur);
fs/xfs/scrub/rmap.c
485
if (cur != sc->sa.refc_cur)
fs/xfs/scrub/rmap.c
486
xfs_btree_del_cursor(cur, error);
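
The five nearly identical stanzas above (bnobt, cntbt, inobt, finobt, refcountbt) are one borrow-or-create pattern: reuse the scrub context's cursor when setup already built one, otherwise construct a private cursor, and tear down only the cursor this code created. Spelled out once for the by-block allocation btree, directly from the quoted lines (the trailing constructor argument is inferred from the similar rmapbt call quoted from repair.c):

	struct xfs_btree_cur	*cur;
	int			error;

	/* Borrow the context's cursor if scrub setup already built one... */
	cur = sc->sa.bno_cur;
	if (!cur)
		/* ...otherwise create a private cursor for this walk. */
		cur = xfs_bnobt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
				sc->sa.pag);

	error = xagb_bitmap_set_btblocks(&cr->ag_owned, cur);

	/* Only delete a cursor we created ourselves. */
	if (cur != sc->sa.bno_cur)
		xfs_btree_del_cursor(cur, error);
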
fs/xfs/scrub/rmap.c
513
struct xfs_btree_cur *cur = sc->sa.rmap_cur;
fs/xfs/scrub/rmap.c
519
if (!cur)
fs/xfs/scrub/rmap.c
521
level = cur->bc_nlevels - 1;
fs/xfs/scrub/rmap.c
528
xchk_btree_xref_set_corrupt(sc, cur, level);
fs/xfs/scrub/rmap.c
531
xchk_btree_xref_set_corrupt(sc, cur, level);
fs/xfs/scrub/rmap.c
534
xchk_btree_xref_set_corrupt(sc, cur, level);
fs/xfs/scrub/rmap.c
537
xchk_btree_xref_set_corrupt(sc, cur, level);
fs/xfs/scrub/rmap.c
540
xchk_btree_xref_set_corrupt(sc, cur, level);
fs/xfs/scrub/rmap_repair.c
1218
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
1238
error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat);
fs/xfs/scrub/rmap_repair.c
1244
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/rmap_repair.c
1245
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/rmap_repair.c
1254
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
1260
return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
fs/xfs/scrub/rmap_repair.c
1306
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
1314
error = xfs_btree_goto_left_edge(cur);
fs/xfs/scrub/rmap_repair.c
1318
while (running && !(error = xfs_btree_increment(cur, 0, &running))) {
fs/xfs/scrub/rmap_repair.c
1461
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
1474
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
335
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
380
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
389
xfs_btree_get_block(cur, level, &bp);
fs/xfs/scrub/rmap_repair.c
393
fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
fs/xfs/scrub/rmap_repair.c
394
if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != pag_agno(rf->rr->sc->sa.pag))
fs/xfs/scrub/rmap_repair.c
397
agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
fs/xfs/scrub/rmap_repair.c
408
struct xfs_btree_cur *cur)
fs/xfs/scrub/rmap_repair.c
417
error = xfs_btree_visit_blocks(cur, xrep_rmap_visit_iroot_btree_block,
fs/xfs/scrub/rmap_repair.c
447
struct xfs_btree_cur *cur;
fs/xfs/scrub/rmap_repair.c
453
cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, rf->whichfork);
fs/xfs/scrub/rmap_repair.c
464
error = xfs_bmap_query_all(cur, xrep_rmap_visit_bmbt, rf);
fs/xfs/scrub/rmap_repair.c
472
error = xrep_rmap_scan_iroot_btree(rf, cur);
fs/xfs/scrub/rmap_repair.c
474
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rmap_repair.c
509
struct xfs_btree_cur *cur = NULL;
fs/xfs/scrub/rmap_repair.c
547
cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg);
fs/xfs/scrub/rmap_repair.c
550
cur = xfs_rtrefcountbt_init_cursor(sc->tp, rtg);
fs/xfs/scrub/rmap_repair.c
558
error = xrep_rmap_scan_iroot_btree(rf, cur);
fs/xfs/scrub/rmap_repair.c
559
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rmap_repair.c
671
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
677
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/scrub/rmap_repair.c
686
error = xagb_bitmap_set_btcur_path(&ri->inobt_blocks, cur);
fs/xfs/scrub/rmap_repair.c
691
if (xfs_inobt_check_irec(to_perag(cur->bc_group), &irec) != NULL)
fs/xfs/scrub/rmap_repair.c
786
struct xfs_btree_cur *cur,
fs/xfs/scrub/rmap_repair.c
881
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtbitmap_repair.c
299
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrefcount.c
124
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrefcount.c
142
xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
fs/xfs/scrub/rtrefcount.c
403
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrefcount.c
410
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrefcount.c
461
struct xfs_mount *mp = bs->cur->bc_mp;
fs/xfs/scrub/rtrefcount.c
467
if (xfs_rtrefcount_check_irec(to_rtg(bs->cur->bc_group), &irec) !=
fs/xfs/scrub/rtrefcount.c
469
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrefcount.c
476
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrefcount.c
479
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrefcount.c
487
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrefcount_repair.c
221
struct xfs_btree_cur *cur = rr->sc->sr.rmap_cur;
fs/xfs/scrub/rtrefcount_repair.c
222
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/scrub/rtrefcount_repair.c
238
error = xfs_btree_increment(cur, 0, &have_gt);
fs/xfs/scrub/rtrefcount_repair.c
244
error = xfs_rmap_get_rec(cur, rmap, &have_gt);
fs/xfs/scrub/rtrefcount_repair.c
248
xfs_btree_mark_sick(cur);
fs/xfs/scrub/rtrefcount_repair.c
260
xfs_btree_mark_sick(cur);
fs/xfs/scrub/rtrefcount_repair.c
314
xfarray_idx_t cur;
fs/xfs/scrub/rtrefcount_repair.c
324
foreach_xfarray_idx(rr->refcount_records, cur) {
fs/xfs/scrub/rtrefcount_repair.c
328
error = xfarray_load(rr->refcount_records, cur, &irec);
fs/xfs/scrub/rtrefcount_repair.c
352
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrefcount_repair.c
371
xfs_gbno_to_fsb(cur->bc_group, rec->rm_startblock),
fs/xfs/scrub/rtrefcount_repair.c
555
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrefcount_repair.c
568
&cur->bc_rec.rc);
fs/xfs/scrub/rtrefcount_repair.c
572
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/rtrefcount_repair.c
573
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/rtrefcount_repair.c
582
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrefcount_repair.c
588
return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
fs/xfs/scrub/rtrefcount_repair.c
594
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrefcount_repair.c
599
return xfs_rtrefcount_broot_space_calc(cur->bc_mp, level,
fs/xfs/scrub/rtrmap.c
117
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrmap.c
163
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrmap.c
228
xfs_rtrmap_check_irec(to_rtg(bs->cur->bc_group), &irec) != NULL) {
fs/xfs/scrub/rtrmap.c
229
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
fs/xfs/scrub/rtrmap_repair.c
218
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrmap_repair.c
270
struct xfs_btree_cur *cur;
fs/xfs/scrub/rtrmap_repair.c
285
cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, XFS_DATA_FORK);
fs/xfs/scrub/rtrmap_repair.c
286
error = xfs_bmap_query_all(cur, xrep_rtrmap_visit_bmbt, rf);
fs/xfs/scrub/rtrmap_repair.c
287
xfs_btree_del_cursor(cur, error);
fs/xfs/scrub/rtrmap_repair.c
384
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrmap_repair.c
403
xfs_gbno_to_fsb(cur->bc_group, rec->rm_startblock),
fs/xfs/scrub/rtrmap_repair.c
464
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrmap_repair.c
522
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrmap_repair.c
643
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrmap_repair.c
663
error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat);
fs/xfs/scrub/rtrmap_repair.c
669
block_rec = xfs_btree_rec_addr(cur, idx, block);
fs/xfs/scrub/rtrmap_repair.c
670
cur->bc_ops->init_rec_from_cur(cur, block_rec);
fs/xfs/scrub/rtrmap_repair.c
679
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrmap_repair.c
685
return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
fs/xfs/scrub/rtrmap_repair.c
691
struct xfs_btree_cur *cur,
fs/xfs/scrub/rtrmap_repair.c
696
return xfs_rtrmap_broot_space_calc(cur->bc_mp, level, nr_this_level);
fs/xfs/scrub/trace.c
42
struct xfs_btree_cur *cur,
fs/xfs/scrub/trace.c
45
if (level < cur->bc_nlevels && cur->bc_levels[level].bp)
fs/xfs/scrub/trace.c
46
return XFS_DADDR_TO_FSB(cur->bc_mp,
fs/xfs/scrub/trace.c
47
xfs_buf_daddr(cur->bc_levels[level].bp));
fs/xfs/scrub/trace.c
49
if (level == cur->bc_nlevels - 1 &&
fs/xfs/scrub/trace.c
50
cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
fs/xfs/scrub/trace.c
51
return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
fs/xfs/scrub/trace.h
557
TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/trace.h
559
TP_ARGS(sc, cur, level, error, ret_ip),
fs/xfs/scrub/trace.h
563
__string(name, cur->bc_ops->name)
fs/xfs/scrub/trace.h
572
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
fs/xfs/scrub/trace.h
578
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
579
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
580
__entry->ptr = cur->bc_levels[level].ptr;
fs/xfs/scrub/trace.h
597
TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/trace.h
599
TP_ARGS(sc, cur, level, error, ret_ip),
fs/xfs/scrub/trace.h
605
__string(name, cur->bc_ops->name)
fs/xfs/scrub/trace.h
614
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
fs/xfs/scrub/trace.h
616
__entry->ino = cur->bc_ino.ip->i_ino;
fs/xfs/scrub/trace.h
617
__entry->whichfork = cur->bc_ino.whichfork;
fs/xfs/scrub/trace.h
621
__entry->ptr = cur->bc_levels[level].ptr;
fs/xfs/scrub/trace.h
622
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
623
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
642
TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/trace.h
644
TP_ARGS(sc, cur, level, ret_ip),
fs/xfs/scrub/trace.h
648
__string(name, cur->bc_ops->name)
fs/xfs/scrub/trace.h
656
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
fs/xfs/scrub/trace.h
661
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
662
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
663
__entry->ptr = cur->bc_levels[level].ptr;
fs/xfs/scrub/trace.h
678
TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/trace.h
680
TP_ARGS(sc, cur, level, ret_ip),
fs/xfs/scrub/trace.h
686
__string(name, cur->bc_ops->name)
fs/xfs/scrub/trace.h
694
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
fs/xfs/scrub/trace.h
697
__entry->whichfork = cur->bc_ino.whichfork;
fs/xfs/scrub/trace.h
701
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
702
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
703
__entry->ptr = cur->bc_levels[level].ptr;
fs/xfs/scrub/trace.h
720
TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
fs/xfs/scrub/trace.h
722
TP_ARGS(sc, cur, level),
fs/xfs/scrub/trace.h
726
__string(name, cur->bc_ops->name)
fs/xfs/scrub/trace.h
734
xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level);
fs/xfs/scrub/trace.h
739
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
740
__entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
fs/xfs/scrub/trace.h
742
__entry->nlevels = cur->bc_nlevels;
fs/xfs/scrub/trace.h
743
__entry->ptr = cur->bc_levels[level].ptr;
fs/xfs/scrub/trace.h
757
TP_PROTO(struct xfs_scrub *sc, struct xfs_btree_cur *cur, \
fs/xfs/scrub/trace.h
759
TP_ARGS(sc, cur, level))
fs/xfs/scrub/xfarray.c
288
xfarray_idx_t *cur,
fs/xfs/scrub/xfarray.c
327
*cur = xfarray_idx(array, new_pos);
fs/xfs/scrub/xfarray.c
328
*pos = xfarray_pos(array, *cur);
fs/xfs/scrub/xfarray.c
344
xfarray_idx_t cur = *idx;
fs/xfs/scrub/xfarray.c
345
loff_t pos = xfarray_pos(array, cur);
fs/xfs/scrub/xfarray.c
349
if (cur >= array->nr)
fs/xfs/scrub/xfarray.c
356
error = xfarray_find_data(array, &cur, &pos);
fs/xfs/scrub/xfarray.c
359
error = xfarray_load(array, cur, rec);
fs/xfs/scrub/xfarray.c
363
cur++;
fs/xfs/scrub/xfarray.c
367
*idx = cur;
fs/xfs/xfs_bmap_util.c
106
struct xfs_btree_cur *cur;
fs/xfs/xfs_bmap_util.c
122
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fs/xfs/xfs_bmap_util.c
123
error = xfs_btree_count_blocks(cur, &btblocks);
fs/xfs/xfs_bmap_util.c
124
xfs_btree_del_cursor(cur, error);
fs/xfs/xfs_discard.c
168
struct xfs_btree_cur *cur;
fs/xfs/xfs_discard.c
197
cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
fs/xfs/xfs_discard.c
198
error = xfs_alloc_lookup_le(cur, tcur->start, 0, &i);
fs/xfs/xfs_discard.c
200
error = xfs_alloc_lookup_ge(cur, tcur->start, 0, &i);
fs/xfs/xfs_discard.c
203
cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
fs/xfs/xfs_discard.c
204
error = xfs_alloc_lookup_ge(cur, 0, tcur->count, &i);
fs/xfs/xfs_discard.c
207
cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
fs/xfs/xfs_discard.c
208
error = xfs_alloc_lookup_le(cur, tcur->start, tcur->count, &i);
fs/xfs/xfs_discard.c
226
error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
fs/xfs/xfs_discard.c
230
xfs_btree_mark_sick(cur);
fs/xfs/xfs_discard.c
293
error = xfs_btree_increment(cur, 0, &i);
fs/xfs/xfs_discard.c
295
error = xfs_btree_decrement(cur, 0, &i);
fs/xfs/xfs_discard.c
315
xfs_btree_del_cursor(cur, error);
fs/xfs/xfs_dquot.c
761
struct xfs_iext_cursor cur;
fs/xfs/xfs_dquot.c
783
if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
fs/xfs/xfs_fsmap.c
214
struct xfs_btree_cur *cur;
fs/xfs/xfs_fsmap.c
224
cur = xfs_rtrefcountbt_init_cursor(tp, to_rtg(info->group));
fs/xfs/xfs_fsmap.c
226
cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
fs/xfs/xfs_fsmap.c
230
error = xfs_refcount_find_shared(cur, frec->rec_key,
fs/xfs/xfs_fsmap.c
234
xfs_btree_del_cursor(cur, error);
fs/xfs/xfs_fsmap.c
404
struct xfs_btree_cur *cur,
fs/xfs/xfs_fsmap.c
416
return xfs_getfsmap_group_helper(info, cur->bc_tp, cur->bc_group,
fs/xfs/xfs_fsmap.c
423
struct xfs_btree_cur *cur,
fs/xfs/xfs_fsmap.c
433
return xfs_getfsmap_group_helper(info, cur->bc_tp, cur->bc_group,
fs/xfs/xfs_fsmap.c
840
struct xfs_btree_cur *cur,
fs/xfs/xfs_fsmap.c
852
return xfs_getfsmap_group_helper(info, cur->bc_tp, cur->bc_group,
fs/xfs/xfs_health.c
697
struct xfs_btree_cur *cur)
fs/xfs/xfs_health.c
699
if (xfs_btree_is_bmap(cur->bc_ops)) {
fs/xfs/xfs_health.c
700
xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork);
fs/xfs/xfs_health.c
702
} else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) {
fs/xfs/xfs_health.c
703
ASSERT(cur->bc_group);
fs/xfs/xfs_health.c
704
ASSERT(cur->bc_ops->sick_mask);
fs/xfs/xfs_health.c
705
xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask);
fs/xfs/xfs_iwalk.c
399
struct xfs_btree_cur *cur = NULL;
fs/xfs/xfs_iwalk.c
407
error = xfs_iwalk_ag_start(iwag, agino, &cur, &agi_bp, &has_more);
fs/xfs/xfs_iwalk.c
419
error = xfs_inobt_get_rec(cur, irec, &has_more);
fs/xfs/xfs_iwalk.c
427
xfs_btree_mark_sick(cur);
fs/xfs/xfs_iwalk.c
435
error = xfs_btree_increment(cur, 0, &has_more);
fs/xfs/xfs_iwalk.c
453
error = xfs_btree_increment(cur, 0, &has_more);
fs/xfs/xfs_iwalk.c
466
error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
fs/xfs/xfs_iwalk.c
473
error = xfs_iwalk_run_callbacks(iwag, &cur, &agi_bp, &has_more);
fs/xfs/xfs_iwalk.c
476
xfs_iwalk_del_inobt(iwag->tp, &cur, &agi_bp, error);
fs/xfs/xfs_log_cil.c
783
struct xfs_ail_cursor *cur,
fs/xfs/xfs_log_cil.c
792
xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
fs/xfs/xfs_log_cil.c
849
struct xfs_ail_cursor cur;
fs/xfs/xfs_log_cil.c
866
xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn);
fs/xfs/xfs_log_cil.c
940
xlog_cil_ail_insert_batch(ailp, &cur, log_items,
fs/xfs/xfs_log_cil.c
948
xlog_cil_ail_insert_batch(ailp, &cur, log_items, i,
fs/xfs/xfs_log_cil.c
952
xfs_trans_ail_cursor_done(&cur);
fs/xfs/xfs_notify_failure.c
249
struct xfs_btree_cur *cur = NULL;
fs/xfs/xfs_notify_failure.c
311
cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag);
fs/xfs/xfs_notify_failure.c
315
cur = xfs_rtrmapbt_init_cursor(tp, rtg);
fs/xfs/xfs_notify_failure.c
335
error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
fs/xfs/xfs_notify_failure.c
337
xfs_btree_del_cursor(cur, error);
fs/xfs/xfs_notify_failure.c
75
struct xfs_btree_cur *cur,
fs/xfs/xfs_notify_failure.c
79
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/xfs_notify_failure.c
97
error = xfs_iget(mp, cur->bc_tp, rec->rm_owner, XFS_IGET_INCORE,
fs/xfs/xfs_reflink.c
146
struct xfs_btree_cur *cur;
fs/xfs/xfs_reflink.c
157
cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag);
fs/xfs/xfs_reflink.c
158
error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount,
fs/xfs/xfs_reflink.c
160
xfs_btree_del_cursor(cur, error);
fs/xfs/xfs_reflink.c
189
struct xfs_btree_cur *cur;
fs/xfs/xfs_reflink.c
205
cur = xfs_rtrefcountbt_init_cursor(tp, rtg);
fs/xfs/xfs_reflink.c
206
error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount,
fs/xfs/xfs_reflink.c
208
xfs_btree_del_cursor(cur, error);
fs/xfs/xfs_trace.h
2305
TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t bno,
fs/xfs/xfs_trace.h
2307
TP_ARGS(cur, bno, len, diff, new),
fs/xfs/xfs_trace.h
2310
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
2317
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
2894
TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
fs/xfs/xfs_trace.h
2895
TP_ARGS(cur, level, bp),
fs/xfs/xfs_trace.h
2898
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
2905
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
2908
__entry->nlevels = cur->bc_nlevels;
fs/xfs/xfs_trace.h
2909
__entry->ptr = cur->bc_levels[level].ptr;
fs/xfs/xfs_trace.h
2923
TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), \
fs/xfs/xfs_trace.h
2924
TP_ARGS(cur, level, bp))
fs/xfs/xfs_trace.h
2929
TP_PROTO(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int stat,
fs/xfs/xfs_trace.h
2931
TP_ARGS(cur, ptr, stat, error),
fs/xfs/xfs_trace.h
2936
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
2941
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
2942
switch (cur->bc_ops->type) {
fs/xfs/xfs_trace.h
2945
__entry->ino = cur->bc_ino.ip->i_ino;
fs/xfs/xfs_trace.h
2948
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
2959
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/xfs_trace.h
2962
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp,
fs/xfs/xfs_trace.h
2964
__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp,
fs/xfs/xfs_trace.h
2983
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_buf *bp),
fs/xfs/xfs_trace.h
2984
TP_ARGS(cur, bp),
fs/xfs/xfs_trace.h
2989
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
2993
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
2994
__entry->agno = xfs_daddr_to_agno(cur->bc_mp,
fs/xfs/xfs_trace.h
2996
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
fs/xfs/xfs_trace.h
2997
__entry->ino = cur->bc_ino.ip->i_ino;
fs/xfs/xfs_trace.h
3001
__entry->agbno = xfs_daddr_to_agbno(cur->bc_mp,
fs/xfs/xfs_trace.h
3192
TP_PROTO(struct xfs_btree_cur *cur,
fs/xfs/xfs_trace.h
3195
TP_ARGS(cur, gbno, len, unwritten, oinfo),
fs/xfs/xfs_trace.h
3207
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3208
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3209
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3230
TP_PROTO(struct xfs_btree_cur *cur, \
fs/xfs/xfs_trace.h
3233
TP_ARGS(cur, gbno, len, unwritten, oinfo))
fs/xfs/xfs_trace.h
3237
TP_PROTO(struct xfs_btree_cur *cur, int error,
fs/xfs/xfs_trace.h
3239
TP_ARGS(cur, error, caller_ip),
fs/xfs/xfs_trace.h
3248
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3249
switch (cur->bc_ops->type) {
fs/xfs/xfs_trace.h
3252
__entry->ino = cur->bc_ino.ip->i_ino;
fs/xfs/xfs_trace.h
3255
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3276
TP_PROTO(struct xfs_btree_cur *cur, int error, \
fs/xfs/xfs_trace.h
3278
TP_ARGS(cur, error, caller_ip))
fs/xfs/xfs_trace.h
3291
TP_PROTO(struct xfs_btree_cur *cur, int state,
fs/xfs/xfs_trace.h
3293
TP_ARGS(cur, state, caller_ip),
fs/xfs/xfs_trace.h
3302
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3303
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3304
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3317
TP_PROTO(struct xfs_btree_cur *cur,
fs/xfs/xfs_trace.h
3320
TP_ARGS(cur, gbno, len, owner, offset, flags),
fs/xfs/xfs_trace.h
3332
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3333
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3334
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3353
TP_PROTO(struct xfs_btree_cur *cur, \
fs/xfs/xfs_trace.h
3356
TP_ARGS(cur, gbno, len, owner, offset, flags))
fs/xfs/xfs_trace.h
3575
TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno,
fs/xfs/xfs_trace.h
3577
TP_ARGS(cur, gbno, len),
fs/xfs/xfs_trace.h
3586
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3587
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3588
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3601
TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, \
fs/xfs/xfs_trace.h
3603
TP_ARGS(cur, gbno, len))
fs/xfs/xfs_trace.h
3609
TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno,
fs/xfs/xfs_trace.h
3611
TP_ARGS(cur, gbno, dir),
fs/xfs/xfs_trace.h
3620
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3621
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3622
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3637
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec),
fs/xfs/xfs_trace.h
3638
TP_ARGS(cur, irec),
fs/xfs/xfs_trace.h
3649
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3650
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3651
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3669
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec), \
fs/xfs/xfs_trace.h
3670
TP_ARGS(cur, irec))
fs/xfs/xfs_trace.h
3674
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec,
fs/xfs/xfs_trace.h
3676
TP_ARGS(cur, irec, gbno),
fs/xfs/xfs_trace.h
3688
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3689
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3690
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3710
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, \
fs/xfs/xfs_trace.h
3712
TP_ARGS(cur, irec, gbno))
fs/xfs/xfs_trace.h
3716
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
fs/xfs/xfs_trace.h
3718
TP_ARGS(cur, i1, i2),
fs/xfs/xfs_trace.h
3733
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3734
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3735
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3762
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
fs/xfs/xfs_trace.h
3764
TP_ARGS(cur, i1, i2))
fs/xfs/xfs_trace.h
3768
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
fs/xfs/xfs_trace.h
3770
TP_ARGS(cur, i1, i2, gbno),
fs/xfs/xfs_trace.h
3786
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3787
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3788
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3817
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
fs/xfs/xfs_trace.h
3819
TP_ARGS(cur, i1, i2, gbno))
fs/xfs/xfs_trace.h
3823
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
fs/xfs/xfs_trace.h
3825
TP_ARGS(cur, i1, i2, i3),
fs/xfs/xfs_trace.h
3844
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
3845
__entry->type = cur->bc_group->xg_type;
fs/xfs/xfs_trace.h
3846
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
3882
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
fs/xfs/xfs_trace.h
3884
TP_ARGS(cur, i1, i2, i3))
fs/xfs/xfs_trace.h
4728
TP_PROTO(struct xfs_btree_cur *cur),
fs/xfs/xfs_trace.h
4729
TP_ARGS(cur),
fs/xfs/xfs_trace.h
4732
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
4739
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
4741
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
4742
__entry->agbno = cur->bc_ag.afake->af_root;
fs/xfs/xfs_trace.h
4743
__entry->levels = cur->bc_ag.afake->af_levels;
fs/xfs/xfs_trace.h
4744
__entry->blocks = cur->bc_ag.afake->af_blocks;
fs/xfs/xfs_trace.h
4756
TP_PROTO(struct xfs_btree_cur *cur),
fs/xfs/xfs_trace.h
4757
TP_ARGS(cur),
fs/xfs/xfs_trace.h
4760
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
4768
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
4770
__entry->agno = XFS_INO_TO_AGNO(cur->bc_mp,
fs/xfs/xfs_trace.h
4771
cur->bc_ino.ip->i_ino);
fs/xfs/xfs_trace.h
4772
__entry->agino = XFS_INO_TO_AGINO(cur->bc_mp,
fs/xfs/xfs_trace.h
4773
cur->bc_ino.ip->i_ino);
fs/xfs/xfs_trace.h
4774
__entry->levels = cur->bc_ino.ifake->if_levels;
fs/xfs/xfs_trace.h
4775
__entry->blocks = cur->bc_ino.ifake->if_blocks;
fs/xfs/xfs_trace.h
4776
__entry->whichfork = cur->bc_ino.whichfork;
fs/xfs/xfs_trace.h
4789
TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
fs/xfs/xfs_trace.h
4793
TP_ARGS(cur, level, nr_this_level, nr_per_block, desired_npb, blocks,
fs/xfs/xfs_trace.h
4797
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
4807
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
4810
__entry->nlevels = cur->bc_nlevels;
fs/xfs/xfs_trace.h
4830
TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
fs/xfs/xfs_trace.h
4833
TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records),
fs/xfs/xfs_trace.h
4836
__string(name, cur->bc_ops->name)
fs/xfs/xfs_trace.h
4845
__entry->dev = cur->bc_mp->m_super->s_dev;
fs/xfs/xfs_trace.h
4850
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fs/xfs/xfs_trace.h
4853
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
fs/xfs/xfs_trace.h
4854
__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
fs/xfs/xfs_trace.h
4856
__entry->agno = cur->bc_group->xg_gno;
fs/xfs/xfs_trace.h
5233
TP_PROTO(struct xfbtree *xfbt, struct xfs_btree_cur *cur,
fs/xfs/xfs_trace.h
5235
TP_ARGS(xfbt, cur, fileoff),
fs/xfs/xfs_trace.h
5238
__string(btname, cur->bc_ops->name)
fs/xfs/xfs_trace.h
5245
__entry->nlevels = cur->bc_nlevels;
fs/xfs/xfs_trace.h
5257
TP_PROTO(struct xfbtree *xfbt, struct xfs_btree_cur *cur, \
fs/xfs/xfs_trace.h
5259
TP_ARGS(xfbt, cur, fileoff))
fs/xfs/xfs_trace.h
5689
const struct xfs_attrlist_cursor_kern *cur),
fs/xfs/xfs_trace.h
5690
TP_ARGS(ip, ppi, cur),
fs/xfs/xfs_trace.h
5708
__entry->hashval = cur->hashval;
fs/xfs/xfs_trace.h
5709
__entry->blkno = cur->blkno;
fs/xfs/xfs_trace.h
5710
__entry->offset = cur->offset;
fs/xfs/xfs_trace.h
5711
__entry->initted = cur->initted;
fs/xfs/xfs_trace.h
5727
const struct xfs_attrlist_cursor_kern *cur), \
fs/xfs/xfs_trace.h
5728
TP_ARGS(ip, ppi, cur))
fs/xfs/xfs_trace.h
667
TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state,
fs/xfs/xfs_trace.h
669
TP_ARGS(ip, cur, state, caller_ip),
fs/xfs/xfs_trace.h
687
xfs_iext_get_extent(ifp, cur, &r);
fs/xfs/xfs_trace.h
690
__entry->leaf = cur->leaf;
fs/xfs/xfs_trace.h
691
__entry->pos = cur->pos;
fs/xfs/xfs_trace.h
715
TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, \
fs/xfs/xfs_trace.h
717
TP_ARGS(ip, cur, state, caller_ip))
fs/xfs/xfs_trans_ail.c
147
struct xfs_ail_cursor *cur)
fs/xfs/xfs_trans_ail.c
149
cur->item = NULL;
fs/xfs/xfs_trans_ail.c
150
list_add_tail(&cur->list, &ailp->ail_cursors);
fs/xfs/xfs_trans_ail.c
160
struct xfs_ail_cursor *cur)
fs/xfs/xfs_trans_ail.c
162
struct xfs_log_item *lip = cur->item;
fs/xfs/xfs_trans_ail.c
167
cur->item = xfs_ail_next(ailp, lip);
fs/xfs/xfs_trans_ail.c
177
struct xfs_ail_cursor *cur)
fs/xfs/xfs_trans_ail.c
179
cur->item = NULL;
fs/xfs/xfs_trans_ail.c
180
list_del_init(&cur->list);
fs/xfs/xfs_trans_ail.c
196
struct xfs_ail_cursor *cur;
fs/xfs/xfs_trans_ail.c
198
list_for_each_entry(cur, &ailp->ail_cursors, list) {
fs/xfs/xfs_trans_ail.c
199
if (cur->item == lip)
fs/xfs/xfs_trans_ail.c
200
cur->item = (struct xfs_log_item *)
fs/xfs/xfs_trans_ail.c
201
((uintptr_t)cur->item | 1);
fs/xfs/xfs_trans_ail.c
214
struct xfs_ail_cursor *cur,
fs/xfs/xfs_trans_ail.c
219
xfs_trans_ail_cursor_init(ailp, cur);
fs/xfs/xfs_trans_ail.c
234
cur->item = xfs_ail_next(ailp, lip);
fs/xfs/xfs_trans_ail.c
261
struct xfs_ail_cursor *cur,
fs/xfs/xfs_trans_ail.c
264
xfs_trans_ail_cursor_init(ailp, cur);
fs/xfs/xfs_trans_ail.c
265
cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
fs/xfs/xfs_trans_ail.c
266
return cur->item;
fs/xfs/xfs_trans_ail.c
278
struct xfs_ail_cursor *cur,
fs/xfs/xfs_trans_ail.c
291
lip = cur ? cur->item : NULL;
fs/xfs/xfs_trans_ail.c
302
if (cur)
fs/xfs/xfs_trans_ail.c
303
cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);
fs/xfs/xfs_trans_ail.c
540
struct xfs_ail_cursor cur;
fs/xfs/xfs_trans_ail.c
572
lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
fs/xfs/xfs_trans_ail.c
606
lip = xfs_trans_ail_cursor_next(ailp, &cur);
fs/xfs/xfs_trans_ail.c
615
xfs_trans_ail_cursor_done(&cur);
fs/xfs/xfs_trans_ail.c
829
struct xfs_ail_cursor *cur,
fs/xfs/xfs_trans_ail.c
862
xfs_ail_splice(ailp, cur, &tmp, lsn);
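
Read together, the xfs_trans_ail.c hits define the AIL cursor protocol the CIL code above consumes: xfs_trans_ail_cursor_init() registers the cursor on ailp->ail_cursors (so the clearing code quoted above can invalidate it when its item is deleted), the first/last helpers position it relative to a target LSN, next advances it, and done unregisters it. The xfsaild push loop quoted above uses it roughly like this (a sketch assembled from the quoted call sites; locking and the push work itself are elided):

	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;

	/* Position at the first item at or after the last pushed LSN. */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	while (lip) {
		/* ... try to push lip toward the log tail ... */
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
	/* Unregister the cursor from ailp->ail_cursors. */
	xfs_trans_ail_cursor_done(&cur);
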
fs/xfs/xfs_trans_priv.h
125
struct xfs_ail_cursor *cur,
fs/xfs/xfs_trans_priv.h
128
struct xfs_ail_cursor *cur,
fs/xfs/xfs_trans_priv.h
131
struct xfs_ail_cursor *cur);
fs/xfs/xfs_trans_priv.h
132
void xfs_trans_ail_cursor_done(struct xfs_ail_cursor *cur);
fs/xfs/xfs_trans_priv.h
73
struct xfs_ail_cursor *cur,
fs/xfs/xfs_verify_media.c
115
struct xfs_btree_cur *cur;
fs/xfs/xfs_verify_media.c
129
cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag);
fs/xfs/xfs_verify_media.c
133
cur = xfs_rtrmapbt_init_cursor(tp, rtg);
fs/xfs/xfs_verify_media.c
153
error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
fs/xfs/xfs_verify_media.c
155
xfs_btree_del_cursor(cur, error);
fs/xfs/xfs_verify_media.c
38
struct xfs_btree_cur *cur,
fs/xfs/xfs_verify_media.c
42
struct xfs_mount *mp = cur->bc_mp;
fs/xfs/xfs_verify_media.c
58
error = xfs_iget(mp, cur->bc_tp, rec->rm_owner, 0, 0, &ip);
fs/xfs/xfs_zone_gc.c
267
struct xfs_btree_cur *cur,
fs/xfs/xfs_zone_gc.c
274
ASSERT(!xfs_is_sb_inum(cur->bc_mp, irec->rm_owner));
fs/xfs/xfs_zone_gc.c
309
struct xfs_btree_cur *cur;
fs/xfs/xfs_zone_gc.c
326
cur = xfs_rtrmapbt_init_cursor(tp, rtg);
fs/xfs/xfs_zone_gc.c
327
error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
fs/xfs/xfs_zone_gc.c
330
xfs_btree_del_cursor(cur, error < 0 ? error : 0);
include/crypto/if_alg.h
67
unsigned int cur; /* Last processed SG entry */
include/crypto/internal/acompress.h
211
static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
include/crypto/internal/acompress.h
213
return walk->slen != cur;
include/dt-bindings/usb/pd.h
341
#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
include/dt-bindings/usb/pd.h
344
| (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
include/dt-bindings/usb/pd.h
346
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
include/dt-bindings/usb/pd.h
349
| ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
include/dt-bindings/usb/pd.h
350
#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
include/dt-bindings/usb/pd.h
353
| ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
include/linux/bpf_verifier.h
694
u32 cur;
include/linux/bpf_verifier.h
896
struct bpf_verifier_state *cur = env->cur_state;
include/linux/bpf_verifier.h
898
return cur->frame[cur->curframe];
include/linux/ceph/libceph.h
199
type *cur = rb_entry(*n, type, nodefld); \
include/linux/ceph/libceph.h
203
cmp = cmpexp(keyexp(t->keyfld), keyexp(cur->keyfld)); \
include/linux/ceph/libceph.h
239
type *cur = rb_entry(n, type, nodefld); \
include/linux/ceph/libceph.h
242
cmp = cmpexp(key, keyexp(cur->keyfld)); \
include/linux/ceph/libceph.h
248
return cur; \
include/linux/cpufreq.h
667
if (policy->max < policy->cur)
include/linux/cpufreq.h
670
else if (policy->min > policy->cur)
include/linux/cpufreq.h
68
unsigned int cur; /* in kHz, only needed if cpufreq
include/linux/device/bus.h
219
bus_find_next_device(const struct bus_type *bus,struct device *cur)
include/linux/device/bus.h
221
return bus_find_device(bus, cur, NULL, device_match_any);
include/linux/if_team.h
284
struct team_port *cur;
include/linux/if_team.h
288
cur = port;
include/linux/if_team.h
289
list_for_each_entry_continue_rcu(cur, &team->port_list, list)
include/linux/if_team.h
290
if (team_port_txable(cur))
include/linux/if_team.h
291
return cur;
include/linux/if_team.h
292
list_for_each_entry_rcu(cur, &team->port_list, list) {
include/linux/if_team.h
293
if (cur == port)
include/linux/if_team.h
295
if (team_port_txable(cur))
include/linux/if_team.h
296
return cur;
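
The if_team.h fragment is a wrap-around round-robin search: continue the RCU list walk from just after the given port, and if nothing transmit-capable turns up before the tail, rescan from the head up to (but not past) the starting port. Reassembled with the two passes annotated; the final NULL return for the empty case is an assumption, since that line is not quoted above:

	struct team_port *cur;

	/* Pass 1: every port after the one used last time. */
	cur = port;
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;

	/* Pass 2: wrap to the head, stopping where pass 1 started. */
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;	/* assumed: no txable port found */
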
include/linux/iversion.h
144
u64 cur = inode_peek_iversion_raw(inode);
include/linux/iversion.h
147
if (cur > val)
include/linux/iversion.h
149
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
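
The iversion.h fragment is the standard lock-free raise-to-at-least loop: peek the current value, stop if it already meets the target, and otherwise let atomic64_try_cmpxchg() either install val or refresh cur with the value that won the race and go around again. Reassembled below; the body of the early-exit branch is not quoted above, so the plain break is an assumption:

	u64 cur = inode_peek_iversion_raw(inode);

	do {
		/* Someone already raised it past val: nothing to do. */
		if (cur > val)
			break;		/* assumed body of the quoted branch */
		/* On failure, try_cmpxchg reloads cur with the fresh value. */
	} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
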
include/linux/kprobes.h
548
struct llist_node **cur)
include/linux/kprobes.h
550
return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
include/linux/kprobes.h
559
struct llist_node **cur);
include/linux/kprobes.h
569
struct llist_node **cur)
include/linux/kvm_host.h
309
static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
include/linux/kvm_host.h
311
return single_task_running() && !need_resched() && ktime_before(cur, stop);
include/linux/lockd/lockd.h
272
typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
include/linux/mdio-mux.h
25
int (*switch_fn) (int cur, int desired, void *data),
include/linux/mlx5/device.h
1295
MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
include/linux/mlx5/device.h
1298
MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
include/linux/mlx5/device.h
1304
MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
include/linux/mlx5/device.h
1307
MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
include/linux/mlx5/device.h
1314
mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
include/linux/mlx5/device.h
1318
mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
include/linux/mlx5/device.h
1321
MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
include/linux/mlx5/device.h
1327
MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
include/linux/mlx5/device.h
1333
MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
include/linux/mlx5/device.h
1336
MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
include/linux/mlx5/device.h
1364
mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
include/linux/mlx5/device.h
1383
mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
include/linux/mlx5/device.h
1387
(mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
include/linux/mlx5/device.h
1391
mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
include/linux/mlx5/device.h
1399
mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
include/linux/mlx5/device.h
1403
mdev->caps.hca[MLX5_CAP_ADV_RDMA]->cur, cap)
include/linux/mlx5/device.h
1412
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
include/linux/mlx5/device.h
1415
(MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
include/linux/mlx5/device.h
1417
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
include/linux/mlx5/device.h
1419
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
include/linux/mlx5/device.h
1426
MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
include/linux/mlx5/device.h
1429
MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
include/linux/mlx5/device.h
1465
MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
include/linux/mlx5/device.h
1468
MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
include/linux/mlx5/device.h
1471
MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
include/linux/mlx5/device.h
1474
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
include/linux/mlx5/device.h
1478
(mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
include/linux/mlx5/device.h
1482
(mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
include/linux/mlx5/device.h
1485
MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
include/linux/mlx5/device.h
1488
MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
include/linux/mlx5/device.h
1491
MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
include/linux/mlx5/device.h
1494
MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_SHAMPO]->cur, cap)
include/linux/mlx5/device.h
1497
MLX5_GET(psp_cap, (mdev)->caps.hca[MLX5_CAP_PSP]->cur, cap)
include/linux/mlx5/driver.h
728
u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
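
All the mlx5 device.h lines above follow one accessor scheme: each capability type caches the firmware-reported capabilities in the `cur` dword array declared in driver.h (quoted last), and the per-type macros wrap MLX5_GET()/MLX5_GET64() with the matching auto-generated struct layout, so later capability reads are pure bitfield extractions with no firmware round trip. A sketch of driver-side consumption; log_max_qp is a real cmd_hca_cap field, used here purely as an example:

	static u32 example_max_qps(struct mlx5_core_dev *mdev)
	{
		/* Per the quoted definition, MLX5_CAP_GEN(mdev, log_max_qp)
		 * expands to MLX5_GET(cmd_hca_cap,
		 * mdev->caps.hca[MLX5_CAP_GENERAL]->cur, log_max_qp). */
		return 1U << MLX5_CAP_GEN(mdev, log_max_qp);
	}
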
include/linux/of.h
451
const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur,
include/linux/of.h
460
const char *of_prop_next_string(const struct property *prop, const char *cur);
include/linux/of.h
88
const __be32 *cur;
include/linux/of.h
886
const __be32 *cur, u32 *pu)
include/linux/of.h
892
const char *cur)
include/linux/rethook.h
67
struct llist_node **cur);
include/linux/sched.h
1884
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
include/linux/soc/mediatek/mtk-mmsys.h
88
enum mtk_ddp_comp_id cur,
include/linux/soc/mediatek/mtk-mmsys.h
92
enum mtk_ddp_comp_id cur,
include/linux/usb/pd_vdo.h
371
#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
include/linux/usb/pd_vdo.h
374
| (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
include/linux/usb/pd_vdo.h
376
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
include/linux/usb/pd_vdo.h
379
| ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
include/linux/usb/pd_vdo.h
380
#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
include/linux/usb/pd_vdo.h
383
| ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
include/media/v4l2-ctrls.h
318
} cur;
include/net/fq_impl.h
173
struct fq_flow *cur = &fq->flows[i];
include/net/fq_impl.h
176
cur_len = cur->backlog;
include/net/fq_impl.h
180
flow = cur;
include/net/mac80211.h
2583
struct ieee80211_sta_aggregates *cur;
include/net/netfilter/nf_tables.h
132
const struct nft_expr *cur;
include/trace/events/timestamp.h
56
u32 cur),
include/trace/events/timestamp.h
58
TP_ARGS(inode, old, new, cur),
include/trace/events/timestamp.h
66
__field(u32, cur)
include/trace/events/timestamp.h
75
__entry->cur = cur;
include/trace/events/timestamp.h
83
__entry->cur & ~I_CTIME_QUERIED,
include/trace/events/timestamp.h
84
__print_flags(__entry->cur & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS)
io_uring/io_uring.c
391
struct io_kiocb *cur;
io_uring/io_uring.c
397
io_for_each_link(cur, req)
io_uring/io_uring.c
398
io_prep_async_work(cur);
io_uring/io_uring.c
401
io_for_each_link(cur, req)
io_uring/io_uring.c
402
io_prep_async_work(cur);
kernel/bpf/bpf_iter.c
764
int cur; /* current value, inclusive */
kernel/bpf/bpf_iter.c
781
s->cur = s->end = 0;
kernel/bpf/bpf_iter.c
787
s->cur = s->end = 0;
kernel/bpf/bpf_iter.c
795
s->cur = start - 1;
kernel/bpf/bpf_iter.c
810
if ((s64)(s->cur + 1) >= s->end) {
kernel/bpf/bpf_iter.c
811
s->cur = s->end = 0;
kernel/bpf/bpf_iter.c
815
s->cur++;
kernel/bpf/bpf_iter.c
817
return &s->cur;
kernel/bpf/bpf_iter.c
824
s->cur = s->end = 0;
kernel/bpf/bpf_lru_list.c
167
struct list_head *cur, *last, *next = inactive;
kernel/bpf/bpf_lru_list.c
178
cur = l->next_inactive_rotation;
kernel/bpf/bpf_lru_list.c
180
if (cur == inactive) {
kernel/bpf/bpf_lru_list.c
181
cur = cur->prev;
kernel/bpf/bpf_lru_list.c
185
node = list_entry(cur, struct bpf_lru_node, list);
kernel/bpf/bpf_lru_list.c
186
next = cur->prev;
kernel/bpf/bpf_lru_list.c
189
if (cur == last)
kernel/bpf/bpf_lru_list.c
191
cur = next;
kernel/bpf/btf.c
3666
u32 cur;
kernel/bpf/btf.c
3689
cur = field_cnt;
kernel/bpf/btf.c
3691
memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
kernel/bpf/btf.c
3693
info[cur++].off += (i + 1) * elem_size;
kernel/bpf/btf.c
5365
void *cur, *end;
kernel/bpf/btf.c
5368
cur = btf->nohdr_data + hdr->type_off;
kernel/bpf/btf.c
5369
end = cur + hdr->type_len;
kernel/bpf/btf.c
5372
while (cur < end) {
kernel/bpf/btf.c
5373
struct btf_type *t = cur;
kernel/bpf/btf.c
5376
meta_size = btf_check_meta(env, t, end - cur);
kernel/bpf/btf.c
5381
cur += meta_size;
kernel/bpf/rqspinlock.c
202
u64 prev = ts->cur;
kernel/bpf/rqspinlock.c
208
ts->cur = ktime_get_mono_fast_ns();
kernel/bpf/rqspinlock.c
209
ts->timeout_end = ts->cur + ts->duration;
kernel/bpf/rqspinlock.c
222
ts->cur = time;
kernel/bpf/rqspinlock.c
83
u64 cur;
kernel/bpf/verifier.c
1808
struct bpf_verifier_state *cur = env->cur_state;
kernel/bpf/verifier.c
1809
struct bpf_func_state *state = cur->frame[cur->curframe];
kernel/bpf/verifier.c
19561
const struct bpf_reg_state *cur)
kernel/bpf/verifier.c
19563
return old->umin_value <= cur->umin_value &&
kernel/bpf/verifier.c
19564
old->umax_value >= cur->umax_value &&
kernel/bpf/verifier.c
19565
old->smin_value <= cur->smin_value &&
kernel/bpf/verifier.c
19566
old->smax_value >= cur->smax_value &&
kernel/bpf/verifier.c
19567
old->u32_min_value <= cur->u32_min_value &&
kernel/bpf/verifier.c
19568
old->u32_max_value >= cur->u32_max_value &&
kernel/bpf/verifier.c
19569
old->s32_min_value <= cur->s32_min_value &&
kernel/bpf/verifier.c
19570
old->s32_max_value >= cur->s32_max_value;
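
The verifier fragment above is the range-subsumption test behind state pruning: a previously verified (old) register state may stand in for the current (cur) one only if old's bounds enclose cur's in every tracked domain (unsigned, signed, and their 32-bit variants). The shape of the check, reduced to one domain with a concrete case in the comment:

	/* old [0, 100] encloses cur [10, 20]   -> pruning is safe;
	 * old [16, 32] does not enclose cur [8, 24] -> keep verifying. */
	static bool range_within(s64 old_min, s64 old_max,
				 s64 cur_min, s64 cur_max)
	{
		return old_min <= cur_min && old_max >= cur_max;
	}
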
kernel/bpf/verifier.c
19597
return map[i].cur == cur_id;
kernel/bpf/verifier.c
19598
if (map[i].cur == cur_id)
kernel/bpf/verifier.c
19605
map[idmap->cnt].cur = cur_id;
kernel/bpf/verifier.c
19778
struct bpf_verifier_state *cur)
kernel/bpf/verifier.c
19789
!same_callsites(&sl->state, cur))
kernel/bpf/verifier.c
20011
struct bpf_func_state *cur, struct bpf_idmap *idmap,
kernel/bpf/verifier.c
20026
(i >= cur->allocated_stack ||
kernel/bpf/verifier.c
20028
cur->stack[spi].slot_type[i % BPF_REG_SIZE]))
kernel/bpf/verifier.c
20041
if (i >= cur->allocated_stack)
kernel/bpf/verifier.c
20050
cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]);
kernel/bpf/verifier.c
20063
cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
kernel/bpf/verifier.c
20066
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
kernel/bpf/verifier.c
20089
&cur->stack[spi].spilled_ptr, idmap, exact))
kernel/bpf/verifier.c
20094
cur_reg = &cur->stack[spi].spilled_ptr;
kernel/bpf/verifier.c
20102
cur_reg = &cur->stack[spi].spilled_ptr;
kernel/bpf/verifier.c
20118
cur_reg = &cur->stack[spi].spilled_ptr;
kernel/bpf/verifier.c
20135
static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur,
kernel/bpf/verifier.c
20140
if (old->acquired_refs != cur->acquired_refs)
kernel/bpf/verifier.c
20143
if (old->active_locks != cur->active_locks)
kernel/bpf/verifier.c
20146
if (old->active_preempt_locks != cur->active_preempt_locks)
kernel/bpf/verifier.c
20149
if (old->active_rcu_locks != cur->active_rcu_locks)
kernel/bpf/verifier.c
20152
if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
kernel/bpf/verifier.c
20155
if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) ||
kernel/bpf/verifier.c
20156
old->active_lock_ptr != cur->active_lock_ptr)
kernel/bpf/verifier.c
20160
if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
kernel/bpf/verifier.c
20161
old->refs[i].type != cur->refs[i].type)
kernel/bpf/verifier.c
20170
if (old->refs[i].ptr != cur->refs[i].ptr)
kernel/bpf/verifier.c
20209
struct bpf_func_state *cur, u32 insn_idx, enum exact_level exact)
kernel/bpf/verifier.c
20214
if (old->callback_depth > cur->callback_depth)
kernel/bpf/verifier.c
20219
!regsafe(env, &old->regs[i], &cur->regs[i],
kernel/bpf/verifier.c
20223
if (!stacksafe(env, old, cur, &env->idmap_scratch, exact))
kernel/bpf/verifier.c
20239
struct bpf_verifier_state *cur,
kernel/bpf/verifier.c
20245
if (old->curframe != cur->curframe)
kernel/bpf/verifier.c
20253
if (old->speculative && !cur->speculative)
kernel/bpf/verifier.c
20256
if (old->in_sleepable != cur->in_sleepable)
kernel/bpf/verifier.c
20259
if (!refsafe(old, cur, &env->idmap_scratch))
kernel/bpf/verifier.c
20267
if (old->frame[i]->callsite != cur->frame[i]->callsite)
kernel/bpf/verifier.c
20269
if (!func_states_equal(env, old->frame[i], cur->frame[i], insn_idx, exact))
kernel/bpf/verifier.c
20280
struct bpf_verifier_state *cur,
kernel/bpf/verifier.c
20327
err = __mark_chain_precision(env, cur, -1, changed);
kernel/bpf/verifier.c
20372
struct bpf_verifier_state *cur)
kernel/bpf/verifier.c
20375
int i, fr = cur->curframe;
kernel/bpf/verifier.c
20381
fcur = cur->frame[fr];
kernel/bpf/verifier.c
20452
static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
kernel/bpf/verifier.c
20468
cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
kernel/bpf/verifier.c
20480
struct bpf_verifier_state *cur = env->cur_state, *new;
kernel/bpf/verifier.c
20487
cur->jmp_history_cnt > 40;
kernel/bpf/verifier.c
20502
clean_live_states(env, insn_idx, cur);
kernel/bpf/verifier.c
20516
frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
kernel/bpf/verifier.c
20567
if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
kernel/bpf/verifier.c
20572
cur_frame = cur->frame[cur->curframe];
kernel/bpf/verifier.c
20591
if (sl->state.may_goto_depth != cur->may_goto_depth &&
kernel/bpf/verifier.c
20592
states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
kernel/bpf/verifier.c
20598
if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) {
kernel/bpf/verifier.c
20605
if (states_maybe_looping(&sl->state, cur) &&
kernel/bpf/verifier.c
20606
states_equal(env, &sl->state, cur, EXACT) &&
kernel/bpf/verifier.c
20607
!iter_active_depths_differ(&sl->state, cur) &&
kernel/bpf/verifier.c
20608
sl->state.may_goto_depth == cur->may_goto_depth &&
kernel/bpf/verifier.c
20609
sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
kernel/bpf/verifier.c
20613
print_verifier_state(env, cur, cur->curframe, true);
kernel/bpf/verifier.c
20615
print_verifier_state(env, &sl->state, cur->curframe, true);
kernel/bpf/verifier.c
20639
if (states_equal(env, &sl->state, cur, loop ? RANGE_WITHIN : NOT_EXACT)) {
kernel/bpf/verifier.c
20650
err = push_jmp_history(env, cur, 0, 0);
kernel/bpf/verifier.c
20651
err = err ? : propagate_precision(env, &sl->state, cur, NULL);
kernel/bpf/verifier.c
20733
err = copy_verifier_state(&backedge->state, cur);
kernel/bpf/verifier.c
20805
mark_all_scalars_imprecise(env, cur);
kernel/bpf/verifier.c
20807
clear_singular_ids(env, cur);
kernel/bpf/verifier.c
20811
err = copy_verifier_state(new, cur);
kernel/bpf/verifier.c
20828
cur->parent = new;
kernel/bpf/verifier.c
20829
cur->first_insn_idx = insn_idx;
kernel/bpf/verifier.c
20830
cur->dfs_depth = new->dfs_depth + 1;
kernel/bpf/verifier.c
20831
clear_jmp_history(cur);
kernel/bpf/verifier.c
2091
struct bpf_verifier_state *cur = env->cur_state;
kernel/bpf/verifier.c
2098
if (cur) {
kernel/bpf/verifier.c
2099
err = copy_verifier_state(cur, &head->st);
kernel/bpf/verifier.c
2133
struct bpf_verifier_state *cur = env->cur_state;
kernel/bpf/verifier.c
2147
err = copy_verifier_state(&elem->st, cur);
kernel/bpf/verifier.c
4086
static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
kernel/bpf/verifier.c
4089
u32 cnt = cur->jmp_history_cnt;
kernel/bpf/verifier.c
4112
p = krealloc(cur->jmp_history, alloc_size, GFP_KERNEL_ACCOUNT);
kernel/bpf/verifier.c
4115
cur->jmp_history = p;
kernel/bpf/verifier.c
4117
p = &cur->jmp_history[cnt - 1];
kernel/bpf/verifier.c
4122
cur->jmp_history_cnt = cnt;
kernel/bpf/verifier.c
5253
struct bpf_func_state *cur; /* state of the current function */
kernel/bpf/verifier.c
5270
cur = env->cur_state->frame[env->cur_state->curframe];
kernel/bpf/verifier.c
5272
reg = &cur->regs[value_regno];
kernel/bpf/verifier.c
5333
if (state != cur && reg->type == PTR_TO_STACK) {
kernel/bpf/verifier.c
5399
struct bpf_func_state *cur; /* state of the current function */
kernel/bpf/verifier.c
5410
cur = env->cur_state->frame[env->cur_state->curframe];
kernel/bpf/verifier.c
5411
ptr_reg = &cur->regs[ptr_regno];
kernel/bpf/verifier.c
5415
value_reg = &cur->regs[value_regno];
kernel/bpf/verifier.c
635
struct bpf_verifier_state *cur = env->cur_state;
kernel/bpf/verifier.c
637
return cur->frame[reg->frameno];
kernel/bpf/verifier.c
8587
struct bpf_verifier_state *cur = env->cur_state;
kernel/bpf/verifier.c
8636
if (!is_res_lock && cur->active_locks) {
kernel/bpf/verifier.c
8642
} else if (is_res_lock && cur->active_locks) {
kernel/bpf/verifier.c
8669
if (!cur->active_locks) {
kernel/bpf/verifier.c
8680
if (!find_lock_state(cur, type, reg->id, ptr)) {
kernel/bpf/verifier.c
8684
if (reg->id != cur->active_lock_id || ptr != cur->active_lock_ptr) {
kernel/bpf/verifier.c
8688
if (release_lock_state(cur, type, reg->id, ptr)) {
kernel/bpf/verifier.c
9058
struct bpf_verifier_state *cur,
kernel/bpf/verifier.c
9073
if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) &&
kernel/bpf/verifier.c
9074
st->dfs_depth < cur->dfs_depth)
kernel/bpf/verifier.c
9110
struct bpf_verifier_state *cur)
kernel/bpf/verifier.c
9117
fcur = cur->frame[fr];
kernel/cgroup/cpuset-internal.h
323
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
kernel/cgroup/cpuset-internal.h
337
static inline int cpuset1_validate_change(struct cpuset *cur,
kernel/cgroup/cpuset-v1.c
351
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial)
kernel/cgroup/cpuset-v1.c
361
cpuset_for_each_child(c, css, cur)
kernel/cgroup/cpuset-v1.c
367
par = parent_cs(cur);
kernel/cgroup/cpuset-v1.c
376
if (cpuset_is_populated(cur)) {
kernel/cgroup/cpuset-v1.c
377
if (!cpumask_empty(cur->cpus_allowed) &&
kernel/cgroup/cpuset-v1.c
380
if (!nodes_empty(cur->mems_allowed) &&
kernel/cgroup/cpuset.c
704
static int validate_change(struct cpuset *cur, struct cpuset *trial)
kernel/cgroup/cpuset.c
714
ret = cpuset1_validate_change(cur, trial);
kernel/cgroup/cpuset.c
719
if (cur == &top_cpuset)
kernel/cgroup/cpuset.c
722
par = parent_cs(cur);
kernel/cgroup/cpuset.c
742
if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) &&
kernel/cgroup/cpuset.c
743
!cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial)))
kernel/cgroup/cpuset.c
751
xcpus_changed = !cpumask_equal(cur->exclusive_cpus, trial->exclusive_cpus);
kernel/cgroup/cpuset.c
753
if (c == cur)
kernel/cpu.c
2860
ssize_t cur, res = 0;
kernel/cpu.c
2868
cur = sprintf(buf, "%3d: %s\n", i, sp->name);
kernel/cpu.c
2869
buf += cur;
kernel/cpu.c
2870
res += cur;
kernel/crash_reserve.c
105
if (*cur != ':') {
kernel/crash_reserve.c
109
cur++;
kernel/crash_reserve.c
111
size = memparse(cur, &tmp);
kernel/crash_reserve.c
112
if (cur == tmp) {
kernel/crash_reserve.c
116
cur = tmp;
kernel/crash_reserve.c
127
} while (*cur++ == ',');
kernel/crash_reserve.c
130
while (*cur && *cur != ' ' && *cur != '@')
kernel/crash_reserve.c
131
cur++;
kernel/crash_reserve.c
132
if (*cur == '@') {
kernel/crash_reserve.c
133
cur++;
kernel/crash_reserve.c
134
*crash_base = memparse(cur, &tmp);
kernel/crash_reserve.c
135
if (cur == tmp) {
kernel/crash_reserve.c
157
char *cur = cmdline;
kernel/crash_reserve.c
159
*crash_size = memparse(cmdline, &cur);
kernel/crash_reserve.c
160
if (cmdline == cur) {
kernel/crash_reserve.c
165
if (*cur == '@')
kernel/crash_reserve.c
166
*crash_base = memparse(cur+1, &cur);
kernel/crash_reserve.c
167
else if (*cur != ' ' && *cur != '\0') {
kernel/crash_reserve.c
168
pr_warn("crashkernel: unrecognized char: %c\n", *cur);
kernel/crash_reserve.c
197
char *cur = cmdline;
kernel/crash_reserve.c
199
*crash_size = memparse(cmdline, &cur);
kernel/crash_reserve.c
200
if (cmdline == cur) {
kernel/crash_reserve.c
206
if (strncmp(cur, suffix, strlen(suffix))) {
kernel/crash_reserve.c
207
pr_warn("crashkernel: unrecognized char: %c\n", *cur);
kernel/crash_reserve.c
210
cur += strlen(suffix);
kernel/crash_reserve.c
211
if (*cur != ' ' && *cur != '\0') {
kernel/crash_reserve.c
212
pr_warn("crashkernel: unrecognized char: %c\n", *cur);
kernel/crash_reserve.c
63
char *cur = cmdline, *tmp;
kernel/crash_reserve.c
79
start = memparse(cur, &tmp);
kernel/crash_reserve.c
80
if (cur == tmp) {
kernel/crash_reserve.c
84
cur = tmp;
kernel/crash_reserve.c
85
if (*cur != '-') {
kernel/crash_reserve.c
89
cur++;
kernel/crash_reserve.c
92
if (*cur != ':') {
kernel/crash_reserve.c
93
end = memparse(cur, &tmp);
kernel/crash_reserve.c
94
if (cur == tmp) {
kernel/crash_reserve.c
98
cur = tmp;
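Note: the crash_reserve.c entries above all follow one cursor idiom: cur walks the crashkernel= string, memparse() consumes a size with an optional K/M/G suffix, and cur == tmp (no characters consumed) signals a parse error. A minimal userspace sketch of that idiom; my_memparse() is a hypothetical, simplified stand-in for the kernel helper (which also accepts T/P/E suffixes):

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified memparse(): number with optional K/M/G suffix. */
    static unsigned long long my_memparse(const char *ptr, char **retptr)
    {
        unsigned long long ret = strtoull(ptr, retptr, 0);

        switch (**retptr) {
        case 'G': ret <<= 10; /* fall through */
        case 'M': ret <<= 10; /* fall through */
        case 'K': ret <<= 10; (*retptr)++; break;
        }
        return ret;
    }

    int main(void)
    {
        char *cur = "512M@1G", *tmp;
        unsigned long long size, base = 0;

        size = my_memparse(cur, &tmp);
        if (cur == tmp)            /* nothing consumed: bad number */
            return 1;
        cur = tmp;
        if (*cur == '@')           /* optional "@base" part */
            base = my_memparse(cur + 1, &cur);
        printf("size=%llu base=%llu\n", size, base);
        return 0;
    }

The endptr convention means the caller never needs a separate length check: failure to advance the cursor is itself the error signal.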
kernel/futex/core.c
1834
struct futex_private_hash *cur, *new;
kernel/futex/core.c
1836
cur = rcu_dereference_protected(mm->futex_phash,
kernel/futex/core.c
1842
if (cur && !cur->hash_mask) {
kernel/futex/core.c
1852
if (cur && !new) {
kernel/futex/core.c
1858
futex_ref_drop(cur);
kernel/kcsan/kcsan_test.c
164
char *cur;
kernel/kcsan/kcsan_test.c
178
cur = expect[0];
kernel/kcsan/kcsan_test.c
180
cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
kernel/kcsan/kcsan_test.c
190
cur += scnprintf(cur, end - cur, "%ps / %ps",
kernel/kcsan/kcsan_test.c
194
scnprintf(cur, end - cur, "%pS", r->access[0].fn);
kernel/kcsan/kcsan_test.c
196
cur = strchr(expect[0], '+');
kernel/kcsan/kcsan_test.c
197
if (cur)
kernel/kcsan/kcsan_test.c
198
*cur = '\0';
kernel/kcsan/kcsan_test.c
202
cur = expect[1];
kernel/kcsan/kcsan_test.c
205
cur += scnprintf(cur, end - cur, "race at unknown origin, with ");
kernel/kcsan/kcsan_test.c
229
cur = expect[2];
kernel/kcsan/kcsan_test.c
239
cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
kernel/kcsan/kcsan_test.c
243
cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
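Note: these kcsan_test.c lines grow an expected-report string by advancing cur with the return value of scnprintf(), which, unlike snprintf(), returns the number of characters actually written and so can never push cur past end. A userspace sketch with a hypothetical my_scnprintf() wrapper that mimics that guarantee:

    #include <stdarg.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's scnprintf(): returns chars written. */
    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list ap;
        int n;

        if (!size)
            return 0;
        va_start(ap, fmt);
        n = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        return n < 0 ? 0 : (n >= (int)size ? (int)size - 1 : n);
    }

    int main(void)
    {
        char buf[64], *cur = buf, *end = buf + sizeof(buf);

        cur += my_scnprintf(cur, end - cur, "BUG: demo: %s in ", "data-race");
        cur += my_scnprintf(cur, end - cur, "%s+0x%x", "some_func", 0x1c);
        puts(buf);
        return 0;
    }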
kernel/kcsan/report.c
281
char *cur;
kernel/kcsan/report.c
292
cur = strnstr(buf, "kcsan_", len);
kernel/kcsan/report.c
293
if (cur) {
kernel/kcsan/report.c
294
cur += strlen("kcsan_");
kernel/kcsan/report.c
295
if (!str_has_prefix(cur, "test"))
kernel/kprobes.c
2014
struct llist_node **cur)
kernel/kprobes.c
2017
struct llist_node *node = *cur;
kernel/kprobes.c
2027
*cur = node;
kernel/kprobes.c
2052
struct llist_node **cur)
kernel/kprobes.c
2057
if (WARN_ON_ONCE(!cur))
kernel/kprobes.c
2061
ret = __kretprobe_find_ret_addr(tsk, cur);
kernel/kprobes.c
2064
ri = container_of(*cur, struct kretprobe_instance, llist);
kernel/liveupdate/kexec_handover.c
942
static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
kernel/liveupdate/kexec_handover.c
954
if (cur)
kernel/liveupdate/kexec_handover.c
955
KHOSER_STORE_PTR(cur->hdr.next, chunk);
kernel/locking/locktorture.c
1003
long cur;
kernel/locking/locktorture.c
1013
cur = data_race(statp[i].n_lock_acquired);
kernel/locking/locktorture.c
1014
sum += cur;
kernel/locking/locktorture.c
1015
if (max < cur)
kernel/locking/locktorture.c
1016
max = cur;
kernel/locking/locktorture.c
1017
if (min > cur)
kernel/locking/locktorture.c
1018
min = cur;
kernel/locking/ww_mutex.h
375
struct MUTEX_WAITER *cur;
kernel/locking/ww_mutex.h
379
for (cur = __ww_waiter_first(lock); cur;
kernel/locking/ww_mutex.h
380
cur = __ww_waiter_next(lock, cur)) {
kernel/locking/ww_mutex.h
382
if (!cur->ww_ctx)
kernel/locking/ww_mutex.h
385
if (__ww_mutex_die(lock, cur, ww_ctx, wake_q) ||
kernel/locking/ww_mutex.h
386
__ww_mutex_wound(lock, cur->ww_ctx, ww_ctx, wake_q))
kernel/locking/ww_mutex.h
470
struct MUTEX_WAITER *cur;
kernel/locking/ww_mutex.h
489
for (cur = __ww_waiter_prev(lock, waiter); cur;
kernel/locking/ww_mutex.h
490
cur = __ww_waiter_prev(lock, cur)) {
kernel/locking/ww_mutex.h
492
if (!cur->ww_ctx)
kernel/locking/ww_mutex.h
518
struct MUTEX_WAITER *cur, *pos = NULL;
kernel/locking/ww_mutex.h
535
for (cur = __ww_waiter_last(lock); cur;
kernel/locking/ww_mutex.h
536
cur = __ww_waiter_prev(lock, cur)) {
kernel/locking/ww_mutex.h
538
if (!cur->ww_ctx)
kernel/locking/ww_mutex.h
541
if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
kernel/locking/ww_mutex.h
557
pos = cur;
kernel/locking/ww_mutex.h
560
__ww_mutex_die(lock, cur, ww_ctx, wake_q);
kernel/padata.c
137
struct padata_work *cur, *next;
kernel/padata.c
143
list_for_each_entry_safe(cur, next, works, pw_list) {
kernel/padata.c
144
list_del(&cur->pw_list);
kernel/padata.c
145
padata_work_free(cur);
kernel/padata.c
364
struct padata_priv *cur;
kernel/padata.c
371
cur = list_entry(pos, struct padata_priv, list);
kernel/padata.c
373
if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
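Note: this padata.c reorder test relies on serial-number arithmetic: casting the unsigned difference to a signed type orders two sequence numbers correctly across wraparound, provided they are less than 2^31 apart. A standalone illustration (this assumes the usual two's-complement conversion, which the kernel requires anyway):

    #include <stdio.h>

    /* True if a is "before" b in wraparound-safe serial-number order. */
    static int seq_before(unsigned int a, unsigned int b)
    {
        return (int)(a - b) < 0;
    }

    int main(void)
    {
        /* 0xfffffffe wrapped past 0 to 1: 0xfffffffe is still "before" 1. */
        printf("%d\n", seq_before(0xfffffffeu, 1u));   /* prints 1 */
        printf("%d\n", seq_before(5u, 3u));            /* prints 0 */
        return 0;
    }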
kernel/power/power.h
143
unsigned int cur; /* number of the block of PAGE_SIZE bytes the
kernel/power/snapshot.c
2237
if (handle->cur > nr_meta_pages + nr_copy_pages)
kernel/power/snapshot.c
2246
if (!handle->cur) {
kernel/power/snapshot.c
2255
} else if (handle->cur <= nr_meta_pages) {
kernel/power/snapshot.c
2278
handle->cur++;
kernel/power/snapshot.c
2776
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
kernel/power/snapshot.c
2779
if (!handle->cur) {
kernel/power/snapshot.c
2788
} else if (handle->cur == 1) {
kernel/power/snapshot.c
2806
} else if (handle->cur <= nr_meta_pages + 1) {
kernel/power/snapshot.c
2811
if (handle->cur == nr_meta_pages + 1) {
kernel/power/snapshot.c
2834
handle->cur++;
kernel/power/snapshot.c
2837
if (handle->cur > nr_meta_pages + 1 &&
kernel/power/snapshot.c
2864
if (handle->cur > nr_meta_pages + 1) {
kernel/power/snapshot.c
2872
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
kernel/power/snapshot.c
2882
handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
kernel/power/snapshot.c
418
struct bm_position cur; /* most recently used bit position */
kernel/power/snapshot.c
590
bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
kernel/power/snapshot.c
592
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
kernel/power/snapshot.c
594
bm->cur.node_pfn = 0;
kernel/power/snapshot.c
595
bm->cur.cur_pfn = BM_END_OF_MAP;
kernel/power/snapshot.c
596
bm->cur.node_bit = 0;
kernel/power/snapshot.c
636
struct mem_extent *ext, *cur, *aux;
kernel/power/snapshot.c
667
cur = ext;
kernel/power/snapshot.c
668
list_for_each_entry_safe_continue(cur, aux, list, hook) {
kernel/power/snapshot.c
669
if (zone_end < cur->start)
kernel/power/snapshot.c
671
if (zone_end < cur->end)
kernel/power/snapshot.c
672
ext->end = cur->end;
kernel/power/snapshot.c
673
list_del(&cur->hook);
kernel/power/snapshot.c
674
kfree(cur);
kernel/power/snapshot.c
755
zone = bm->cur.zone;
kernel/power/snapshot.c
784
node = bm->cur.node;
kernel/power/snapshot.c
785
if (zone == bm->cur.zone &&
kernel/power/snapshot.c
786
((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
kernel/power/snapshot.c
803
bm->cur.zone = zone;
kernel/power/snapshot.c
804
bm->cur.node = node;
kernel/power/snapshot.c
805
bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
kernel/power/snapshot.c
806
bm->cur.cur_pfn = pfn;
kernel/power/snapshot.c
854
bit = max(bm->cur.node_bit - 1, 0);
kernel/power/snapshot.c
855
clear_bit(bit, bm->cur.node->data);
kernel/power/snapshot.c
860
return bm->cur.cur_pfn;
kernel/power/snapshot.c
894
if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
kernel/power/snapshot.c
895
bm->cur.node = list_entry(bm->cur.node->list.next,
kernel/power/snapshot.c
897
bm->cur.node_pfn += BM_BITS_PER_BLOCK;
kernel/power/snapshot.c
898
bm->cur.node_bit = 0;
kernel/power/snapshot.c
904
if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
kernel/power/snapshot.c
905
bm->cur.zone = list_entry(bm->cur.zone->list.next,
kernel/power/snapshot.c
907
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
kernel/power/snapshot.c
909
bm->cur.node_pfn = 0;
kernel/power/snapshot.c
910
bm->cur.node_bit = 0;
kernel/power/snapshot.c
935
pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
kernel/power/snapshot.c
936
bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
kernel/power/snapshot.c
937
bit = find_next_bit(bm->cur.node->data, bits,
kernel/power/snapshot.c
938
bm->cur.node_bit);
kernel/power/snapshot.c
940
pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
kernel/power/snapshot.c
941
bm->cur.node_bit = bit + 1;
kernel/power/snapshot.c
942
bm->cur.cur_pfn = pfn;
kernel/power/snapshot.c
947
bm->cur.cur_pfn = BM_END_OF_MAP;
kernel/power/swap.c
1000
handle->cur = NULL;
kernel/power/swap.c
1015
handle->cur = NULL;
kernel/power/swap.c
1045
handle->cur = handle->maps->map;
kernel/power/swap.c
1056
if (!handle->cur)
kernel/power/swap.c
1058
offset = handle->cur->entries[handle->k];
kernel/power/swap.c
1076
handle->cur = handle->maps->map;
kernel/power/swap.c
1348
if (handle->cur &&
kernel/power/swap.c
1349
handle->cur->entries[handle->k]) {
kernel/power/swap.c
389
if (handle->cur)
kernel/power/swap.c
390
free_page((unsigned long)handle->cur);
kernel/power/swap.c
391
handle->cur = NULL;
kernel/power/swap.c
404
handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
kernel/power/swap.c
405
if (!handle->cur) {
kernel/power/swap.c
431
if (!handle->cur)
kernel/power/swap.c
437
handle->cur->entries[handle->k++] = offset;
kernel/power/swap.c
442
handle->cur->next_swap = offset;
kernel/power/swap.c
443
error = write_page(handle->cur, handle->cur_swap, hb);
kernel/power/swap.c
446
clear_page(handle->cur);
kernel/power/swap.c
467
if (handle->cur && handle->cur_swap)
kernel/power/swap.c
468
return write_page(handle->cur, handle->cur_swap, NULL);
kernel/power/swap.c
94
struct swap_map_page *cur;
kernel/printk/nbcon.c
1047
nbcon_state_read(con, &cur);
kernel/printk/nbcon.c
1048
if (!nbcon_context_can_proceed(ctxt, &cur))
kernel/printk/nbcon.c
156
static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_state *cur,
kernel/printk/nbcon.c
159
return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
kernel/printk/nbcon.c
244
struct nbcon_state *cur, bool is_reacquire)
kernel/printk/nbcon.c
266
(!is_reacquire || cur->unsafe_takeover)) {
kernel/printk/nbcon.c
270
if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
kernel/printk/nbcon.c
273
if (cur->unsafe)
kernel/printk/nbcon.c
280
WARN_ON_ONCE(cur->unsafe_takeover);
kernel/printk/nbcon.c
282
new.atom = cur->atom;
kernel/printk/nbcon.c
285
new.unsafe = cur->unsafe_takeover;
kernel/printk/nbcon.c
288
} while (!nbcon_state_try_cmpxchg(con, cur, &new));
kernel/printk/nbcon.c
293
static bool nbcon_waiter_matches(struct nbcon_state *cur, int expected_prio)
kernel/printk/nbcon.c
324
return (cur->req_prio == expected_prio);
kernel/printk/nbcon.c
352
struct nbcon_state *cur)
kernel/printk/nbcon.c
366
if (!nbcon_waiter_matches(cur, ctxt->prio))
kernel/printk/nbcon.c
370
if (cur->prio != NBCON_PRIO_NONE)
kernel/printk/nbcon.c
377
WARN_ON_ONCE(cur->unsafe);
kernel/printk/nbcon.c
379
new.atom = cur->atom;
kernel/printk/nbcon.c
382
new.unsafe = cur->unsafe_takeover;
kernel/printk/nbcon.c
385
if (!nbcon_state_try_cmpxchg(con, cur, &new)) {
kernel/printk/nbcon.c
390
WARN_ON_ONCE(nbcon_waiter_matches(cur, ctxt->prio));
kernel/printk/nbcon.c
435
struct nbcon_state *cur)
kernel/printk/nbcon.c
447
WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
kernel/printk/nbcon.c
448
WARN_ON_ONCE(!cur->unsafe);
kernel/printk/nbcon.c
461
if (cur->cpu == cpu)
kernel/printk/nbcon.c
468
if (cur->unsafe_takeover)
kernel/printk/nbcon.c
479
new.atom = cur->atom;
kernel/printk/nbcon.c
481
if (!nbcon_state_try_cmpxchg(con, cur, &new))
kernel/printk/nbcon.c
484
cur->atom = new.atom;
kernel/printk/nbcon.c
489
request_err = nbcon_context_try_acquire_requested(ctxt, cur);
kernel/printk/nbcon.c
503
nbcon_state_read(con, cur);
kernel/printk/nbcon.c
513
if (!nbcon_waiter_matches(cur, ctxt->prio))
kernel/printk/nbcon.c
517
new.atom = cur->atom;
kernel/printk/nbcon.c
519
if (nbcon_state_try_cmpxchg(con, cur, &new)) {
kernel/printk/nbcon.c
524
cur->atom = new.atom;
kernel/printk/nbcon.c
532
} while (nbcon_context_try_acquire_requested(ctxt, cur));
kernel/printk/nbcon.c
551
struct nbcon_state *cur)
kernel/printk/nbcon.c
568
WARN_ON_ONCE(ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio);
kernel/printk/nbcon.c
569
WARN_ON_ONCE(cur->unsafe != true);
kernel/printk/nbcon.c
572
new.atom = cur->atom;
kernel/printk/nbcon.c
575
new.unsafe |= cur->unsafe_takeover;
kernel/printk/nbcon.c
576
new.unsafe_takeover |= cur->unsafe;
kernel/printk/nbcon.c
578
} while (!nbcon_state_try_cmpxchg(con, cur, &new));
kernel/printk/nbcon.c
601
struct nbcon_state cur;
kernel/printk/nbcon.c
604
nbcon_state_read(con, &cur);
kernel/printk/nbcon.c
606
err = nbcon_context_try_acquire_direct(ctxt, &cur, is_reacquire);
kernel/printk/nbcon.c
610
err = nbcon_context_try_acquire_handover(ctxt, &cur);
kernel/printk/nbcon.c
616
err = nbcon_context_try_acquire_hostile(ctxt, &cur);
kernel/printk/nbcon.c
635
static bool nbcon_owner_matches(struct nbcon_state *cur, int expected_cpu,
kernel/printk/nbcon.c
664
if (cur->prio != expected_prio)
kernel/printk/nbcon.c
667
if (cur->cpu != expected_cpu)
kernel/printk/nbcon.c
681
struct nbcon_state cur;
kernel/printk/nbcon.c
684
nbcon_state_read(con, &cur);
kernel/printk/nbcon.c
687
if (!nbcon_owner_matches(&cur, cpu, ctxt->prio))
kernel/printk/nbcon.c
690
new.atom = cur.atom;
kernel/printk/nbcon.c
697
new.unsafe |= cur.unsafe_takeover;
kernel/printk/nbcon.c
699
} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
kernel/printk/nbcon.c
730
static bool nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
kernel/printk/nbcon.c
735
if (!nbcon_owner_matches(cur, cpu, ctxt->prio))
kernel/printk/nbcon.c
739
if (cur->req_prio == NBCON_PRIO_NONE)
kernel/printk/nbcon.c
748
if (cur->unsafe)
kernel/printk/nbcon.c
752
WARN_ON_ONCE(cur->req_prio <= cur->prio);
kernel/printk/nbcon.c
802
struct nbcon_state cur;
kernel/printk/nbcon.c
804
nbcon_state_read(con, &cur);
kernel/printk/nbcon.c
806
return nbcon_context_can_proceed(ctxt, &cur);
kernel/printk/nbcon.c
835
struct nbcon_state cur;
kernel/printk/nbcon.c
838
nbcon_state_read(con, &cur);
kernel/printk/nbcon.c
845
if (!unsafe && cur.unsafe_takeover)
kernel/printk/nbcon.c
848
if (!nbcon_context_can_proceed(ctxt, &cur))
kernel/printk/nbcon.c
851
new.atom = cur.atom;
kernel/printk/nbcon.c
853
} while (!nbcon_state_try_cmpxchg(con, &cur, &new));
kernel/printk/nbcon.c
855
cur.atom = new.atom;
kernel/printk/nbcon.c
857
return nbcon_context_can_proceed(ctxt, &cur);
kernel/printk/nbcon.c
865
struct nbcon_state cur;
kernel/printk/nbcon.c
869
nbcon_state_read(con, &cur);
kernel/printk/nbcon.c
870
wctxt->unsafe_takeover = cur.unsafe_takeover;
kernel/printk/nbcon.c
989
struct nbcon_state cur;
kernel/resource.c
1629
struct resource *cur;
kernel/resource.c
1638
cur = res->sibling;
kernel/resource.c
1639
if (cur && system_ram_resources_mergeable(res, cur)) {
kernel/resource.c
1640
res->end = cur->end;
kernel/resource.c
1641
res->sibling = cur->sibling;
kernel/resource.c
1642
free_resource(cur);
kernel/resource.c
1646
cur = res->parent->child;
kernel/resource.c
1647
while (cur && cur->sibling != res)
kernel/resource.c
1648
cur = cur->sibling;
kernel/resource.c
1649
if (cur && system_ram_resources_mergeable(cur, res)) {
kernel/resource.c
1650
cur->end = res->end;
kernel/resource.c
1651
cur->sibling = res->sibling;
kernel/sched/core.c
3365
int migrate_swap(struct task_struct *cur, struct task_struct *p,
kernel/sched/core.c
3372
.src_task = cur,
kernel/sched/core.c
3394
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
kernel/sched/core.c
7992
int cpuset_cpumask_can_shrink(const struct cpumask *cur,
kernel/sched/core.c
7997
if (cpumask_empty(cur))
kernel/sched/core.c
8000
ret = dl_cpuset_cpumask_can_shrink(cur, trial);
kernel/sched/cpufreq_schedutil.c
168
return policy->cur + (policy->cur >> 2);
kernel/sched/deadline.c
3730
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
kernel/sched/deadline.c
3738
cur_dl_b = dl_bw_of(cpumask_any(cur));
kernel/sched/ext.c
442
struct task_struct *cur, bool rev)
kernel/sched/ext.c
449
if (cur)
kernel/sched/ext.c
450
list_node = &cur->scx.dsq_list.node;
kernel/sched/fair.c
2324
struct task_struct *cur;
kernel/sched/fair.c
2335
cur = rcu_dereference_all(dst_rq->curr);
kernel/sched/fair.c
2336
if (cur && ((cur->flags & (PF_EXITING | PF_KTHREAD)) ||
kernel/sched/fair.c
2337
!cur->mm))
kernel/sched/fair.c
2338
cur = NULL;
kernel/sched/fair.c
2344
if (cur == env->p) {
kernel/sched/fair.c
2349
if (!cur) {
kernel/sched/fair.c
2357
if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
kernel/sched/fair.c
2366
cur->numa_preferred_nid != env->src_nid) {
kernel/sched/fair.c
2380
cur_ng = rcu_dereference_all(cur->numa_group);
kernel/sched/fair.c
2391
imp = taskimp + task_weight(cur, env->src_nid, dist) -
kernel/sched/fair.c
2392
task_weight(cur, env->dst_nid, dist);
kernel/sched/fair.c
2405
imp += group_weight(cur, env->src_nid, dist) -
kernel/sched/fair.c
2406
group_weight(cur, env->dst_nid, dist);
kernel/sched/fair.c
2408
imp += task_weight(cur, env->src_nid, dist) -
kernel/sched/fair.c
2409
task_weight(cur, env->dst_nid, dist);
kernel/sched/fair.c
2413
if (cur->numa_preferred_nid == env->dst_nid)
kernel/sched/fair.c
2422
if (cur->numa_preferred_nid == env->src_nid)
kernel/sched/fair.c
2427
cur = NULL;
kernel/sched/fair.c
2435
if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
kernel/sched/fair.c
2452
load = task_h_load(env->p) - task_h_load(cur);
kernel/sched/fair.c
2464
if (!cur) {
kernel/sched/fair.c
2483
task_numa_assign(env, cur, imp);
kernel/sched/fair.c
2490
if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
kernel/sched/sched.h
362
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
kernel/sched/topology.c
2803
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
kernel/sched/topology.c
2809
if (!new && !cur)
kernel/sched/topology.c
2814
return !memcmp(cur ? (cur + idx_cur) : &tmp,
kernel/seccomp.c
1503
struct seccomp_knotif *cur;
kernel/seccomp.c
1507
list_for_each_entry(cur, &filter->notif->notifications, list) {
kernel/seccomp.c
1508
if (cur->id == id)
kernel/seccomp.c
1509
return cur;
kernel/seccomp.c
1555
struct seccomp_knotif *knotif = NULL, *cur;
kernel/seccomp.c
1573
list_for_each_entry(cur, &filter->notif->notifications, list) {
kernel/seccomp.c
1574
if (cur->state == SECCOMP_NOTIFY_INIT) {
kernel/seccomp.c
1575
knotif = cur;
kernel/seccomp.c
1860
struct seccomp_knotif *cur;
kernel/seccomp.c
1867
list_for_each_entry(cur, &filter->notif->notifications, list) {
kernel/seccomp.c
1868
if (cur->state == SECCOMP_NOTIFY_INIT)
kernel/seccomp.c
1870
if (cur->state == SECCOMP_NOTIFY_SENT)
kernel/seccomp.c
1928
struct seccomp_filter *cur;
kernel/seccomp.c
1935
for (cur = current->seccomp.filter; cur; cur = cur->prev) {
kernel/seccomp.c
1936
if (cur->notif)
kernel/seccomp.c
2330
const struct seccomp_log_name *cur;
kernel/seccomp.c
2333
for (cur = seccomp_log_names; cur->name && size; cur++) {
kernel/seccomp.c
2336
if (!(actions_logged & cur->log))
kernel/seccomp.c
2349
ret = strscpy(names, cur->name, size);
kernel/seccomp.c
2363
const struct seccomp_log_name *cur;
kernel/seccomp.c
2365
for (cur = seccomp_log_names; cur->name; cur++) {
kernel/seccomp.c
2366
if (!strcmp(cur->name, name)) {
kernel/seccomp.c
2367
*action_logged = cur->log;
kernel/smpboot.c
211
struct smp_hotplug_thread *cur;
kernel/smpboot.c
215
list_for_each_entry(cur, &hotplug_threads, list) {
kernel/smpboot.c
216
ret = __smpboot_create_thread(cur, cpu);
kernel/smpboot.c
234
struct smp_hotplug_thread *cur;
kernel/smpboot.c
237
list_for_each_entry(cur, &hotplug_threads, list)
kernel/smpboot.c
238
smpboot_unpark_thread(cur, cpu);
kernel/smpboot.c
253
struct smp_hotplug_thread *cur;
kernel/smpboot.c
256
list_for_each_entry_reverse(cur, &hotplug_threads, list)
kernel/smpboot.c
257
smpboot_park_thread(cur, cpu);
kernel/time/tick-broadcast.c
165
struct clock_event_device *cur = tick_broadcast_device.evtdev;
kernel/time/tick-broadcast.c
170
if (!tick_check_broadcast_device(cur, dev))
kernel/time/tick-broadcast.c
176
clockevents_exchange_device(cur, dev);
kernel/time/tick-broadcast.c
177
if (cur)
kernel/time/tick-broadcast.c
178
cur->event_handler = clockevents_handle_noop;
kernel/trace/rethook.c
206
struct llist_node **cur)
kernel/trace/rethook.c
209
struct llist_node *node = *cur;
kernel/trace/rethook.c
219
*cur = node;
kernel/trace/rethook.c
245
struct llist_node **cur)
kernel/trace/rethook.c
250
if (WARN_ON_ONCE(!cur))
kernel/trace/rethook.c
257
ret = __rethook_find_ret_addr(tsk, cur);
kernel/trace/rethook.c
260
rhn = container_of(*cur, struct rethook_node, llist);
kernel/workqueue.c
8081
int cur, pre, cpu, pod;
kernel/workqueue.c
8089
for_each_possible_cpu(cur) {
kernel/workqueue.c
8091
if (pre >= cur) {
kernel/workqueue.c
8092
pt->cpu_pod[cur] = pt->nr_pods++;
kernel/workqueue.c
8095
if (cpus_share_pod(cur, pre)) {
kernel/workqueue.c
8096
pt->cpu_pod[cur] = pt->cpu_pod[pre];
lib/cmdline.c
58
char *cur = *str;
lib/cmdline.c
61
if (!cur || !(*cur))
lib/cmdline.c
63
if (*cur == '-')
lib/cmdline.c
64
value = -simple_strtoull(++cur, str, 0);
lib/cmdline.c
66
value = simple_strtoull(cur, str, 0);
lib/cmdline.c
69
if (cur == *str)
lib/debugobjects.c
453
unsigned long cur, now = jiffies;
lib/debugobjects.c
462
cur = READ_ONCE(pool_global.stats.cur_used) * ODEBUG_FREE_WORK_MAX;
lib/debugobjects.c
463
WRITE_ONCE(avg_usage, calc_load(avg_usage, EXP_5, cur));
lib/errseq.c
102
if (likely(cur == old || cur == new))
lib/errseq.c
106
old = cur;
lib/errseq.c
108
return cur;
lib/errseq.c
148
errseq_t cur = READ_ONCE(*eseq);
lib/errseq.c
150
if (likely(cur == since))
lib/errseq.c
152
return -(cur & ERRNO_MASK);
lib/errseq.c
64
errseq_t cur, old;
lib/errseq.c
91
cur = new;
lib/errseq.c
96
cur = cmpxchg(eseq, old, new);
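Note: the errseq.c lines are the canonical cmpxchg retry loop: compute a new value from the observed old one, and let cmpxchg() either install it or report what another CPU installed first, after which cur is re-examined. A sketch of the same shape in portable C11 atomics; the 0xff error mask here is illustrative, not errseq's actual layout:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int eseq;

    /* Set the low error bits unless another thread beat us to it. */
    static unsigned int set_err(unsigned int err)
    {
        unsigned int old = atomic_load(&eseq);

        for (;;) {
            unsigned int new = (old & ~0xffu) | (err & 0xffu);

            /* On failure, `old` is reloaded with the current value. */
            if (atomic_compare_exchange_weak(&eseq, &old, new))
                return new;
        }
    }

    int main(void)
    {
        printf("0x%x\n", set_err(12));   /* prints 0xc */
        return 0;
    }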
lib/interval_tree.c
37
struct interval_tree_node *cur = state->nodes[1];
lib/interval_tree.c
39
state->nodes[0] = cur;
lib/interval_tree.c
41
if (cur->last > state->nodes[0]->last)
lib/interval_tree.c
42
state->nodes[0] = cur;
lib/interval_tree.c
43
cur = interval_tree_iter_next(cur, state->first_index,
lib/interval_tree.c
45
} while (cur && (state->nodes[0]->last >= cur->start ||
lib/interval_tree.c
46
state->nodes[0]->last + 1 == cur->start));
lib/interval_tree.c
47
state->nodes[1] = cur;
lib/kobject.c
130
int cur = strlen(kobject_name(parent));
lib/kobject.c
132
length -= cur;
lib/kobject.c
135
memcpy(path + length, kobject_name(parent), cur);
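Note: the kobject.c lines assemble an object path back to front: length starts at the buffer end, each ancestor's name length is subtracted, and memcpy(path + length, ...) drops the component into place, so no reversal pass is needed. A loose sketch over a hypothetical parent-linked node; unlike the kernel, which precomputes the exact path length first, this version over-allocates and shifts the result left at the end:

    #include <stdio.h>
    #include <string.h>

    struct node { const char *name; struct node *parent; };

    /* Assumes path[] is large enough for the full path plus NUL. */
    static void fill_path(const struct node *n, char *path, size_t length)
    {
        path[--length] = '\0';
        for (; n; n = n->parent) {
            size_t cur = strlen(n->name);

            length -= cur;
            memcpy(path + length, n->name, cur);
            path[--length] = '/';      /* separator before each component */
        }
        memmove(path, path + length, strlen(path + length) + 1);
    }

    int main(void)
    {
        struct node root = { "devices", NULL };
        struct node leaf = { "cpu0", &root };
        char buf[32];

        fill_path(&leaf, buf, sizeof(buf));
        puts(buf);                     /* prints /devices/cpu0 */
        return 0;
    }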
lib/rbtree_test.c
174
struct test_node *cur, *n;
lib/rbtree_test.c
176
rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
lib/scatterlist.c
390
struct scatterlist *cur,
lib/scatterlist.c
397
if (cur) {
lib/scatterlist.c
398
next_sg = sg_next(cur);
lib/scatterlist.c
409
if (cur) {
lib/tests/list-test.c
1038
struct hlist_node entries[3], *cur;
lib/tests/list-test.c
1046
hlist_for_each(cur, &list) {
lib/tests/list-test.c
1047
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
1057
struct hlist_node entries[3], *cur, *n;
lib/tests/list-test.c
1065
hlist_for_each_safe(cur, n, &list) {
lib/tests/list-test.c
1066
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
1077
struct hlist_test_struct entries[5], *cur;
lib/tests/list-test.c
1090
hlist_for_each_entry(cur, &list, list) {
lib/tests/list-test.c
1091
KUNIT_EXPECT_EQ(test, cur->data, i);
lib/tests/list-test.c
1100
struct hlist_test_struct entries[5], *cur;
lib/tests/list-test.c
1114
cur = &entries[0];
lib/tests/list-test.c
1115
hlist_for_each_entry_continue(cur, list) {
lib/tests/list-test.c
1116
KUNIT_EXPECT_EQ(test, cur->data, i);
lib/tests/list-test.c
1118
cur->data = 42;
lib/tests/list-test.c
1131
struct hlist_test_struct entries[5], *cur;
lib/tests/list-test.c
1144
cur = &entries[0];
lib/tests/list-test.c
1145
hlist_for_each_entry_from(cur, list) {
lib/tests/list-test.c
1146
KUNIT_EXPECT_EQ(test, cur->data, i);
lib/tests/list-test.c
1148
cur->data = 42;
lib/tests/list-test.c
1159
struct hlist_test_struct entries[5], *cur;
lib/tests/list-test.c
1173
hlist_for_each_entry_safe(cur, tmp_node, &list, list) {
lib/tests/list-test.c
1174
KUNIT_EXPECT_EQ(test, cur->data, i);
lib/tests/list-test.c
1175
hlist_del(&cur->list);
lib/tests/list-test.c
391
struct list_head entries[3], *cur;
lib/tests/list-test.c
404
list_for_each(cur, &list2) {
lib/tests/list-test.c
405
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
411
list_for_each(cur, &list1) {
lib/tests/list-test.c
412
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
421
struct list_head entries[3], *cur;
lib/tests/list-test.c
434
list_for_each(cur, &list2) {
lib/tests/list-test.c
435
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
441
list_for_each(cur, &list1) {
lib/tests/list-test.c
442
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
451
struct list_head entries[5], *cur;
lib/tests/list-test.c
466
list_for_each(cur, &list1) {
lib/tests/list-test.c
467
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
476
struct list_head entries[5], *cur;
lib/tests/list-test.c
491
list_for_each(cur, &list1) {
lib/tests/list-test.c
492
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
501
struct list_head entries[5], *cur;
lib/tests/list-test.c
516
list_for_each(cur, &list1) {
lib/tests/list-test.c
517
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
528
struct list_head entries[5], *cur;
lib/tests/list-test.c
543
list_for_each(cur, &list1) {
lib/tests/list-test.c
544
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
651
struct list_head entries[3], *cur;
lib/tests/list-test.c
659
list_for_each(cur, &list) {
lib/tests/list-test.c
660
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
669
struct list_head entries[3], *cur;
lib/tests/list-test.c
677
list_for_each_prev(cur, &list) {
lib/tests/list-test.c
678
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
687
struct list_head entries[3], *cur, *n;
lib/tests/list-test.c
696
list_for_each_safe(cur, n, &list) {
lib/tests/list-test.c
697
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
708
struct list_head entries[3], *cur, *n;
lib/tests/list-test.c
716
list_for_each_prev_safe(cur, n, &list) {
lib/tests/list-test.c
717
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
lib/tests/list-test.c
728
struct list_test_struct entries[5], *cur;
lib/tests/list-test.c
739
list_for_each_entry(cur, &list, list) {
lib/tests/list-test.c
740
KUNIT_EXPECT_EQ(test, cur->data, i);
lib/tests/list-test.c
749
struct list_test_struct entries[5], *cur;
lib/tests/list-test.c
760
list_for_each_entry_reverse(cur, &list, list) {
lib/tests/list-test.c
761
KUNIT_EXPECT_EQ(test, cur->data, i);
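Note: the list-test.c entries exercise the kernel's intrusive doubly linked lists, where the struct list_head lives inside the element and container_of() recovers the element from its embedded node. A freestanding sketch of that machinery, with minimal local definitions of the list primitives:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head { struct list_head *next, *prev; };

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    struct item { int data; struct list_head list; };

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct item entries[3];
        struct list_head *cur;

        for (int i = 0; i < 3; i++) {
            entries[i].data = i;
            list_add_tail(&entries[i].list, &head);
        }
        /* Walk the nodes; recover each containing item. Prints 0 1 2. */
        for (cur = head.next; cur != &head; cur = cur->next)
            printf("%d\n", container_of(cur, struct item, list)->data);
        return 0;
    }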
lib/tests/test_list_sort.c
104
KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
lib/tests/test_list_sort.c
62
struct list_head *cur;
lib/tests/test_list_sort.c
84
for (cur = head.next; cur->next != &head; cur = cur->next) {
lib/tests/test_list_sort.c
88
KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
lib/tests/test_list_sort.c
91
cmp_result = cmp(test, cur, cur->next);
lib/tests/test_list_sort.c
94
el = container_of(cur, struct debug_el, list);
lib/tests/test_list_sort.c
95
el1 = container_of(cur->next, struct debug_el, list);
lib/ts_fsm.c
137
struct ts_fsm_token *cur = NULL, *next;
lib/ts_fsm.c
166
cur = &fsm->tokens[tok_idx];
lib/ts_fsm.c
173
switch (cur->recur) {
lib/ts_fsm.c
178
if (!match_token(cur, data[block_idx]))
lib/ts_fsm.c
184
!match_token(cur, data[block_idx]))
lib/ts_fsm.c
192
if (!match_token(cur, data[block_idx]))
lib/ts_fsm.c
206
if (!match_token(cur, data[block_idx]))
lib/ts_fsm.c
229
if (!match_token(cur, data[block_idx]))
lib/zstd/compress/zstd_ldm.c
410
ldmEntry_t const* cur;
lib/zstd/compress/zstd_ldm.c
425
for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
lib/zstd/compress/zstd_ldm.c
428
if (cur->checksum != checksum || cur->offset <= lowestIndex) {
lib/zstd/compress/zstd_ldm.c
433
cur->offset < dictLimit ? dictBase : base;
lib/zstd/compress/zstd_ldm.c
434
BYTE const* const pMatch = curMatchBase + cur->offset;
lib/zstd/compress/zstd_ldm.c
436
cur->offset < dictLimit ? dictEnd : iend;
lib/zstd/compress/zstd_ldm.c
438
cur->offset < dictLimit ? dictStart : lowPrefixPtr;
lib/zstd/compress/zstd_ldm.c
447
BYTE const* const pMatch = base + cur->offset;
lib/zstd/compress/zstd_ldm.c
461
bestEntry = cur;
lib/zstd/compress/zstd_opt.c
1121
U32 cur, last_pos = 0;
lib/zstd/compress/zstd_opt.c
1167
cur = 0;
lib/zstd/compress/zstd_opt.c
1201
for (cur = 1; cur <= last_pos; cur++) {
lib/zstd/compress/zstd_opt.c
1202
const BYTE* const inr = ip + cur;
lib/zstd/compress/zstd_opt.c
1203
assert(cur <= ZSTD_OPT_NUM);
lib/zstd/compress/zstd_opt.c
1204
DEBUGLOG(7, "cPos:%i==rPos:%u", (int)(inr-istart), cur);
lib/zstd/compress/zstd_opt.c
1207
{ U32 const litlen = opt[cur-1].litlen + 1;
lib/zstd/compress/zstd_opt.c
1208
int const price = opt[cur-1].price
lib/zstd/compress/zstd_opt.c
1209
+ LIT_PRICE(ip+cur-1)
lib/zstd/compress/zstd_opt.c
1212
if (price <= opt[cur].price) {
lib/zstd/compress/zstd_opt.c
1213
ZSTD_optimal_t const prevMatch = opt[cur];
lib/zstd/compress/zstd_opt.c
1215
(int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
lib/zstd/compress/zstd_opt.c
1216
opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
lib/zstd/compress/zstd_opt.c
1217
opt[cur] = opt[cur-1];
lib/zstd/compress/zstd_opt.c
1218
opt[cur].litlen = litlen;
lib/zstd/compress/zstd_opt.c
1219
opt[cur].price = price;
lib/zstd/compress/zstd_opt.c
1223
&& LIKELY(ip + cur < iend)
lib/zstd/compress/zstd_opt.c
1226
int with1literal = prevMatch.price + LIT_PRICE(ip+cur) + LL_INCPRICE(1);
lib/zstd/compress/zstd_opt.c
1227
int withMoreLiterals = price + LIT_PRICE(ip+cur) + LL_INCPRICE(litlen+1);
lib/zstd/compress/zstd_opt.c
1229
cur+1, ZSTD_fCost(with1literal), litlen+1, ZSTD_fCost(withMoreLiterals));
lib/zstd/compress/zstd_opt.c
1231
&& (with1literal < opt[cur+1].price) ) {
lib/zstd/compress/zstd_opt.c
1233
U32 const prev = cur - prevMatch.mlen;
lib/zstd/compress/zstd_opt.c
1235
assert(cur >= prevMatch.mlen);
lib/zstd/compress/zstd_opt.c
1239
opt[cur+1] = prevMatch; /* mlen & offbase */
lib/zstd/compress/zstd_opt.c
1240
ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(Repcodes_t));
lib/zstd/compress/zstd_opt.c
1241
opt[cur+1].litlen = 1;
lib/zstd/compress/zstd_opt.c
1242
opt[cur+1].price = with1literal;
lib/zstd/compress/zstd_opt.c
1243
if (last_pos < cur+1) last_pos = cur+1;
lib/zstd/compress/zstd_opt.c
1248
(int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price));
lib/zstd/compress/zstd_opt.c
1255
ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(Repcodes_t));
lib/zstd/compress/zstd_opt.c
1256
assert(cur >= opt[cur].mlen);
lib/zstd/compress/zstd_opt.c
1257
if (opt[cur].litlen == 0) {
lib/zstd/compress/zstd_opt.c
1259
U32 const prev = cur - opt[cur].mlen;
lib/zstd/compress/zstd_opt.c
1260
Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0);
lib/zstd/compress/zstd_opt.c
1261
ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(Repcodes_t));
lib/zstd/compress/zstd_opt.c
1267
if (cur == last_pos) break;
lib/zstd/compress/zstd_opt.c
1270
&& (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
lib/zstd/compress/zstd_opt.c
1271
DEBUGLOG(7, "skip current position : next rPos(%u) price is cheaper", cur+1);
lib/zstd/compress/zstd_opt.c
1275
assert(opt[cur].price >= 0);
lib/zstd/compress/zstd_opt.c
1276
{ U32 const ll0 = (opt[cur].litlen == 0);
lib/zstd/compress/zstd_opt.c
1277
int const previousPrice = opt[cur].price;
lib/zstd/compress/zstd_opt.c
1279
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
lib/zstd/compress/zstd_opt.c
1287
DEBUGLOG(7, "rPos:%u : no match found", cur);
lib/zstd/compress/zstd_opt.c
1293
(int)(inr-istart), cur, nbMatches, longestML);
lib/zstd/compress/zstd_opt.c
1296
|| (cur + longestML >= ZSTD_OPT_NUM)
lib/zstd/compress/zstd_opt.c
1297
|| (ip + cur + longestML >= iend) ) {
lib/zstd/compress/zstd_opt.c
1301
last_pos = cur + longestML;
lib/zstd/compress/zstd_opt.c
1313
matchNb, matches[matchNb].off, lastML, opt[cur].litlen);
lib/zstd/compress/zstd_opt.c
1316
U32 const pos = cur + mlen;
lib/zstd/compress/zstd_opt.c
1342
assert(cur >= lastStretch.mlen);
lib/zstd/compress/zstd_opt.c
1343
cur = last_pos - lastStretch.mlen;
lib/zstd/compress/zstd_opt.c
1348
assert(cur == last_pos - lastStretch.mlen);
lib/zstd/compress/zstd_opt.c
1361
Repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0);
lib/zstd/compress/zstd_opt.c
1365
assert(cur >= lastStretch.litlen);
lib/zstd/compress/zstd_opt.c
1366
cur -= lastStretch.litlen;
lib/zstd/compress/zstd_opt.c
1377
{ U32 const storeEnd = cur + 2;
lib/zstd/compress/zstd_opt.c
1379
U32 stretchPos = cur;
lib/zstd/compress/zstd_opt.c
1382
last_pos, cur); (void)last_pos;
mm/gup.c
2050
unsigned long cur;
mm/gup.c
2058
for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
mm/gup.c
2059
unsafe_put_user(0, (char __user *)cur, out);
mm/gup.c
2062
if (size > cur - start)
mm/gup.c
2063
return size - (cur - start);
mm/gup.c
2119
unsigned long cur;
mm/gup.c
2128
for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
mm/gup.c
2129
if (fixup_user_fault(mm, cur, FAULT_FLAG_WRITE, &unlocked))
mm/gup.c
2133
if (size > cur - start)
mm/gup.c
2134
return size - (cur - start);
mm/gup.c
2151
unsigned long cur;
mm/gup.c
2160
for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
mm/gup.c
2161
unsafe_get_user(c, (const char __user *)cur, out);
mm/gup.c
2165
if (size > cur - start)
mm/gup.c
2166
return size - (cur - start);
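Note: all three gup.c loops step cur through a user range one page at a time: PAGE_ALIGN_DOWN(cur + PAGE_SIZE) jumps to the next page boundary regardless of where the range starts, and the extra `cur &&` test stops the walk if the address wraps to zero at the top of the address space. A userspace model of the stepping arithmetic (PAGE_SIZE fixed at 4 KiB for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN_DOWN(x) ((x) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long start = 0x1f80, end = 0x5000, cur;

        /* First iteration starts mid-page; later ones are page-aligned.
         * `cur &&` guards against wraparound past the top of the space. */
        for (cur = start; cur && cur < end;
             cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE))
            printf("touch page at 0x%lx\n", cur);
        return 0;
    }

For start = 0x1f80 this touches 0x1f80, 0x2000, 0x3000, 0x4000: one probe per page, with only the first probe unaligned.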
mm/kfence/kfence_test.c
103
char *cur;
mm/kfence/kfence_test.c
112
cur = expect[0];
mm/kfence/kfence_test.c
116
cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
mm/kfence/kfence_test.c
120
cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
mm/kfence/kfence_test.c
124
cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
mm/kfence/kfence_test.c
127
cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
mm/kfence/kfence_test.c
131
cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
mm/kfence/kfence_test.c
135
scnprintf(cur, end - cur, " in %pS", r->fn);
mm/kfence/kfence_test.c
137
cur = strchr(expect[0], '+');
mm/kfence/kfence_test.c
138
if (cur)
mm/kfence/kfence_test.c
139
*cur = '\0';
mm/kfence/kfence_test.c
142
cur = expect[1];
mm/kfence/kfence_test.c
147
cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
mm/kfence/kfence_test.c
151
cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
mm/kfence/kfence_test.c
155
cur += scnprintf(cur, end - cur, "Corrupted memory at");
mm/kfence/kfence_test.c
158
cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
mm/kfence/kfence_test.c
162
cur += scnprintf(cur, end - cur, "Invalid free of");
mm/kfence/kfence_test.c
166
cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
mm/kfence/report.c
169
const u8 *cur, *end;
mm/kfence/report.c
176
for (cur = (const u8 *)address; cur < end; cur++) {
mm/kfence/report.c
177
if (*cur == KFENCE_CANARY_PATTERN_U8(cur))
mm/kfence/report.c
180
pr_cont(" 0x%02x", *cur);
mm/kmsan/kmsan_test.c
107
cur = expected_header;
mm/kmsan/kmsan_test.c
110
cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);
mm/kmsan/kmsan_test.c
112
scnprintf(cur, end - cur, " in %s", r->symbol);
mm/kmsan/kmsan_test.c
114
cur = strchr(expected_header, '+');
mm/kmsan/kmsan_test.c
115
if (cur)
mm/kmsan/kmsan_test.c
116
*cur = '\0';
mm/kmsan/kmsan_test.c
98
char *cur;
mm/readahead.c
397
unsigned long cur = ra->size;
mm/readahead.c
399
if (cur < max / 16)
mm/readahead.c
400
return 4 * cur;
mm/readahead.c
401
if (cur <= max / 2)
mm/readahead.c
402
return 2 * cur;
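Note: the readahead.c lines encode a simple window-growth policy: quadruple a small window (under max/16), double a moderate one (up to max/2), otherwise jump straight to the maximum. As a standalone function:

    #include <stdio.h>

    /* Grow the readahead window: aggressive while small, capped at max. */
    static unsigned long next_ra_size(unsigned long cur, unsigned long max)
    {
        if (cur < max / 16)
            return 4 * cur;
        if (cur <= max / 2)
            return 2 * cur;
        return max;
    }

    int main(void)
    {
        printf("%lu\n", next_ra_size(4, 256));    /* 16  */
        printf("%lu\n", next_ra_size(64, 256));   /* 128 */
        printf("%lu\n", next_ra_size(200, 256));  /* 256 */
        return 0;
    }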
mm/slub.c
3375
void *cur;
mm/slub.c
3401
cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
mm/slub.c
3402
cur = setup_object(s, cur);
mm/slub.c
3403
slab->freelist = cur;
mm/slub.c
3409
set_freepointer(s, cur, next);
mm/slub.c
3410
cur = next;
mm/slub.c
3412
set_freepointer(s, cur, NULL);
mm/vmalloc.c
3175
struct vm_struct *cur, **p;
mm/vmalloc.c
3179
for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
mm/vmalloc.c
3180
if ((unsigned long)cur->addr - addr >= vm->size)
mm/vmalloc.c
3182
addr = ALIGN((unsigned long)cur->addr + cur->size, align);
mm/vmscan.c
5558
char *cur, *next;
mm/vmscan.c
5591
while ((cur = strsep(&next, ",;\n"))) {
mm/vmscan.c
5601
cur = skip_spaces(cur);
mm/vmscan.c
5602
if (!*cur)
mm/vmscan.c
5605
n = sscanf(cur, "%c %llu %u %lu %n %4s %n %lu %n", &cmd, &memcg_id, &nid,
mm/vmscan.c
5607
if (n < 4 || cur[end]) {
net/bridge/br.c
391
bool cur = !!br_opt_get(br, opt);
net/bridge/br.c
394
opt, cur, on);
net/bridge/br.c
396
if (cur == on)
net/bridge/br_private.h
696
static inline bool br_vlan_valid_range(const struct bridge_vlan_info *cur,
net/bridge/br_private.h
701
if (cur->flags & BRIDGE_VLAN_INFO_PVID) {
net/bridge/br_private.h
711
if (cur->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
net/bridge/br_private.h
714
} else if (!(cur->flags & BRIDGE_VLAN_INFO_RANGE_END)) {
net/bridge/br_private.h
717
} else if (cur->vid <= last->vid) {
net/bridge/br_private.h
724
if (!(cur->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
net/ceph/osd_client.c
1946
struct ceph_osd_backoff *cur =
net/ceph/osd_client.c
1950
cmp = hoid_compare(hoid, cur->begin);
net/ceph/osd_client.c
1954
if (hoid_compare(hoid, cur->end) < 0)
net/ceph/osd_client.c
1955
return cur;
net/ceph/osd_client.c
1959
return cur;
net/core/dev.c
8189
int ret, cur = 0;
net/core/dev.c
8212
dev_stack[cur] = now;
net/core/dev.c
8213
iter_stack[cur++] = iter;
net/core/dev.c
8218
if (!cur)
net/core/dev.c
8220
next = dev_stack[--cur];
net/core/dev.c
8221
niter = iter_stack[cur];
net/core/dev.c
8238
int ret, cur = 0;
net/core/dev.c
8258
dev_stack[cur] = now;
net/core/dev.c
8259
iter_stack[cur++] = iter;
net/core/dev.c
8264
if (!cur)
net/core/dev.c
8266
next = dev_stack[--cur];
net/core/dev.c
8267
niter = iter_stack[cur];
net/core/dev.c
8412
int ret, cur = 0;
net/core/dev.c
8432
dev_stack[cur] = now;
net/core/dev.c
8433
iter_stack[cur++] = iter;
net/core/dev.c
8438
if (!cur)
net/core/dev.c
8440
next = dev_stack[--cur];
net/core/dev.c
8441
niter = iter_stack[cur];
net/core/dev.c
8459
int ret, cur = 0;
net/core/dev.c
8482
dev_stack[cur] = now;
net/core/dev.c
8483
iter_stack[cur++] = iter;
net/core/dev.c
8488
if (!cur)
net/core/dev.c
8490
next = dev_stack[--cur];
net/core/dev.c
8491
niter = iter_stack[cur];
net/core/dev.c
8597
int ret, cur = 0;
net/core/dev.c
8617
dev_stack[cur] = now;
net/core/dev.c
8618
iter_stack[cur++] = iter;
net/core/dev.c
8623
if (!cur)
net/core/dev.c
8625
next = dev_stack[--cur];
net/core/dev.c
8626
niter = iter_stack[cur];
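Note: the five dev.c walkers share one shape: an iterative depth-first traversal with two parallel stacks, one of nodes and one of per-level iterators, so each level can resume exactly where it left off without recursion. A compact sketch over a hypothetical static tree, using child indexes as the saved iterators:

    #include <stdio.h>

    #define MAX_DEPTH 8

    struct node {
        const char *name;
        struct node *children[4];   /* NULL-terminated */
    };

    static void walk(struct node *root)
    {
        struct node *node_stack[MAX_DEPTH];
        int iter_stack[MAX_DEPTH];
        struct node *now = root;
        int iter = 0, cur = 0;

        for (;;) {
            struct node *next = now->children[iter];

            if (next) {                 /* descend one level */
                printf("%s\n", next->name);
                node_stack[cur] = now;
                iter_stack[cur++] = iter + 1;
                now = next;
                iter = 0;
                continue;
            }
            if (!cur)                   /* back at the root: done */
                break;
            now = node_stack[--cur];    /* pop and resume this level */
            iter = iter_stack[cur];
        }
    }

    int main(void)
    {
        struct node d = { "d", { NULL } }, c = { "c", { NULL } };
        struct node b = { "b", { &d, NULL } };
        struct node a = { "a", { &b, &c, NULL } };

        walk(&a);   /* prints b, d, c */
        return 0;
    }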
net/core/flow_offload.c
418
struct flow_indir_dev_info *cur;
net/core/flow_offload.c
420
list_for_each_entry(cur, &flow_indir_dev_list, list) {
net/core/flow_offload.c
422
bo.command = cur->command;
net/core/flow_offload.c
423
bo.binder_type = cur->binder_type;
net/core/flow_offload.c
425
cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
net/core/flow_offload.c
426
list_splice(&bo.cb_list, cur->cb_list);
net/core/flow_offload.c
553
struct flow_indir_dev_info *cur;
net/core/flow_offload.c
555
list_for_each_entry(cur, &flow_indir_dev_list, list) {
net/core/flow_offload.c
556
if (cur->data == data)
net/core/flow_offload.c
557
return cur;
net/core/net_test.c
150
struct sk_buff *skb, *segs, *cur, *next, *last;
net/core/net_test.c
239
for (cur = segs, i = 0; cur; cur = next, i++) {
net/core/net_test.c
240
next = cur->next;
net/core/net_test.c
242
KUNIT_ASSERT_EQ(test, cur->len, sizeof(hdr) + tcase->segs[i]);
net/core/net_test.c
245
KUNIT_ASSERT_PTR_EQ(test, skb_mac_header(cur), cur->data);
net/core/net_test.c
246
KUNIT_ASSERT_PTR_EQ(test, skb_network_header(cur), cur->data + sizeof(hdr));
net/core/net_test.c
249
KUNIT_ASSERT_EQ(test, memcmp(skb_mac_header(cur), hdr, sizeof(hdr)), 0);
net/core/net_test.c
253
KUNIT_ASSERT_PTR_EQ(test, cur, last);
net/core/net_test.c
255
consume_skb(cur);
net/core/pktgen.c
3443
struct pktgen_dev *cur;
net/core/pktgen.c
3448
cur = list_entry(q, struct pktgen_dev, list);
net/core/pktgen.c
3450
if (!cur->removal_mark)
net/core/pktgen.c
3453
kfree_skb(cur->skb);
net/core/pktgen.c
3454
cur->skb = NULL;
net/core/pktgen.c
3456
pktgen_remove_device(t, cur);
net/core/pktgen.c
3465
struct pktgen_dev *cur;
net/core/pktgen.c
3472
cur = list_entry(q, struct pktgen_dev, list);
net/core/pktgen.c
3474
kfree_skb(cur->skb);
net/core/pktgen.c
3475
cur->skb = NULL;
net/core/pktgen.c
3477
pktgen_remove_device(t, cur);
net/core/sysctl_net_core.c
213
struct sd_flow_limit *cur;
net/core/sysctl_net_core.c
227
len = sizeof(*cur) + netdev_flow_limit_table_len;
net/core/sysctl_net_core.c
230
cur = rcu_dereference_protected(sd->flow_limit,
net/core/sysctl_net_core.c
232
if (cur && !cpumask_test_cpu(i, mask)) {
net/core/sysctl_net_core.c
234
kfree_rcu(cur, rcu);
net/core/sysctl_net_core.c
235
} else if (!cur && cpumask_test_cpu(i, mask)) {
net/core/sysctl_net_core.c
236
cur = kzalloc_node(len, GFP_KERNEL,
net/core/sysctl_net_core.c
238
if (!cur) {
net/core/sysctl_net_core.c
243
cur->log_buckets = ilog2(netdev_flow_limit_table_len);
net/core/sysctl_net_core.c
244
rcu_assign_pointer(sd->flow_limit, cur);
net/devlink/sb.c
481
u32 cur;
net/devlink/sb.c
485
pool_index, &cur, &max);
net/devlink/sb.c
489
if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
net/devlink/sb.c
691
u32 cur;
net/devlink/sb.c
697
&cur, &max);
net/devlink/sb.c
701
if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
net/ipv4/tcp_ipv4.c
2583
static void *listening_get_next(struct seq_file *seq, void *cur)
net/ipv4/tcp_ipv4.c
2589
struct sock *sk = cur;
net/ipv4/tcp_ipv4.c
2661
static void *established_get_next(struct seq_file *seq, void *cur)
net/ipv4/tcp_ipv4.c
2666
struct sock *sk = cur;
net/ipv4/udp_offload.c
104
if (cur) {
net/ipv4/udp_offload.c
105
refcount_inc(&cur->count);
net/ipv4/udp_offload.c
114
cur = &udp_tunnel_gro_types[udp_tunnel_gro_type_nr++];
net/ipv4/udp_offload.c
115
refcount_set(&cur->count, 1);
net/ipv4/udp_offload.c
116
cur->gro_receive = up->gro_receive;
net/ipv4/udp_offload.c
123
if (WARN_ON_ONCE(!cur))
net/ipv4/udp_offload.c
126
if (!refcount_dec_and_test(&cur->count))
net/ipv4/udp_offload.c
130
*cur = udp_tunnel_gro_types[--udp_tunnel_gro_type_nr];
net/ipv4/udp_offload.c
81
struct udp_tunnel_type_entry *cur = NULL;
net/ipv4/udp_offload.c
96
cur = &udp_tunnel_gro_types[i];
net/ipv6/ip6_fib.c
2095
struct fib6_info *cur = rcu_dereference_protected(*rtp,
net/ipv6/ip6_fib.c
2097
if (rt == cur) {
net/ipv6/ip6_fib.c
2098
if (fib6_requires_src(cur))
net/ipv6/ip6_fib.c
2103
rtp_next = &cur->fib6_next;
net/ipv6/ndisc.c
184
static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
net/ipv6/ndisc.c
188
if (!cur || !end || cur >= end)
net/ipv6/ndisc.c
190
type = cur->nd_opt_type;
net/ipv6/ndisc.c
192
cur = ((void *)cur) + (cur->nd_opt_len << 3);
net/ipv6/ndisc.c
193
} while (cur < end && cur->nd_opt_type != type);
net/ipv6/ndisc.c
194
return cur <= end && cur->nd_opt_type == type ? cur : NULL;
net/ipv6/ndisc.c
209
struct nd_opt_hdr *cur,
net/ipv6/ndisc.c
212
if (!cur || !end || cur >= end)
net/ipv6/ndisc.c
215
cur = ((void *)cur) + (cur->nd_opt_len << 3);
net/ipv6/ndisc.c
216
} while (cur < end && !ndisc_is_useropt(dev, cur));
net/ipv6/ndisc.c
217
return cur <= end && ndisc_is_useropt(dev, cur) ? cur : NULL;
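Note: the ndisc.c helpers walk neighbour-discovery options as length-prefixed records: nd_opt_len counts 8-byte units, so the next option sits at cur + (cur->nd_opt_len << 3), and a zero length must terminate the walk or it never advances. A standalone walker over a small byte buffer:

    #include <stdint.h>
    #include <stdio.h>

    struct nd_opt_hdr { uint8_t type; uint8_t len; /* in 8-byte units */ };

    int main(void)
    {
        /* Two options: type 1 (1 unit = 8 bytes), type 5 (2 units = 16). */
        uint8_t buf[24] = { 1, 1, 0, 0, 0, 0, 0, 0,
                            5, 2, 0, 0, 0, 0, 0, 0,
                            0, 0, 0, 0, 0, 0, 0, 0 };
        struct nd_opt_hdr *cur = (struct nd_opt_hdr *)buf;
        struct nd_opt_hdr *end = (struct nd_opt_hdr *)(buf + sizeof(buf));

        while (cur < end && cur->len) {   /* zero len would loop forever */
            printf("option type %d, %d bytes\n", cur->type, cur->len << 3);
            cur = (struct nd_opt_hdr *)((uint8_t *)cur + (cur->len << 3));
        }
        return 0;
    }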
net/mac80211/rc80211_minstrel_ht.c
705
u16 cur;
net/mac80211/rc80211_minstrel_ht.c
712
cur = rates[i];
net/mac80211/rc80211_minstrel_ht.c
714
return cur;
net/mac80211/rc80211_minstrel_ht.c
791
u16 cur = mi->sample[type].sample_rates[i];
net/mac80211/rc80211_minstrel_ht.c
793
if (cur == idx)
net/mac80211/rc80211_minstrel_ht.c
796
if (!cur)
net/mac80211/rc80211_minstrel_ht.c
813
u16 cur;
net/mac80211/rc80211_minstrel_ht.c
815
cur = rates[i];
net/mac80211/rc80211_minstrel_ht.c
816
if (!cur)
net/mac80211/rc80211_minstrel_ht.c
819
duration = minstrel_get_duration(cur);
net/mac80211/rc80211_minstrel_ht.c
842
rates[j++] = cur;
net/mac80211/sta_info.c
2397
sta->sta.cur = &sta->sta.deflink.agg;
net/mac80211/sta_info.c
2414
sta->cur = sta->sta.deflink.agg;
net/mac80211/sta_info.c
2419
sta->cur.max_amsdu_len =
net/mac80211/sta_info.c
2420
min(sta->cur.max_amsdu_len,
net/mac80211/sta_info.c
2422
sta->cur.max_rc_amsdu_len =
net/mac80211/sta_info.c
2423
min(sta->cur.max_rc_amsdu_len,
net/mac80211/sta_info.c
2426
for (i = 0; i < ARRAY_SIZE(sta->cur.max_tid_amsdu_len); i++)
net/mac80211/sta_info.c
2427
sta->cur.max_tid_amsdu_len[i] =
net/mac80211/sta_info.c
2428
min(sta->cur.max_tid_amsdu_len[i],
net/mac80211/sta_info.c
2433
sta->sta.cur = &sta->cur;
net/mac80211/sta_info.c
639
sta->sta.cur = &sta->sta.deflink.agg;
net/mac80211/sta_info.h
771
struct ieee80211_sta_aggregates cur;
net/mac80211/tx.c
3420
int max_amsdu_len = sta->sta.cur->max_amsdu_len;
net/mac80211/tx.c
3449
if (sta->sta.cur->max_rc_amsdu_len)
net/mac80211/tx.c
3451
sta->sta.cur->max_rc_amsdu_len);
net/mac80211/tx.c
3453
if (sta->sta.cur->max_tid_amsdu_len[tid])
net/mac80211/tx.c
3455
sta->sta.cur->max_tid_amsdu_len[tid]);
net/mac80211/util.c
3893
s32 cur;
net/mac80211/util.c
3901
cur = data->desc[i].start - tsf;
net/mac80211/util.c
3902
if (cur > *offset)
net/mac80211/util.c
3905
cur = data->desc[i].start + data->desc[i].duration - tsf;
net/mac80211/util.c
3906
if (cur > *offset)
net/mac80211/util.c
3907
*offset = cur;
net/mptcp/pm.c
121
struct mptcp_addr_info cur;
net/mptcp/pm.c
127
mptcp_local_address(skc, &cur);
net/mptcp/pm.c
128
if (mptcp_addresses_equal(&cur, saddr, saddr->port))
net/mptcp/pm_kernel.c
102
struct mptcp_addr_info cur;
net/mptcp/pm_kernel.c
111
mptcp_remote_address((struct sock_common *)ssk, &cur);
net/mptcp/pm_kernel.c
112
if (mptcp_addresses_equal(&cur, daddr, daddr->port))
net/mptcp/pm_kernel.c
1267
struct mptcp_pm_addr_entry *cur;
net/mptcp/pm_kernel.c
1269
cur = list_entry(list->next,
net/mptcp/pm_kernel.c
1271
list_del_rcu(&cur->list);
net/mptcp/pm_kernel.c
1272
__mptcp_pm_release_addr_entry(cur);
net/mptcp/pm_kernel.c
725
struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
net/mptcp/pm_kernel.c
749
list_for_each_entry(cur, &pernet->endp_list, list) {
net/mptcp/pm_kernel.c
750
if (mptcp_addresses_equal(&cur->addr, &entry->addr,
net/mptcp/pm_kernel.c
751
cur->addr.port || entry->addr.port)) {
net/mptcp/pm_kernel.c
756
if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) {
net/mptcp/pm_kernel.c
770
ret = cur->addr.id;
net/mptcp/pm_kernel.c
775
entry->addr.id = cur->addr.id;
net/mptcp/pm_kernel.c
776
list_del_rcu(&cur->list);
net/mptcp/pm_kernel.c
777
del_entry = cur;
net/mptcp/protocol.c
2450
struct mptcp_data_frag *cur, *rtx_head;
net/mptcp/protocol.c
2476
list_for_each_entry(cur, &msk->rtx_queue, list) {
net/mptcp/protocol.c
2477
if (!cur->already_sent)
net/mptcp/protocol.c
2479
cur->already_sent = 0;
net/mptcp/protocol.h
428
struct mptcp_data_frag *cur;
net/mptcp/protocol.h
430
cur = msk->first_pending;
net/mptcp/protocol.h
431
return list_is_last(&cur->list, &msk->rtx_queue) ? NULL :
net/mptcp/protocol.h
432
list_next_entry(cur, list);
net/netfilter/nf_conntrack_h323_asn1.c
100
#define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
net/netfilter/nf_conntrack_h323_asn1.c
101
#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
net/netfilter/nf_conntrack_h323_asn1.c
102
#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
net/netfilter/nf_conntrack_h323_asn1.c
154
v = *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
159
v += *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
172
if (bs->cur + bytes > bs->end)
net/netfilter/nf_conntrack_h323_asn1.c
180
unsigned int b = (*bs->cur) & (0x80 >> bs->bit);
net/netfilter/nf_conntrack_h323_asn1.c
192
v = (*bs->cur) & (0xffU >> bs->bit);
net/netfilter/nf_conntrack_h323_asn1.c
199
bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
204
v += *(++bs->cur);
net/netfilter/nf_conntrack_h323_asn1.c
223
v = (unsigned int)(*bs->cur) << (bs->bit + 24);
net/netfilter/nf_conntrack_h323_asn1.c
226
v = (unsigned int)(*bs->cur++) << (bs->bit + 24);
net/netfilter/nf_conntrack_h323_asn1.c
231
v |= (unsigned int)(*bs->cur++) << shift;
net/netfilter/nf_conntrack_h323_asn1.c
234
v |= (unsigned int)(*bs->cur) << shift;
net/netfilter/nf_conntrack_h323_asn1.c
238
v |= (*bs->cur) >> (8 - bs->bit);
net/netfilter/nf_conntrack_h323_asn1.c
258
v |= *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
262
v |= *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
266
v |= *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
270
v |= *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
306
len = *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
307
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
324
bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
328
bs->cur += 2;
net/netfilter/nf_conntrack_h323_asn1.c
342
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
349
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
394
len = (*bs->cur++) << 8;
net/netfilter/nf_conntrack_h323_asn1.c
395
len += (*bs->cur++) + f->lb;
net/netfilter/nf_conntrack_h323_asn1.c
407
bs->cur += len >> 3;
net/netfilter/nf_conntrack_h323_asn1.c
450
bs->cur[0], bs->cur[1],
net/netfilter/nf_conntrack_h323_asn1.c
451
bs->cur[2], bs->cur[3],
net/netfilter/nf_conntrack_h323_asn1.c
452
bs->cur[4] * 256 + bs->cur[5]));
net/netfilter/nf_conntrack_h323_asn1.c
454
bs->cur - bs->buf;
net/netfilter/nf_conntrack_h323_asn1.c
463
len = (*bs->cur++) + f->lb;
net/netfilter/nf_conntrack_h323_asn1.c
479
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
500
len = (*bs->cur++) + f->lb;
net/netfilter/nf_conntrack_h323_asn1.c
510
bs->cur += len << 1;
net/netfilter/nf_conntrack_h323_asn1.c
567
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
570
beg = bs->cur;
net/netfilter/nf_conntrack_h323_asn1.c
578
bs->cur = beg + len;
net/netfilter/nf_conntrack_h323_asn1.c
613
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
634
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
637
beg = bs->cur;
net/netfilter/nf_conntrack_h323_asn1.c
644
bs->cur = beg + len;
net/netfilter/nf_conntrack_h323_asn1.c
669
count = *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
675
count = *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
677
count += *bs->cur++;
net/netfilter/nf_conntrack_h323_asn1.c
715
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
718
beg = bs->cur;
net/netfilter/nf_conntrack_h323_asn1.c
728
bs->cur = beg + len;
net/netfilter/nf_conntrack_h323_asn1.c
788
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
809
bs->cur += len;
net/netfilter/nf_conntrack_h323_asn1.c
812
beg = bs->cur;
net/netfilter/nf_conntrack_h323_asn1.c
818
bs->cur = beg + len;
net/netfilter/nf_conntrack_h323_asn1.c
835
bs.buf = bs.beg = bs.cur = buf;
net/netfilter/nf_conntrack_h323_asn1.c
852
bs.beg = bs.cur = beg;
net/netfilter/nf_conntrack_h323_asn1.c
869
bs.buf = bs.beg = bs.cur = buf;
net/netfilter/nf_conntrack_h323_asn1.c
95
unsigned char *cur;
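Note: the H.323 ASN.1 PER decoder above keeps a byte cursor plus a bit offset; the INC_BIT()/INC_BITS()/BYTE_ALIGN() macros advance the pair, carrying bit overflow into cur. The same bookkeeping as a small MSB-first bit reader:

    #include <stdint.h>
    #include <stdio.h>

    struct bitstr { const uint8_t *cur; unsigned int bit; /* 0..7 */ };

    /* Read n bits, MSB first, carrying overflow into the byte cursor. */
    static unsigned int get_bits(struct bitstr *bs, unsigned int n)
    {
        unsigned int v = 0;

        while (n--) {
            v = (v << 1) | ((*bs->cur >> (7 - bs->bit)) & 1);
            if (++bs->bit > 7) {      /* same carry as INC_BIT() */
                bs->cur++;
                bs->bit = 0;
            }
        }
        return v;
    }

    int main(void)
    {
        const uint8_t buf[] = { 0xb4, 0x00 };   /* 1011 0100 ... */
        struct bitstr bs = { buf, 0 };
        unsigned int a = get_bits(&bs, 3);
        unsigned int b = get_bits(&bs, 3);
        unsigned int c = get_bits(&bs, 2);

        printf("%u %u %u\n", a, b, c);          /* prints 5 5 0 */
        return 0;
    }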
net/netfilter/nf_conntrack_helper.c
116
struct nf_conntrack_nat_helper *cur;
net/netfilter/nf_conntrack_helper.c
119
list_for_each_entry_rcu(cur, &nf_ct_nat_helpers, list) {
net/netfilter/nf_conntrack_helper.c
120
if (!strcmp(cur->mod_name, mod_name)) {
net/netfilter/nf_conntrack_helper.c
125
return found ? cur : NULL;
net/netfilter/nf_conntrack_helper.c
290
struct nf_ct_helper_expectfn *cur;
net/netfilter/nf_conntrack_helper.c
293
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
net/netfilter/nf_conntrack_helper.c
294
if (!strcmp(cur->name, name)) {
net/netfilter/nf_conntrack_helper.c
299
return found ? cur : NULL;
net/netfilter/nf_conntrack_helper.c
307
struct nf_ct_helper_expectfn *cur;
net/netfilter/nf_conntrack_helper.c
310
list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
net/netfilter/nf_conntrack_helper.c
311
if (cur->expectfn == symbol) {
net/netfilter/nf_conntrack_helper.c
316
return found ? cur : NULL;
net/netfilter/nf_conntrack_helper.c
351
struct nf_conntrack_helper *cur;
net/netfilter/nf_conntrack_helper.c
366
hlist_for_each_entry(cur, &nf_ct_helper_hash[i], hnode) {
net/netfilter/nf_conntrack_helper.c
367
if (!strcmp(cur->name, me->name) &&
net/netfilter/nf_conntrack_helper.c
368
(cur->tuple.src.l3num == NFPROTO_UNSPEC ||
net/netfilter/nf_conntrack_helper.c
369
cur->tuple.src.l3num == me->tuple.src.l3num) &&
net/netfilter/nf_conntrack_helper.c
370
cur->tuple.dst.protonum == me->tuple.dst.protonum) {
net/netfilter/nf_conntrack_helper.c
379
hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
net/netfilter/nf_conntrack_helper.c
380
if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple,
net/netfilter/nf_tables_api.c
10227
track.cur = expr;
net/netfilter/nf_tables_api.c
10230
expr = track.cur;
net/netfilter/nfnetlink_acct.c
198
struct nf_acct *cur, *last;
net/netfilter/nfnetlink_acct.c
209
list_for_each_entry_rcu(cur, &nfnl_acct_net->nfnl_acct_list, head) {
net/netfilter/nfnetlink_acct.c
211
if (cur != last)
net/netfilter/nfnetlink_acct.c
217
if (filter && (cur->flags & filter->mask) != filter->value)
net/netfilter/nfnetlink_acct.c
223
NFNL_MSG_ACCT_NEW, cur) < 0) {
net/netfilter/nfnetlink_acct.c
224
cb->args[1] = (unsigned long)cur;
net/netfilter/nfnetlink_acct.c
279
struct nf_acct *cur;
net/netfilter/nfnetlink_acct.c
297
list_for_each_entry(cur, &nfnl_acct_net->nfnl_acct_list, head) {
net/netfilter/nfnetlink_acct.c
300
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
net/netfilter/nfnetlink_acct.c
312
NFNL_MSG_ACCT_NEW, cur);
net/netfilter/nfnetlink_acct.c
326
static int nfnl_acct_try_del(struct nf_acct *cur)
net/netfilter/nfnetlink_acct.c
333
if (refcount_dec_if_one(&cur->refcnt)) {
net/netfilter/nfnetlink_acct.c
335
list_del_rcu(&cur->head);
net/netfilter/nfnetlink_acct.c
336
kfree_rcu(cur, rcu_head);
net/netfilter/nfnetlink_acct.c
347
struct nf_acct *cur, *tmp;
net/netfilter/nfnetlink_acct.c
352
list_for_each_entry_safe(cur, tmp, &nfnl_acct_net->nfnl_acct_list, head)
net/netfilter/nfnetlink_acct.c
353
nfnl_acct_try_del(cur);
net/netfilter/nfnetlink_acct.c
359
list_for_each_entry(cur, &nfnl_acct_net->nfnl_acct_list, head) {
net/netfilter/nfnetlink_acct.c
360
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
net/netfilter/nfnetlink_acct.c
363
ret = nfnl_acct_try_del(cur);
net/netfilter/nfnetlink_acct.c
420
struct nf_acct *cur, *acct = NULL;
net/netfilter/nfnetlink_acct.c
423
list_for_each_entry_rcu(cur, &nfnl_acct_net->nfnl_acct_list, head) {
net/netfilter/nfnetlink_acct.c
424
if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
net/netfilter/nfnetlink_acct.c
430
if (!refcount_inc_not_zero(&cur->refcnt)) {
net/netfilter/nfnetlink_acct.c
435
acct = cur;
net/netfilter/nfnetlink_acct.c
513
struct nf_acct *cur, *tmp;
net/netfilter/nfnetlink_acct.c
515
list_for_each_entry_safe(cur, tmp, &nfnl_acct_net->nfnl_acct_list, head) {
net/netfilter/nfnetlink_acct.c
516
list_del_rcu(&cur->head);
net/netfilter/nfnetlink_acct.c
518
if (refcount_dec_and_test(&cur->refcnt))
net/netfilter/nfnetlink_acct.c
519
kfree_rcu(cur, rcu_head);
net/netfilter/nfnetlink_cthelper.c
421
struct nf_conntrack_helper *cur, *helper = NULL;
net/netfilter/nfnetlink_cthelper.c
439
cur = &nlcth->helper;
net/netfilter/nfnetlink_cthelper.c
441
if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
net/netfilter/nfnetlink_cthelper.c
444
if ((tuple.src.l3num != cur->tuple.src.l3num ||
net/netfilter/nfnetlink_cthelper.c
445
tuple.dst.protonum != cur->tuple.dst.protonum))
net/netfilter/nfnetlink_cthelper.c
451
helper = cur;
net/netfilter/nfnetlink_cthelper.c
577
struct nf_conntrack_helper *cur, *last;
net/netfilter/nfnetlink_cthelper.c
583
hlist_for_each_entry_rcu(cur,
net/netfilter/nfnetlink_cthelper.c
587
if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
net/netfilter/nfnetlink_cthelper.c
591
if (cur != last)
net/netfilter/nfnetlink_cthelper.c
599
NFNL_MSG_CTHELPER_NEW, cur) < 0) {
net/netfilter/nfnetlink_cthelper.c
600
cb->args[1] = (unsigned long)cur;
net/netfilter/nfnetlink_cthelper.c
618
struct nf_conntrack_helper *cur;
net/netfilter/nfnetlink_cthelper.c
647
cur = &nlcth->helper;
net/netfilter/nfnetlink_cthelper.c
649
strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
net/netfilter/nfnetlink_cthelper.c
653
(tuple.src.l3num != cur->tuple.src.l3num ||
net/netfilter/nfnetlink_cthelper.c
654
tuple.dst.protonum != cur->tuple.dst.protonum))
net/netfilter/nfnetlink_cthelper.c
666
NFNL_MSG_CTHELPER_NEW, cur);
net/netfilter/nfnetlink_cthelper.c
683
struct nf_conntrack_helper *cur;
net/netfilter/nfnetlink_cthelper.c
705
cur = &nlcth->helper;
net/netfilter/nfnetlink_cthelper.c
709
strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
net/netfilter/nfnetlink_cthelper.c
713
(tuple.src.l3num != cur->tuple.src.l3num ||
net/netfilter/nfnetlink_cthelper.c
714
tuple.dst.protonum != cur->tuple.dst.protonum))
net/netfilter/nfnetlink_cthelper.c
717
if (refcount_dec_if_one(&cur->refcnt)) {
net/netfilter/nfnetlink_cthelper.c
719
nf_conntrack_helper_unregister(cur);
net/netfilter/nfnetlink_cthelper.c
720
kfree(cur->expect_policy);
net/netfilter/nfnetlink_cthelper.c
787
struct nf_conntrack_helper *cur;
net/netfilter/nfnetlink_cthelper.c
793
cur = &nlcth->helper;
net/netfilter/nfnetlink_cthelper.c
795
nf_conntrack_helper_unregister(cur);
net/netfilter/nfnetlink_cthelper.c
796
kfree(cur->expect_policy);
net/netfilter/nfnetlink_cttimeout.c
230
struct ctnl_timeout *cur, *last;
net/netfilter/nfnetlink_cttimeout.c
241
list_for_each_entry_rcu(cur, &pernet->nfct_timeout_list, head) {
net/netfilter/nfnetlink_cttimeout.c
243
if (cur != last)
net/netfilter/nfnetlink_cttimeout.c
251
IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
net/netfilter/nfnetlink_cttimeout.c
252
cb->args[1] = (unsigned long)cur;
net/netfilter/nfnetlink_cttimeout.c
269
struct ctnl_timeout *cur;
net/netfilter/nfnetlink_cttimeout.c
282
list_for_each_entry(cur, &pernet->nfct_timeout_list, head) {
net/netfilter/nfnetlink_cttimeout.c
285
if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
net/netfilter/nfnetlink_cttimeout.c
297
IPCTNL_MSG_TIMEOUT_NEW, cur);
net/netfilter/nfnetlink_cttimeout.c
334
struct ctnl_timeout *cur, *tmp;
net/netfilter/nfnetlink_cttimeout.c
339
list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list,
net/netfilter/nfnetlink_cttimeout.c
341
ctnl_timeout_try_del(info->net, cur);
net/netfilter/nfnetlink_cttimeout.c
347
list_for_each_entry(cur, &pernet->nfct_timeout_list, head) {
net/netfilter/nfnetlink_cttimeout.c
348
if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
net/netfilter/nfnetlink_cttimeout.c
351
ret = ctnl_timeout_try_del(info->net, cur);
net/netfilter/nfnetlink_cttimeout.c
590
struct ctnl_timeout *cur, *tmp;
net/netfilter/nfnetlink_cttimeout.c
592
list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_list, head) {
net/netfilter/nfnetlink_cttimeout.c
593
list_del_rcu(&cur->head);
net/netfilter/nfnetlink_cttimeout.c
594
list_add(&cur->free_head, &pernet->nfct_timeout_freelist);
net/netfilter/nfnetlink_cttimeout.c
603
struct ctnl_timeout *cur, *tmp;
net/netfilter/nfnetlink_cttimeout.c
610
list_for_each_entry_safe(cur, tmp, &pernet->nfct_timeout_freelist, free_head) {
net/netfilter/nfnetlink_cttimeout.c
611
list_del(&cur->free_head);
net/netfilter/nfnetlink_cttimeout.c
613
if (refcount_dec_and_test(&cur->refcnt))
net/netfilter/nfnetlink_cttimeout.c
614
kfree_rcu(cur, rcu_head);
net/netfilter/nft_bitwise.c
419
track->cur = expr;
net/netfilter/nft_bitwise.c
568
track->cur = expr;
net/netfilter/x_tables.c
685
if (xp->cur >= xp->number)
net/netfilter/x_tables.c
688
if (xp->cur)
net/netfilter/x_tables.c
689
delta += xp->compat_tab[xp->cur - 1].delta;
net/netfilter/x_tables.c
690
xp->compat_tab[xp->cur].offset = offset;
net/netfilter/x_tables.c
691
xp->compat_tab[xp->cur].delta = delta;
net/netfilter/x_tables.c
692
xp->cur++;
net/netfilter/x_tables.c
705
xt[af].cur = 0;
net/netfilter/x_tables.c
713
int mid, left = 0, right = xt[af].cur - 1;
net/netfilter/x_tables.c
73
unsigned int cur; /* number of used slots in compat_tab[] */
net/netfilter/x_tables.c
749
xt[af].cur = 0;
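In the x_tables excerpts, cur counts the used slots of an append-only {offset, delta} table whose deltas accumulate, and lookups binary-search it by offset. An illustrative userspace reduction of that scheme (the names and the fixed table size are hypothetical, not the kernel's):

struct compat_delta { unsigned int offset; int delta; };

static struct compat_delta tab[64];
static unsigned int cur;                /* number of used slots in tab[] */

static void add_offset(unsigned int offset, int delta)
{
    if (cur)
        delta += tab[cur - 1].delta;    /* deltas are cumulative */
    tab[cur].offset = offset;
    tab[cur].delta = delta;
    cur++;
}

/* cumulative delta of the last record at or before offset */
static int calc_delta(unsigned int offset)
{
    int mid, left = 0, right = cur - 1;

    while (left <= right) {
        mid = (left + right) / 2;
        if (offset > tab[mid].offset)
            left = mid + 1;
        else if (offset < tab[mid].offset)
            right = mid - 1;
        else
            return tab[mid].delta;
    }
    return left ? tab[left - 1].delta : 0;
}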
net/rds/ib_rdma.c
320
struct llist_node *cur = NULL;
net/rds/ib_rdma.c
324
cur = &ibmr->llnode;
net/rds/ib_rdma.c
325
*next = cur;
net/rds/ib_rdma.c
326
next = &cur->next;
net/rds/ib_rdma.c
329
*nodes_tail = cur;
net/rfkill/core.c
116
bool cur, sav;
net/rfkill/core.c
393
rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
net/rfkill/core.c
402
rfkill_global_states[type].cur = blocked;
net/rfkill/core.c
407
rfkill_global_states[i].cur = blocked;
net/rfkill/core.c
483
rfkill_global_states[i].sav = rfkill_global_states[i].cur;
net/rfkill/core.c
484
rfkill_global_states[i].cur = true;
net/rfkill/core.c
551
return rfkill_global_states[type].cur;
net/rfkill/core.c
936
bool cur;
net/rfkill/core.c
944
cur = !!(rfkill->state & RFKILL_BLOCK_SW);
net/rfkill/core.c
945
rfkill_set_block(rfkill, cur);
net/sched/act_pedit.c
257
u32 cur = nparms->tcfp_keys[i].off;
net/sched/act_pedit.c
260
if (!offmask && cur % 4) {
net/sched/act_pedit.c
274
cur += (0xff & offmask) >> nparms->tcfp_keys[i].shift;
net/sched/act_pedit.c
278
max(nparms->tcfp_off_max_hint, cur + 4);
net/sched/sch_taprio.c
124
struct sched_entry *entry, *cur;
net/sched/sch_taprio.c
134
cur = entry;
net/sched/sch_taprio.c
144
if (cur->gate_mask & BIT(tc))
net/sched/sch_taprio.c
145
entry->gate_duration[tc] += cur->interval;
net/sched/sch_taprio.c
150
cur = list_next_entry_circular(cur, &sched->entries, list);
net/sched/sch_taprio.c
151
} while (cur != entry);
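The sch_taprio walk above visits every schedule entry exactly once by starting at an arbitrary element and following the list circularly until the cursor wraps back to the start. The shape of that loop, sketched with a hypothetical visit() standing in for the per-entry work:

cur = entry;
do {
    visit(cur);     /* hypothetical per-entry work */
    cur = list_next_entry_circular(cur, &sched->entries, list);
} while (cur != entry);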
net/shaper/shaper.c
1056
struct net_shaper *cur, *leaves, node = {};
net/shaper/shaper.c
1066
cur = net_shaper_lookup(binding, &node.handle);
net/shaper/shaper.c
1067
if (cur) {
net/shaper/shaper.c
1068
node = *cur;
net/shaper/shaper.c
1083
xa_for_each(&hierarchy->shapers, index, cur) {
net/shaper/shaper.c
1084
if (net_shaper_handle_cmp(&cur->parent, &shaper->handle))
net/shaper/shaper.c
1092
leaves[leaves_count++] = *cur;
net/shaper/shaper.c
1289
unsigned long cur;
net/shaper/shaper.c
1300
for (cur = NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS;
net/shaper/shaper.c
1301
cur <= NET_SHAPER_A_CAPS_MAX; ++cur) {
net/shaper/shaper.c
1302
if (flags & BIT(cur) && nla_put_flag(msg, cur))
net/shaper/shaper.c
1379
struct net_shaper *cur;
net/shaper/shaper.c
1386
xa_for_each(&hierarchy->shapers, index, cur) {
net/shaper/shaper.c
1388
kfree(cur);
net/shaper/shaper.c
334
struct net_shaper *prev, *cur;
net/shaper/shaper.c
342
cur = xa_load(&hierarchy->shapers, index);
net/shaper/shaper.c
343
if (cur)
net/shaper/shaper.c
367
cur = kzalloc_obj(*cur);
net/shaper/shaper.c
368
if (!cur) {
net/shaper/shaper.c
377
prev = __xa_store(&hierarchy->shapers, index, cur, GFP_KERNEL);
net/shaper/shaper.c
382
kfree_rcu(cur, rcu);
net/shaper/shaper.c
401
struct net_shaper *cur;
net/shaper/shaper.c
409
cur = xa_load(&hierarchy->shapers, index);
net/shaper/shaper.c
410
if (WARN_ON_ONCE(!cur))
net/shaper/shaper.c
418
*cur = shapers[i];
net/shaper/shaper.c
427
struct net_shaper *cur;
net/shaper/shaper.c
434
xa_for_each_marked(&hierarchy->shapers, index, cur,
net/shaper/shaper.c
437
kfree(cur);
net/smc/smc_core.c
108
struct smc_connection *cur = rb_entry(*link,
net/smc/smc_core.c
112
if (cur->alert_token_local > token)
net/smc/smc_core.h
469
struct smc_connection *cur = rb_entry(node,
net/smc/smc_core.h
472
if (cur->alert_token_local > token) {
net/smc/smc_core.h
475
if (cur->alert_token_local < token) {
net/smc/smc_core.h
478
res = cur;
net/sunrpc/xdr.c
1521
unsigned int cur = xdr_stream_pos(xdr);
net/sunrpc/xdr.c
1525
if (iov->iov_len > cur) {
net/sunrpc/xdr.c
1526
copied = xdr_shrink_bufhead(buf, cur);
net/sunrpc/xdr.c
1527
trace_rpc_xdr_alignment(xdr, cur, copied);
net/sunrpc/xdr.c
1823
size_t cur;
net/sunrpc/xdr.c
1827
cur = min_t(size_t, buf->tail[0].iov_len, trim);
net/sunrpc/xdr.c
1828
buf->tail[0].iov_len -= cur;
net/sunrpc/xdr.c
1829
trim -= cur;
net/sunrpc/xdr.c
1835
cur = min_t(unsigned int, buf->page_len, trim);
net/sunrpc/xdr.c
1836
buf->page_len -= cur;
net/sunrpc/xdr.c
1837
trim -= cur;
net/sunrpc/xdr.c
1843
cur = min_t(size_t, buf->head[0].iov_len, trim);
net/sunrpc/xdr.c
1844
buf->head[0].iov_len -= cur;
net/sunrpc/xdr.c
1845
trim -= cur;
net/sunrpc/xprtmultipath.c
298
const struct rpc_xprt *cur,
net/sunrpc/xprtmultipath.c
305
if (cur == pos)
net/sunrpc/xprtmultipath.c
316
const struct rpc_xprt *cur)
net/sunrpc/xprtmultipath.c
318
return _xprt_switch_find_current_entry(head, cur, true);
net/sunrpc/xprtmultipath.c
325
const struct rpc_xprt *cur))
net/sunrpc/xprtmultipath.c
347
const struct rpc_xprt *cur)
net/sunrpc/xprtmultipath.c
349
return _xprt_switch_find_current_entry(head, cur, false);
net/sunrpc/xprtmultipath.c
395
const struct rpc_xprt *cur, bool check_active)
net/sunrpc/xprtmultipath.c
401
if (cur == prev)
net/sunrpc/xprtmultipath.c
441
const struct rpc_xprt *cur)
net/sunrpc/xprtmultipath.c
445
ret = xprt_switch_find_next_entry(head, cur, true);
net/sunrpc/xprtmultipath.c
453
const struct rpc_xprt *cur)
net/sunrpc/xprtmultipath.c
462
xprt = __xprt_switch_find_next_entry_roundrobin(head, cur);
net/sunrpc/xprtmultipath.c
471
cur = xprt;
net/sunrpc/xprtmultipath.c
485
const struct rpc_xprt *cur)
net/sunrpc/xprtmultipath.c
487
return xprt_switch_find_next_entry(&xps->xps_xprt_list, cur, true);
net/sunrpc/xprtmultipath.c
492
const struct rpc_xprt *cur)
net/sunrpc/xprtmultipath.c
494
return xprt_switch_find_next_entry(&xps->xps_xprt_list, cur, false);
net/tipc/crypto.c
1396
u8 cur, new;
net/tipc/crypto.c
1429
cur = atomic_read(&rx->peer_rx_active);
net/tipc/crypto.c
1432
cur != new &&
net/tipc/crypto.c
1433
atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
net/tipc/crypto.c
1436
if (cur)
net/tipc/crypto.c
1437
tipc_aead_users_dec(tx->aead[cur], 0);
net/tipc/crypto.c
1444
tx->name, cur, new, rx->name);
net/tipc/crypto.c
313
#define key_next(cur) ((cur) % KEY_MAX + 1)
net/tipc/crypto.c
470
int cur;
net/tipc/crypto.c
476
cur = atomic_read(&tmp->users);
net/tipc/crypto.c
477
if (cur == val)
net/tipc/crypto.c
479
} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
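The tipc_crypto loop above is the classic lock-free update: read the current value, bail out if it already matches, and retry the compare-and-swap until no other CPU raced in between. A runnable C11 analogue using <stdatomic.h> (a sketch of the idiom, not the kernel atomic_t API):

#include <stdatomic.h>

static void set_users(atomic_int *users, int val)
{
    int cur;

    do {
        cur = atomic_load(users);
        if (cur == val)
            return;     /* already at the target value */
    } while (!atomic_compare_exchange_weak(users, &cur, val));
}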
net/tipc/monitor.c
394
struct tipc_peer *cur, *prev, *p;
net/tipc/monitor.c
408
list_for_each_entry(cur, &self->list, list) {
net/tipc/monitor.c
409
if ((addr > prev->addr) && (addr < cur->addr))
net/tipc/monitor.c
411
if (((addr < cur->addr) || (addr > prev->addr)) &&
net/tipc/monitor.c
412
(prev->addr > cur->addr))
net/tipc/monitor.c
414
prev = cur;
net/tipc/monitor.c
416
list_add_tail(&p->list, &cur->list);
net/xfrm/xfrm_input.c
78
const struct xfrm_input_afinfo *cur;
net/xfrm/xfrm_input.c
80
cur = rcu_access_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]);
net/xfrm/xfrm_input.c
81
if (unlikely(cur != afinfo))
samples/bpf/xdp_sample_user.c
1437
static void sample_stats_print(int mask, struct stats_record *cur,
samples/bpf/xdp_sample_user.c
1443
stats_get_rx_cnt(cur, prev, 0, &out);
samples/bpf/xdp_sample_user.c
1445
stats_get_redirect_cnt(cur, prev, 0, &out);
samples/bpf/xdp_sample_user.c
1447
stats_get_redirect_err_cnt(cur, prev, 0, &out);
samples/bpf/xdp_sample_user.c
1449
stats_get_exception_cnt(cur, prev, 0, &out);
samples/bpf/xdp_sample_user.c
1451
stats_get_devmap_xmit(cur, prev, 0, &out);
samples/bpf/xdp_sample_user.c
1453
stats_get_devmap_xmit_multi(cur, prev, 0, &out,
samples/bpf/xdp_sample_user.c
1457
stats_print(prog_name, mask, cur, prev, &out);
samples/vfio-mdev/mtty.c
1116
enum vfio_device_mig_state cur = mdev_state->state;
samples/vfio-mdev/mtty.c
1118
dev_dbg(mdev_state->vdev.dev, "%s: %d -> %d\n", __func__, cur, new);
samples/vfio-mdev/mtty.c
1131
if ((cur == VFIO_DEVICE_STATE_RUNNING &&
samples/vfio-mdev/mtty.c
1133
(cur == VFIO_DEVICE_STATE_RUNNING_P2P &&
samples/vfio-mdev/mtty.c
1136
(cur == VFIO_DEVICE_STATE_PRE_COPY &&
samples/vfio-mdev/mtty.c
1138
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
samples/vfio-mdev/mtty.c
1140
(cur == VFIO_DEVICE_STATE_STOP &&
samples/vfio-mdev/mtty.c
1154
if (cur == VFIO_DEVICE_STATE_RESUMING &&
samples/vfio-mdev/mtty.c
1165
if ((cur == VFIO_DEVICE_STATE_PRE_COPY &&
samples/vfio-mdev/mtty.c
1167
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
samples/vfio-mdev/mtty.c
1169
(cur == VFIO_DEVICE_STATE_STOP_COPY &&
samples/vfio-mdev/mtty.c
1184
if ((cur == VFIO_DEVICE_STATE_RUNNING &&
samples/vfio-mdev/mtty.c
1186
(cur == VFIO_DEVICE_STATE_RUNNING_P2P &&
samples/vfio-mdev/mtty.c
1188
(cur == VFIO_DEVICE_STATE_STOP &&
samples/vfio-mdev/mtty.c
1190
(cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
samples/vfio-mdev/mtty.c
1206
if (cur == VFIO_DEVICE_STATE_STOP &&
samples/vfs/test-list-all-mounts.c
119
for (size_t cur = 0; cur < nr_mounts; cur++) {
samples/vfs/test-list-all-mounts.c
122
last_mnt_id = list[cur];
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
341
#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
344
| (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
346
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
349
| ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
350
#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
scripts/dtc/include-prefixes/dt-bindings/usb/pd.h
353
| ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
scripts/genksyms/genksyms.c
557
struct string_list *cur;
scripts/genksyms/genksyms.c
560
cur = *(b++);
scripts/genksyms/genksyms.c
561
switch (cur->tag) {
scripts/genksyms/genksyms.c
564
fprintf(debugfile, "%s ", cur->string);
scripts/genksyms/genksyms.c
565
crc = partial_crc32(cur->string, crc);
scripts/genksyms/genksyms.c
571
subsym = find_symbol(cur->string, cur->tag, 0);
scripts/genksyms/genksyms.c
575
fprintf(debugfile, "%s ", cur->string);
scripts/genksyms/genksyms.c
576
crc = partial_crc32(cur->string, crc);
scripts/genksyms/genksyms.c
588
subsym = find_symbol(cur->string, cur->tag, 0);
scripts/genksyms/genksyms.c
593
symbol_types[cur->tag].name,
scripts/genksyms/genksyms.c
594
cur->string);
scripts/genksyms/genksyms.c
596
(symbol_types[cur->tag].name),
scripts/genksyms/genksyms.c
597
mk_node(cur->string),
scripts/genksyms/genksyms.c
602
add_symbol(cur->string, cur->tag, n, 0);
scripts/genksyms/genksyms.c
607
symbol_types[cur->tag].name,
scripts/genksyms/genksyms.c
608
cur->string);
scripts/genksyms/genksyms.c
611
crc = partial_crc32(symbol_types[cur->tag].name,
scripts/genksyms/genksyms.c
614
crc = partial_crc32(cur->string, crc);
scripts/kconfig/nconf.c
610
ITEM *cur;
scripts/kconfig/nconf.c
613
cur = current_item(curses_menu);
scripts/kconfig/nconf.c
614
if (cur == NULL)
scripts/kconfig/nconf.c
616
mcur = (struct mitem *) item_userptr(cur);
scripts/kconfig/nconf.c
627
ITEM *cur;
scripts/kconfig/nconf.c
630
cur = current_item(curses_menu);
scripts/kconfig/nconf.c
631
if (!cur)
scripts/kconfig/nconf.c
633
mcur = (struct mitem *) item_userptr(cur);
security/integrity/ima/ima_policy.c
322
char *cur, *next;
security/integrity/ima/ima_policy.c
330
while ((cur = strsep(&next, "|"))) {
security/integrity/ima/ima_policy.c
332
if (!(*cur)) {
security/integrity/ima/ima_policy.c
362
for (i = 0, cur = src_copy; i < count; i++) {
security/integrity/ima/ima_policy.c
363
opt_list->items[i] = cur;
security/integrity/ima/ima_policy.c
364
cur = strchr(cur, '\0') + 1;
security/loadpin/loadpin.c
221
char *cur;
security/loadpin/loadpin.c
234
cur = exclude_read_files[i];
security/loadpin/loadpin.c
235
if (!cur)
security/loadpin/loadpin.c
237
if (*cur == '\0')
security/loadpin/loadpin.c
241
if (strcmp(cur, kernel_read_file_str[j]) == 0) {
security/selinux/ss/avtab.c
100
cmp = avtab_node_cmp(key, &cur->key);
security/selinux/ss/avtab.c
125
struct avtab_node *prev, *cur;
security/selinux/ss/avtab.c
131
for (prev = NULL, cur = h->htable[hvalue]; cur;
security/selinux/ss/avtab.c
132
prev = cur, cur = cur->next) {
security/selinux/ss/avtab.c
133
cmp = avtab_node_cmp(key, &cur->key);
security/selinux/ss/avtab.c
148
struct avtab_node *cur;
security/selinux/ss/avtab.c
155
for (cur = h->htable[hvalue]; cur; cur = cur->next) {
security/selinux/ss/avtab.c
156
cmp = avtab_node_cmp(key, &cur->key);
security/selinux/ss/avtab.c
158
return cur;
security/selinux/ss/avtab.c
169
struct avtab_node *cur;
security/selinux/ss/avtab.c
176
for (cur = node->next; cur; cur = cur->next) {
security/selinux/ss/avtab.c
177
cmp = avtab_node_cmp(&tmp_key, &cur->key);
security/selinux/ss/avtab.c
179
return cur;
security/selinux/ss/avtab.c
189
struct avtab_node *cur, *temp;
security/selinux/ss/avtab.c
195
cur = h->htable[i];
security/selinux/ss/avtab.c
196
while (cur) {
security/selinux/ss/avtab.c
197
temp = cur;
security/selinux/ss/avtab.c
198
cur = cur->next;
security/selinux/ss/avtab.c
263
struct avtab_node *cur;
security/selinux/ss/avtab.c
269
cur = h->htable[i];
security/selinux/ss/avtab.c
270
if (cur) {
security/selinux/ss/avtab.c
273
while (cur) {
security/selinux/ss/avtab.c
275
cur = cur->next;
security/selinux/ss/avtab.c
520
int avtab_write_item(struct policydb *p, const struct avtab_node *cur, struct policy_file *fp)
security/selinux/ss/avtab.c
523
__le32 buf32[ARRAY_SIZE(cur->datum.u.xperms->perms.p)];
security/selinux/ss/avtab.c
527
buf16[0] = cpu_to_le16(cur->key.source_type);
security/selinux/ss/avtab.c
528
buf16[1] = cpu_to_le16(cur->key.target_type);
security/selinux/ss/avtab.c
529
buf16[2] = cpu_to_le16(cur->key.target_class);
security/selinux/ss/avtab.c
530
buf16[3] = cpu_to_le16(cur->key.specified);
security/selinux/ss/avtab.c
535
if (cur->key.specified & AVTAB_XPERMS) {
security/selinux/ss/avtab.c
536
rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1,
security/selinux/ss/avtab.c
540
rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp);
security/selinux/ss/avtab.c
543
for (i = 0; i < ARRAY_SIZE(cur->datum.u.xperms->perms.p); i++)
security/selinux/ss/avtab.c
544
buf32[i] = cpu_to_le32(cur->datum.u.xperms->perms.p[i]);
security/selinux/ss/avtab.c
546
ARRAY_SIZE(cur->datum.u.xperms->perms.p), fp);
security/selinux/ss/avtab.c
548
buf32[0] = cpu_to_le32(cur->datum.u.data);
security/selinux/ss/avtab.c
560
struct avtab_node *cur;
security/selinux/ss/avtab.c
569
for (cur = a->htable[i]; cur; cur = cur->next) {
security/selinux/ss/avtab.c
570
rc = avtab_write_item(p, cur, fp);
security/selinux/ss/avtab.c
91
struct avtab_node *prev, *cur, *newnode;
security/selinux/ss/avtab.c
98
for (prev = NULL, cur = h->htable[hvalue]; cur;
security/selinux/ss/avtab.c
99
prev = cur, cur = cur->next) {
security/selinux/ss/avtab.h
115
int avtab_write_item(struct policydb *p, const struct avtab_node *cur,
security/selinux/ss/hashtab.c
101
cur = cur->next;
security/selinux/ss/hashtab.c
112
struct hashtab_node *cur;
security/selinux/ss/hashtab.c
118
cur = h->htable[i];
security/selinux/ss/hashtab.c
119
if (cur) {
security/selinux/ss/hashtab.c
122
while (cur) {
security/selinux/ss/hashtab.c
124
cur = cur->next;
security/selinux/ss/hashtab.c
146
struct hashtab_node *cur, *tmp, *tail;
security/selinux/ss/hashtab.c
185
for (cur = new->htable[i]; cur; cur = tmp) {
security/selinux/ss/hashtab.c
186
tmp = cur->next;
security/selinux/ss/hashtab.c
187
destroy(cur->key, cur->datum, args);
security/selinux/ss/hashtab.c
188
kmem_cache_free(hashtab_node_cachep, cur);
security/selinux/ss/hashtab.c
72
struct hashtab_node *cur, *temp;
security/selinux/ss/hashtab.c
75
cur = h->htable[i];
security/selinux/ss/hashtab.c
76
while (cur) {
security/selinux/ss/hashtab.c
77
temp = cur;
security/selinux/ss/hashtab.c
78
cur = cur->next;
security/selinux/ss/hashtab.c
93
struct hashtab_node *cur;
security/selinux/ss/hashtab.c
96
cur = h->htable[i];
security/selinux/ss/hashtab.c
97
while (cur) {
security/selinux/ss/hashtab.c
98
ret = apply(cur->key, cur->datum, args);
security/selinux/ss/hashtab.h
101
struct hashtab_node *cur;
security/selinux/ss/hashtab.h
107
cur = h->htable[hvalue];
security/selinux/ss/hashtab.h
108
while (cur) {
security/selinux/ss/hashtab.h
109
int cmp = key_params.cmp(key, cur->key);
security/selinux/ss/hashtab.h
112
return cur->datum;
security/selinux/ss/hashtab.h
115
cur = cur->next;
security/selinux/ss/hashtab.h
66
struct hashtab_node *prev, *cur;
security/selinux/ss/hashtab.h
75
cur = h->htable[hvalue];
security/selinux/ss/hashtab.h
76
while (cur) {
security/selinux/ss/hashtab.h
77
int cmp = key_params.cmp(key, cur->key);
security/selinux/ss/hashtab.h
83
prev = cur;
security/selinux/ss/hashtab.h
84
cur = cur->next;
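All the SELinux avtab/hashtab excerpts share one traversal shape: prev and cur advance in lockstep down a bucket chain until the comparison function finds the spot, then the new node is spliced in (or the walk stops, for lookup and removal). A self-contained userspace sketch of the splice, with a hypothetical node type:

struct node { int key; struct node *next; };

static void insert_sorted(struct node **head, struct node *n)
{
    struct node *prev = NULL, *cur;

    for (cur = *head; cur && cur->key < n->key;
         prev = cur, cur = cur->next)
        ;                       /* find the insertion point */
    n->next = cur;
    if (prev)
        prev->next = n;
    else
        *head = n;              /* new head of the chain */
}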
sound/core/seq/seq_clientmgr.c
1147
info->cur_clients = client_usage.cur;
sound/core/seq/seq_clientmgr.c
179
res->cur += num;
sound/core/seq/seq_clientmgr.c
180
if (res->cur > res->peak)
sound/core/seq/seq_clientmgr.c
181
res->peak = res->cur;
sound/core/seq/seq_clientmgr.c
186
res->cur -= num;
sound/core/seq/seq_clientmgr.c
2594
snd_iprintf(buffer, " cur clients : %d\n", client_usage.cur);
sound/core/seq/seq_clientmgr.h
69
int cur;
sound/core/seq/seq_prioq.c
134
struct snd_seq_event_cell *cur, *prev;
sound/core/seq/seq_prioq.c
163
cur = f->head; /* cursor */
sound/core/seq/seq_prioq.c
166
while (cur != NULL) {
sound/core/seq/seq_prioq.c
168
int rel = compare_timestamp_rel(&cell->event, &cur->event);
sound/core/seq/seq_prioq.c
177
prev = cur;
sound/core/seq/seq_prioq.c
178
cur = cur->next;
sound/core/seq/seq_prioq.c
188
cell->next = cur;
sound/core/seq/seq_prioq.c
190
if (f->head == cur) /* this is the first cell, set head to it */
sound/core/seq/seq_prioq.c
192
if (cur == NULL) /* reached end of the list */
sound/hda/codecs/generic.c
4074
static bool dyn_adc_pcm_resetup(struct hda_codec *codec, int cur);
sound/hda/codecs/generic.c
5627
static bool dyn_adc_pcm_resetup(struct hda_codec *codec, int cur)
sound/hda/codecs/generic.c
5630
hda_nid_t new_adc = spec->adc_nids[spec->dyn_adc_idx[cur]];
sound/isa/es18xx.c
1137
unsigned char cur = inb(chip->port + ES18XX_PM);
sound/isa/es18xx.c
1139
if ((cur & mask) == val)
sound/isa/es18xx.c
1141
outb((cur & ~mask) | val, chip->port + ES18XX_PM);
sound/pci/pcxhr/pcxhr.c
1224
int cur = rmh.stat[0];
sound/pci/pcxhr/pcxhr.c
1235
cur = 100 - (100 * cur) / ref;
sound/pci/pcxhr/pcxhr.c
1236
snd_iprintf(buffer, "cpu load %d%%\n", cur);
sound/soc/bcm/cygnus-pcm.c
667
unsigned int res = 0, cur = 0, base = 0;
sound/soc/bcm/cygnus-pcm.c
678
cur = readl(aio->cygaud->audio + p_rbuf->rdaddr);
sound/soc/bcm/cygnus-pcm.c
680
cur = readl(aio->cygaud->audio + p_rbuf->wraddr);
sound/soc/bcm/cygnus-pcm.c
688
res = (cur & 0x7fffffff) - (base & 0x7fffffff);
sound/soc/codecs/cs47l92.c
106
MADERA_OUT3L_ENA | MADERA_OUT3R_ENA, cur);
sound/soc/codecs/cs47l92.c
111
if (cur & (MADERA_OUT3L_ENA | MADERA_OUT3R_ENA))
sound/soc/codecs/cs47l92.c
61
unsigned int ep_sel, mux, change, cur;
sound/soc/codecs/cs47l92.c
80
ret = regmap_read(madera->regmap, MADERA_OUTPUT_ENABLES_1, &cur);
sound/soc/codecs/madera.c
1009
unsigned int cur, new;
sound/soc/codecs/madera.c
1021
ret = regmap_read(dsp->cs_dsp.regmap, dsp->cs_dsp.base, &cur);
sound/soc/codecs/madera.c
1028
cur &= MADERA_DSP_RATE_MASK;
sound/soc/codecs/madera.c
1032
if (new == cur) {
sound/soc/codecs/madera.c
2988
unsigned int reg, cur, tar;
sound/soc/codecs/madera.c
3035
base + MADERA_AIF_RATE_CTRL, &cur);
sound/soc/codecs/madera.c
3041
if ((cur & MADERA_AIF1_RATE_MASK) == (tar & MADERA_AIF1_RATE_MASK))
sound/soc/uniphier/aio-core.c
746
int cur, diff, slope = 0, fs;
sound/soc/uniphier/aio-core.c
751
cur = aio_port_get_volume(sub);
sound/soc/uniphier/aio-core.c
752
diff = abs(vol - cur);
sound/soc/uniphier/aio-core.c
763
if (cur < vol)
sound/synth/emux/soundfont.c
1145
struct snd_sf_zone *cur;
sound/synth/emux/soundfont.c
1152
for (cur = sf->zones; cur; cur = cur->next) {
sound/synth/emux/soundfont.c
1153
if (! cur->mapped && cur->sample == NULL) {
sound/synth/emux/soundfont.c
1155
cur->sample = set_sample(sf, &cur->v);
sound/synth/emux/soundfont.c
1156
if (cur->sample == NULL)
sound/synth/emux/soundfont.c
1160
add_preset(sflist, cur);
sound/synth/emux/soundfont.c
1170
add_preset(struct snd_sf_list *sflist, struct snd_sf_zone *cur)
sound/synth/emux/soundfont.c
1175
zone = search_first_zone(sflist, cur->bank, cur->instr, cur->v.low);
sound/synth/emux/soundfont.c
1176
if (zone && zone->v.sf_id != cur->v.sf_id) {
sound/synth/emux/soundfont.c
1181
if (p->counter > cur->counter)
sound/synth/emux/soundfont.c
1191
index = get_index(cur->bank, cur->instr, cur->v.low);
sound/synth/emux/soundfont.c
1194
cur->next_zone = zone; /* zone link */
sound/synth/emux/soundfont.c
1195
cur->next_instr = sflist->presets[index]; /* preset table link */
sound/synth/emux/soundfont.c
1196
sflist->presets[index] = cur;
sound/synth/emux/soundfont.c
52
static void add_preset(struct snd_sf_list *sflist, struct snd_sf_zone *cur);
sound/usb/clock.c
295
int ret, i, cur, err, pins, clock_id;
sound/usb/clock.c
329
cur = 0;
sound/usb/clock.c
374
cur = ret;
sound/usb/clock.c
383
err = uac_clock_selector_set_val(chip, entity_id, cur, fmt->iface);
sound/usb/clock.c
405
if (i == cur)
tools/bpf/bpftool/cfg.c
131
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
tools/bpf/bpftool/cfg.c
136
func = cfg_append_func(cfg, cur);
tools/bpf/bpftool/cfg.c
140
for (; cur < end; cur++) {
tools/bpf/bpftool/cfg.c
141
if (cur->code != (BPF_JMP | BPF_CALL))
tools/bpf/bpftool/cfg.c
143
if (cur->src_reg != BPF_PSEUDO_CALL)
tools/bpf/bpftool/cfg.c
145
func = cfg_append_func(cfg, cur + cur->off + 1);
tools/bpf/bpftool/cfg.c
167
struct bpf_insn *cur, *end;
tools/bpf/bpftool/cfg.c
170
cur = func->start;
tools/bpf/bpftool/cfg.c
173
bb = func_append_bb(func, cur);
tools/bpf/bpftool/cfg.c
177
for (; cur <= end; cur++) {
tools/bpf/bpftool/cfg.c
178
if (is_jmp_insn(cur->code)) {
tools/bpf/bpftool/cfg.c
179
__u8 opcode = BPF_OP(cur->code);
tools/bpf/bpftool/cfg.c
184
bb = func_append_bb(func, cur + cur->off + 1);
tools/bpf/bpftool/cfg.c
189
bb = func_append_bb(func, cur + 1);
tools/bpf/bpftool/xlated_dumper.c
382
struct bpf_insn *cur = insn_start;
tools/bpf/bpftool/xlated_dumper.c
387
for (; cur <= insn_end; cur++) {
tools/bpf/bpftool/xlated_dumper.c
394
double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
tools/bpf/bpftool/xlated_dumper.c
396
insn_off = (unsigned int)(cur - insn_start + start_idx);
tools/bpf/bpftool/xlated_dumper.c
419
print_bpf_insn(&cbs, cur, true);
tools/bpf/bpftool/xlated_dumper.c
423
fprint_hex(stdout, cur, 8, " ");
tools/bpf/bpftool/xlated_dumper.c
424
if (double_insn && cur <= insn_end - 1) {
tools/bpf/bpftool/xlated_dumper.c
426
fprint_hex(stdout, cur + 1, 8, " ");
tools/bpf/bpftool/xlated_dumper.c
431
if (cur != insn_end)
tools/lib/bitmap.c
35
unsigned int cur, rbot, rtop;
tools/lib/bitmap.c
39
rbot = cur = find_first_bit(bitmap, nbits);
tools/lib/bitmap.c
40
while (cur < nbits) {
tools/lib/bitmap.c
41
rtop = cur;
tools/lib/bitmap.c
42
cur = find_next_bit(bitmap, nbits, cur + 1);
tools/lib/bitmap.c
43
if (cur < nbits && cur <= rtop + 1)
tools/lib/bitmap.c
55
rbot = cur;
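The tools/lib/bitmap.c loop condenses a bitmap into "a-b,c,d-e" ranges: rbot anchors the current run, rtop tracks its end, and a gap flushes the run. A runnable reduction with find_first_bit/find_next_bit replaced by a naive scan (next_bit() here is a stand-in, not a kernel helper):

#include <stdio.h>

static unsigned int next_bit(const unsigned long *map, unsigned int nbits,
                             unsigned int start)
{
    for (; start < nbits; start++)
        if (map[start / (8 * sizeof(long))] &
            (1UL << (start % (8 * sizeof(long)))))
            break;
    return start;
}

static void print_ranges(const unsigned long *map, unsigned int nbits)
{
    unsigned int cur, rbot, rtop;
    int first = 1;

    rbot = cur = next_bit(map, nbits, 0);
    while (cur < nbits) {
        rtop = cur;
        cur = next_bit(map, nbits, cur + 1);
        if (cur < nbits && cur <= rtop + 1)
            continue;                   /* run still consecutive */
        printf("%s%u", first ? "" : ",", rbot);
        if (rtop > rbot)
            printf("-%u", rtop);
        first = 0;
        rbot = cur;                     /* start the next run */
    }
    putchar('\n');
}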
tools/lib/bpf/bpf_helpers.h
377
#define bpf_for_each(type, cur, args...) for ( \
tools/lib/bpf/bpf_helpers.h
388
(((cur) = bpf_iter_##type##_next(&___it))); \
tools/lib/bpf/btf_dump.c
227
struct hashmap_entry *cur;
tools/lib/bpf/btf_dump.c
232
hashmap__for_each_entry(map, cur, bkt)
tools/lib/bpf/btf_dump.c
233
free((void *)cur->pkey);
tools/lib/bpf/gen_loader.c
193
int cur;
tools/lib/bpf/gen_loader.c
196
cur = add_data(gen, NULL, sizeof(int));
tools/lib/bpf/gen_loader.c
197
return (cur - gen->fd_array) / sizeof(int);
tools/lib/bpf/hashmap.c
104
struct hashmap_entry *cur, *tmp;
tools/lib/bpf/hashmap.c
117
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/lib/bpf/hashmap.c
118
h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
tools/lib/bpf/hashmap.c
119
hashmap_add_entry(&new_buckets[h], cur);
tools/lib/bpf/hashmap.c
135
struct hashmap_entry *cur, **prev_ptr;
tools/lib/bpf/hashmap.c
140
for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
tools/lib/bpf/hashmap.c
141
cur;
tools/lib/bpf/hashmap.c
142
prev_ptr = &cur->next, cur = cur->next) {
tools/lib/bpf/hashmap.c
143
if (map->equal_fn(cur->key, key, map->ctx)) {
tools/lib/bpf/hashmap.c
146
*entry = cur;
tools/lib/bpf/hashmap.c
65
struct hashmap_entry *cur, *tmp;
tools/lib/bpf/hashmap.c
68
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/lib/bpf/hashmap.c
69
free(cur);
tools/lib/bpf/hashmap.h
168
#define hashmap__for_each_entry(map, cur, bkt) \
tools/lib/bpf/hashmap.h
170
for (cur = (map)->buckets[bkt]; cur; cur = cur->next)
tools/lib/bpf/hashmap.h
180
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
tools/lib/bpf/hashmap.h
182
for (cur = (map)->buckets[bkt]; \
tools/lib/bpf/hashmap.h
183
cur && ({tmp = cur->next; true; }); \
tools/lib/bpf/hashmap.h
184
cur = tmp)
tools/lib/bpf/hashmap.h
192
#define hashmap__for_each_key_entry(map, cur, _key) \
tools/lib/bpf/hashmap.h
193
for (cur = (map)->buckets \
tools/lib/bpf/hashmap.h
196
cur; \
tools/lib/bpf/hashmap.h
197
cur = cur->next) \
tools/lib/bpf/hashmap.h
198
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
tools/lib/bpf/hashmap.h
200
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
tools/lib/bpf/hashmap.h
201
for (cur = (map)->buckets \
tools/lib/bpf/hashmap.h
204
cur && ({ tmp = cur->next; true; }); \
tools/lib/bpf/hashmap.h
205
cur = tmp) \
tools/lib/bpf/hashmap.h
206
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
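The macros above expand to a double loop: bkt steps across the bucket array, cur follows each bucket's chain, and the _safe variants stash cur->next in tmp so cur may be freed mid-walk. A typical usage fragment against the libbpf hashmap (assuming string keys were stored via the pkey convenience member):

size_t bkt;
struct hashmap_entry *cur;

hashmap__for_each_entry(map, cur, bkt)
    printf("key: %s\n", (const char *)cur->pkey);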
tools/lib/bpf/libbpf.c
7962
static char *find_prev_line(char *buf, char *cur)
tools/lib/bpf/libbpf.c
7966
if (cur == buf) /* end of a log buf */
tools/lib/bpf/libbpf.c
7969
p = cur - 1;
tools/lib/bpf/linker.c
2945
void *cur = output;
tools/lib/bpf/linker.c
2956
sec_info = cur;
tools/lib/bpf/linker.c
2959
cur += sizeof(struct btf_ext_info_sec);
tools/lib/bpf/linker.c
2962
memcpy(cur, sec_data->recs, sz);
tools/lib/bpf/linker.c
2963
cur += sz;
tools/lib/bpf/linker.c
2965
return cur - output;
tools/lib/bpf/linker.c
2973
void *data, *cur;
tools/lib/bpf/linker.c
3035
cur = data = calloc(1, total_sz);
tools/lib/bpf/linker.c
3039
hdr = cur;
tools/lib/bpf/linker.c
3044
cur += sizeof(struct btf_ext_header);
tools/lib/bpf/linker.c
3055
*(__u32 *)cur = func_rec_sz;
tools/lib/bpf/linker.c
3056
cur += sizeof(__u32);
tools/lib/bpf/linker.c
3061
sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->func_info);
tools/lib/bpf/linker.c
3067
cur += sz;
tools/lib/bpf/linker.c
3072
*(__u32 *)cur = line_rec_sz;
tools/lib/bpf/linker.c
3073
cur += sizeof(__u32);
tools/lib/bpf/linker.c
3078
sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->line_info);
tools/lib/bpf/linker.c
3084
cur += sz;
tools/lib/bpf/linker.c
3089
*(__u32 *)cur = core_relo_rec_sz;
tools/lib/bpf/linker.c
3090
cur += sizeof(__u32);
tools/lib/bpf/linker.c
3095
sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->core_relo_info);
tools/lib/bpf/linker.c
3101
cur += sz;
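The linker.c excerpts serialize a blob with a moving cursor: cur starts at the output buffer, every header or record copy advances it, and cur - output is the number of bytes emitted. The pattern in isolation (ISO C, using a char * cursor in place of the kernel-style void * arithmetic; the two-part record layout is hypothetical):

#include <string.h>

static size_t emit(char *output, const void *hdr, size_t hdr_sz,
                   const void *recs, size_t recs_sz)
{
    char *cur = output;

    memcpy(cur, hdr, hdr_sz);
    cur += hdr_sz;
    memcpy(cur, recs, recs_sz);
    cur += recs_sz;
    return cur - output;        /* total bytes written */
}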
tools/objtool/elf.c
49
struct elf_hash_node *cur, *prev;
tools/objtool/elf.c
56
for (prev = NULL, cur = *head; cur; prev = cur, cur = cur->next) {
tools/objtool/elf.c
57
if (cur == node) {
tools/objtool/elf.c
58
prev->next = cur->next;
tools/perf/builtin-kwork.c
339
struct kwork_work *cur;
tools/perf/builtin-kwork.c
343
cur = container_of(*new, struct kwork_work, node);
tools/perf/builtin-kwork.c
345
cmp = work_cmp(sort_list, key, cur);
tools/perf/tests/pmu-events.c
779
char *dup, *cur;
tools/perf/tests/pmu-events.c
793
for (cur = strchr(dup, '@') ; cur; cur = strchr(++cur, '@'))
tools/perf/tests/pmu-events.c
794
*cur = '/';
tools/perf/tests/pmu-events.c
932
struct hashmap_entry *cur;
tools/perf/tests/pmu-events.c
957
hashmap__for_each_entry(ctx->ids, cur, bkt)
tools/perf/tests/pmu-events.c
958
expr__add_id_val(ctx, strdup(cur->pkey), i++);
tools/perf/tests/pmu-events.c
960
hashmap__for_each_entry(ctx->ids, cur, bkt) {
tools/perf/tests/pmu-events.c
961
if (check_parse_fake(cur->pkey)) {
tools/perf/tests/pmu-events.c
974
hashmap__for_each_entry(ctx->ids, cur, bkt)
tools/perf/tests/pmu-events.c
975
expr__add_id_val(ctx, strdup(cur->pkey), i--);
tools/perf/ui/browsers/annotate.c
1252
struct hashmap_entry *cur;
tools/perf/ui/browsers/annotate.c
1255
hashmap__for_each_entry(browser.type_hash, cur, bkt)
tools/perf/ui/browsers/annotate.c
1256
zfree(&cur->pvalue);
tools/perf/util/annotate.c
116
struct hashmap_entry *cur;
tools/perf/util/annotate.c
123
hashmap__for_each_entry(src->samples, cur, bkt)
tools/perf/util/annotate.c
124
zfree(&cur->pvalue);
tools/perf/util/bpf_lock_contention.c
102
struct hashmap_entry *cur;
tools/perf/util/bpf_lock_contention.c
105
hashmap__for_each_entry(&slab_hash, cur, bkt)
tools/perf/util/bpf_lock_contention.c
106
free(cur->pvalue);
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
764
ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
771
cur = ocsd_dt_process_data(decoder->dcd_tree,
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
778
cur = ocsd_dt_process_data(decoder->dcd_tree,
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
795
if (OCSD_DATA_RESP_IS_WAIT(cur))
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
798
prev_return = cur;
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
801
decoder->prev_return = cur;
tools/perf/util/demangle-rust-v0.c
454
size_t cur = 0;
tools/perf/util/demangle-rust-v0.c
455
for(;cur < len && buf[cur] == '0';cur++);
tools/perf/util/demangle-rust-v0.c
457
if (len - cur > 16) return false;
tools/perf/util/demangle-rust-v0.c
458
for(;cur < len;cur++) {
tools/perf/util/demangle-rust-v0.c
459
char c = buf[cur];
tools/perf/util/evsel.c
4171
struct hashmap_entry *cur;
tools/perf/util/evsel.c
4175
hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
tools/perf/util/evsel.c
4176
zfree(&cur->pkey);
tools/perf/util/expr.c
109
struct hashmap_entry *cur;
tools/perf/util/expr.c
126
hashmap__for_each_entry(ids2, cur, bkt) {
tools/perf/util/expr.c
127
ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data);
tools/perf/util/expr.c
230
struct hashmap_entry *cur;
tools/perf/util/expr.c
234
hashmap__for_each_entry(needles->ids, cur, bkt) {
tools/perf/util/expr.c
235
if (expr__get_id(haystack, cur->pkey, &data))
tools/perf/util/expr.c
309
struct hashmap_entry *cur;
tools/perf/util/expr.c
312
hashmap__for_each_entry(ctx->ids, cur, bkt) {
tools/perf/util/expr.c
313
zfree(&cur->pkey);
tools/perf/util/expr.c
314
zfree(&cur->pvalue);
tools/perf/util/expr.c
321
struct hashmap_entry *cur;
tools/perf/util/expr.c
328
hashmap__for_each_entry(ctx->ids, cur, bkt) {
tools/perf/util/expr.c
329
zfree(&cur->pkey);
tools/perf/util/expr.c
330
zfree(&cur->pvalue);
tools/perf/util/expr.c
78
struct hashmap_entry *cur;
tools/perf/util/expr.c
84
hashmap__for_each_entry(ids, cur, bkt) {
tools/perf/util/expr.c
85
zfree(&cur->pkey);
tools/perf/util/expr.c
86
zfree(&cur->pvalue);
tools/perf/util/genelf_debug.c
200
ubyte cur = data & 0x7F;
tools/perf/util/genelf_debug.c
203
cur |= 0x80;
tools/perf/util/genelf_debug.c
204
buffer_ext_add(be, &cur, 1);
tools/perf/util/genelf_debug.c
214
ubyte cur = data & 0x7F;
tools/perf/util/genelf_debug.c
218
if ((data == 0 && !(cur & 0x40)) ||
tools/perf/util/genelf_debug.c
219
(data == -1l && (cur & 0x40)))
tools/perf/util/genelf_debug.c
222
cur |= 0x80;
tools/perf/util/genelf_debug.c
223
buffer_ext_add(be, &cur, 1);
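The genelf_debug.c emitters above are DWARF LEB128 encoders: emit 7 payload bits per byte and set bit 7 on every byte except the last. A runnable unsigned-variant encoder in the same shape:

#include <stdint.h>
#include <stdio.h>

static int uleb128_encode(uint64_t data, uint8_t *out)
{
    int n = 0;

    do {
        uint8_t cur = data & 0x7f;

        data >>= 7;
        if (data)
            cur |= 0x80;        /* continuation bit */
        out[n++] = cur;
    } while (data);
    return n;
}

int main(void)
{
    uint8_t buf[10];
    int i, n = uleb128_encode(624485, buf);     /* classic DWARF example */

    for (i = 0; i < n; i++)
        printf("%02x ", buf[i]);                /* prints: e5 8e 26 */
    return 0;
}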
tools/perf/util/hashmap.c
104
struct hashmap_entry *cur, *tmp;
tools/perf/util/hashmap.c
117
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/perf/util/hashmap.c
118
h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
tools/perf/util/hashmap.c
119
hashmap_add_entry(&new_buckets[h], cur);
tools/perf/util/hashmap.c
135
struct hashmap_entry *cur, **prev_ptr;
tools/perf/util/hashmap.c
140
for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
tools/perf/util/hashmap.c
141
cur;
tools/perf/util/hashmap.c
142
prev_ptr = &cur->next, cur = cur->next) {
tools/perf/util/hashmap.c
143
if (map->equal_fn(cur->key, key, map->ctx)) {
tools/perf/util/hashmap.c
146
*entry = cur;
tools/perf/util/hashmap.c
65
struct hashmap_entry *cur, *tmp;
tools/perf/util/hashmap.c
68
hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
tools/perf/util/hashmap.c
69
free(cur);
tools/perf/util/hashmap.h
168
#define hashmap__for_each_entry(map, cur, bkt) \
tools/perf/util/hashmap.h
170
for (cur = (map)->buckets[bkt]; cur; cur = cur->next)
tools/perf/util/hashmap.h
180
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
tools/perf/util/hashmap.h
182
for (cur = (map)->buckets[bkt]; \
tools/perf/util/hashmap.h
183
cur && ({tmp = cur->next; true; }); \
tools/perf/util/hashmap.h
184
cur = tmp)
tools/perf/util/hashmap.h
192
#define hashmap__for_each_key_entry(map, cur, _key) \
tools/perf/util/hashmap.h
193
for (cur = (map)->buckets \
tools/perf/util/hashmap.h
196
cur; \
tools/perf/util/hashmap.h
197
cur = cur->next) \
tools/perf/util/hashmap.h
198
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
tools/perf/util/hashmap.h
200
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
tools/perf/util/hashmap.h
201
for (cur = (map)->buckets \
tools/perf/util/hashmap.h
204
cur && ({ tmp = cur->next; true; }); \
tools/perf/util/hashmap.h
205
cur = tmp) \
tools/perf/util/hashmap.h
206
if ((map)->equal_fn(cur->key, (_key), (map)->ctx))
tools/perf/util/hwmon_pmu.c
239
struct hashmap_entry *cur, *tmp;
tools/perf/util/hwmon_pmu.c
323
hashmap__for_each_entry_safe((&pmu->events), cur, tmp, bkt) {
tools/perf/util/hwmon_pmu.c
325
.type_and_num = cur->key,
tools/perf/util/hwmon_pmu.c
327
struct hwmon_pmu_event_value *value = cur->pvalue;
tools/perf/util/hwmon_pmu.c
396
struct hashmap_entry *cur, *tmp;
tools/perf/util/hwmon_pmu.c
399
hashmap__for_each_entry_safe((&hwm->events), cur, tmp, bkt) {
tools/perf/util/hwmon_pmu.c
400
struct hwmon_pmu_event_value *value = cur->pvalue;
tools/perf/util/hwmon_pmu.c
462
struct hashmap_entry *cur;
tools/perf/util/hwmon_pmu.c
468
hashmap__for_each_entry((&hwm->events), cur, bkt) {
tools/perf/util/hwmon_pmu.c
499
.type_and_num = cur->key,
tools/perf/util/hwmon_pmu.c
501
struct hwmon_pmu_event_value *value = cur->pvalue;
tools/perf/util/hwmon_pmu.c
536
pmu->name, cur->key);
tools/perf/util/hwmon_pmu.c
559
struct hashmap_entry *cur;
tools/perf/util/hwmon_pmu.c
575
hashmap__for_each_entry((&hwm->events), cur, bkt) {
tools/perf/util/hwmon_pmu.c
576
struct hwmon_pmu_event_value *value = cur->pvalue;
tools/perf/util/hwmon_pmu.c
578
key.type_and_num = cur->key;
tools/perf/util/hwmon_pmu.c
601
struct hashmap_entry *cur;
tools/perf/util/hwmon_pmu.c
605
hashmap__for_each_entry((&hwm->events), cur, bkt) {
tools/perf/util/hwmon_pmu.c
607
.type_and_num = cur->key,
tools/perf/util/hwmon_pmu.c
609
struct hwmon_pmu_event_value *value = cur->pvalue;
tools/perf/util/machine.c
2549
struct perf_sample *cur,
tools/perf/util/machine.c
2554
struct branch_stack *cur_stack = cur->branch_stack;
tools/perf/util/machine.c
2555
struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
tools/perf/util/metricgroup.c
1216
struct hashmap_entry *cur;
tools/perf/util/metricgroup.c
1228
hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
tools/perf/util/metricgroup.c
1229
dup = strdup(cur->pkey);
tools/perf/util/metricgroup.c
503
char *cur;
tools/perf/util/metricgroup.c
532
for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
tools/perf/util/metricgroup.c
533
*cur = '/';
tools/perf/util/metricgroup.c
563
struct hashmap_entry *cur;
tools/perf/util/metricgroup.c
571
hashmap__for_each_entry(ctx->ids, cur, bkt) {
tools/perf/util/metricgroup.c
572
const char *sep, *rsep, *id = cur->pkey;
tools/perf/util/metricgroup.c
745
struct hashmap_entry *cur;
tools/perf/util/metricgroup.c
762
hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
tools/perf/util/metricgroup.c
765
if (pmu_metrics_table__find_metric(table, pmu, cur->pkey,
tools/perf/util/metricgroup.c
774
pending[pending_cnt].key = cur->pkey;
tools/perf/util/python.c
1343
struct evsel *cur = metric_events[i];
tools/perf/util/python.c
1347
char *n = strdup(evsel__metric_id(cur));
tools/perf/util/python.c
1359
cur->pmu == evsel->metric_leader->pmu) {
tools/perf/util/python.c
1365
if (pos->metric_leader != cur)
tools/perf/util/python.c
1367
cur = pos;
tools/perf/util/python.c
1374
source_count = evsel__source_count(cur);
tools/perf/util/python.c
1376
ret = evsel__ensure_counts(cur);
tools/perf/util/python.c
1381
old_count = perf_counts(cur->prev_raw_counts, cpu_idx, thread_idx);
tools/perf/util/python.c
1382
new_count = perf_counts(cur->counts, cpu_idx, thread_idx);
tools/perf/util/python.c
1384
evsel__read_counter(cur, cpu_idx, thread_idx);
tools/perf/util/strfilter.c
100
if (!cur)
tools/perf/util/strfilter.c
102
last_op->r = cur;
tools/perf/util/strfilter.c
103
last_op = cur;
tools/perf/util/strfilter.c
106
if (!cur->r || !root.r)
tools/perf/util/strfilter.c
108
cur = strfilter_node__alloc(OP_or, root.r, NULL);
tools/perf/util/strfilter.c
109
if (!cur)
tools/perf/util/strfilter.c
111
root.r = cur;
tools/perf/util/strfilter.c
112
last_op = cur;
tools/perf/util/strfilter.c
115
if (cur->r)
tools/perf/util/strfilter.c
117
cur->r = strfilter_node__alloc(OP_not, NULL, NULL);
tools/perf/util/strfilter.c
118
if (!cur->r)
tools/perf/util/strfilter.c
120
cur = cur->r;
tools/perf/util/strfilter.c
123
if (cur->r)
tools/perf/util/strfilter.c
125
cur->r = strfilter_node__new(s + 1, &s);
tools/perf/util/strfilter.c
128
if (!cur->r || *s != ')')
tools/perf/util/strfilter.c
133
if (cur->r)
tools/perf/util/strfilter.c
135
cur->r = strfilter_node__alloc(NULL, NULL, NULL);
tools/perf/util/strfilter.c
136
if (!cur->r)
tools/perf/util/strfilter.c
138
cur->r->p = strndup(s, e - s);
tools/perf/util/strfilter.c
139
if (!cur->r->p)
tools/perf/util/strfilter.c
144
if (!cur->r)
tools/perf/util/strfilter.c
84
struct strfilter_node root, *cur, *last_op;
tools/perf/util/strfilter.c
91
last_op = cur = &root;
tools/perf/util/strfilter.c
97
if (!cur->r || !last_op->r)
tools/perf/util/strfilter.c
99
cur = strfilter_node__alloc(OP_and, last_op->r, NULL);
tools/perf/util/threads.c
139
struct hashmap_entry *cur, *tmp;
tools/perf/util/threads.c
144
hashmap__for_each_entry_safe(&table->shard, cur, tmp, bkt) {
tools/perf/util/threads.c
147
hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
tools/perf/util/threads.c
174
struct hashmap_entry *cur;
tools/perf/util/threads.c
178
hashmap__for_each_entry(&table->shard, cur, bkt) {
tools/perf/util/threads.c
179
int rc = fn((struct thread *)cur->pvalue, data);
tools/perf/util/unwind-libunwind-local.c
115
u8 *cur = *p;
tools/perf/util/unwind-libunwind-local.c
123
*val = dw_read(cur, unsigned long, end);
tools/perf/util/unwind-libunwind-local.c
133
*val = (unsigned long) cur;
tools/perf/util/unwind-libunwind-local.c
144
*val += dw_read(cur, s32, end);
tools/perf/util/unwind-libunwind-local.c
147
*val += dw_read(cur, u32, end);
tools/perf/util/unwind-libunwind-local.c
150
*val += dw_read(cur, s64, end);
tools/perf/util/unwind-libunwind-local.c
153
*val += dw_read(cur, u64, end);
tools/perf/util/unwind-libunwind-local.c
160
*p = cur;
tools/power/cpupower/utils/helpers/bitmask.c
279
unsigned int cur, rbot, rtop;
tools/power/cpupower/utils/helpers/bitmask.c
283
rbot = cur = bitmask_first(bmp);
tools/power/cpupower/utils/helpers/bitmask.c
284
while (cur < bmp->size) {
tools/power/cpupower/utils/helpers/bitmask.c
285
rtop = cur;
tools/power/cpupower/utils/helpers/bitmask.c
286
cur = bitmask_next(bmp, cur+1);
tools/power/cpupower/utils/helpers/bitmask.c
287
if (cur >= bmp->size || cur > rtop + 1) {
tools/power/cpupower/utils/helpers/bitmask.c
289
rbot = cur;
tools/sched_ext/scx_flatcg.c
57
char *line, *cur = NULL, *tok;
tools/sched_ext/scx_flatcg.c
76
for (idx = 0; (tok = strtok_r(line, " \n", &cur)); idx++) {
tools/sched_ext/scx_qmap.bpf.c
757
u32 cap, cur;
tools/sched_ext/scx_qmap.bpf.c
765
cur = scx_bpf_cpuperf_cur(i);
tools/sched_ext/scx_qmap.bpf.c
767
cur_min = cur < cur_min ? cur : cur_min;
tools/sched_ext/scx_qmap.bpf.c
768
cur_max = cur > cur_max ? cur : cur_max;
tools/sched_ext/scx_qmap.bpf.c
775
cur_sum += cur * cap / SCX_CPUPERF_ONE;
tools/sched_ext/scx_qmap.bpf.c
784
cur = cpuc->cpuperf_target;
tools/sched_ext/scx_qmap.bpf.c
785
target_sum += cur;
tools/sched_ext/scx_qmap.bpf.c
786
target_min = cur < target_min ? cur : target_min;
tools/sched_ext/scx_qmap.bpf.c
787
target_max = cur > target_max ? cur : target_max;
tools/testing/radix-tree/main.c
160
unsigned long start, end, count = 0, tagged, cur, tmp;
tools/testing/radix-tree/main.c
167
cur = start;
tools/testing/radix-tree/main.c
169
end = cur;
tools/testing/radix-tree/main.c
173
cur = rand();
tools/testing/radix-tree/main.c
174
if (cur & 1) {
tools/testing/radix-tree/main.c
176
if (cur & 2) {
tools/testing/radix-tree/main.c
182
if (cur & 4) {
tools/testing/radix-tree/main.c
184
if (cur & 8)
tools/testing/radix-tree/main.c
187
if (cur & 16) {
tools/testing/radix-tree/main.c
189
if (cur & 32) {
tools/testing/radix-tree/main.c
195
if (cur & 64) {
tools/testing/radix-tree/main.c
197
if (cur & 128)
tools/testing/selftests/arm64/gcs/libc-gcs.c
112
unsigned long *gcs, *cur;
tools/testing/selftests/arm64/gcs/libc-gcs.c
115
cur = gcs;
tools/testing/selftests/arm64/gcs/libc-gcs.c
116
while (*cur)
tools/testing/selftests/arm64/gcs/libc-gcs.c
117
cur++;
tools/testing/selftests/arm64/gcs/libc-gcs.c
119
ksft_print_msg("GCS in use from %p-%p\n", gcs, cur);
tools/testing/selftests/arm64/gcs/libc-gcs.c
125
ASSERT_TRUE(gcs != cur);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1009
struct lpm_trie_bytes_key *cur;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1065
cur = NULL;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1068
err = bpf_map_get_next_key(fd, cur, &next_key);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1076
cur = &next_key;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1078
err = bpf_map_get_next_key(fd, cur, &next_key);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1082
cur = NULL;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1085
err = bpf_map_get_next_key(fd, cur, &next_key);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1093
cur = &next_key;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1095
err = bpf_map_delete_elem(fd, cur);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1098
err = bpf_map_get_next_key(fd, cur, &next_key);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1114
struct lpm_trie_int_key *cur;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1135
cur = NULL;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1137
err = bpf_map_get_next_key(fd, cur, &next_key);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1143
cur = &next_key;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
1152
err = bpf_map_get_next_key(fd, cur, &next_key);
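The LPM-trie test above iterates a BPF map with the real libbpf call bpf_map_get_next_key(): passing NULL fetches the first key, each returned key is fed back in, and the call fails with ENOENT after the last one. The shape of that loop (the key type is map-specific; struct key_t is a hypothetical stand-in):

struct key_t next_key;
void *cur = NULL;                       /* NULL -> fetch the first key */

while (bpf_map_get_next_key(fd, cur, &next_key) == 0) {
    /* ... process next_key ... */
    cur = &next_key;                    /* resume after this key */
}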
tools/testing/selftests/bpf/prog_tests/btf.c
8136
const char *cur = strs;
tools/testing/selftests/bpf/prog_tests/btf.c
8139
while (cur < strs + len) {
tools/testing/selftests/bpf/prog_tests/btf.c
8140
fprintf(stderr, "string #%d: '%s'\n", i, cur);
tools/testing/selftests/bpf/prog_tests/btf.c
8141
cur += strlen(cur) + 1;
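The btf.c dump above walks a BTF string section, which is simply back-to-back NUL-terminated strings, so strlen(cur) + 1 steps to the next entry. A runnable standalone version:

#include <stdio.h>
#include <string.h>

static void dump_strs(const char *strs, size_t len)
{
    const char *cur = strs;
    int i = 0;

    while (cur < strs + len) {
        printf("string #%d: '%s'\n", i++, cur);
        cur += strlen(cur) + 1;
    }
}

int main(void)
{
    const char tbl[] = "foo\0bar\0baz";     /* 12 bytes incl. final NUL */

    dump_strs(tbl, sizeof(tbl));
    return 0;
}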
tools/testing/selftests/bpf/prog_tests/file_reader.c
29
ssize_t n = 0, cur, off;
tools/testing/selftests/bpf/prog_tests/file_reader.c
37
cur = read(fd, file_contents + n, sizeof(file_contents) - n);
tools/testing/selftests/bpf/prog_tests/file_reader.c
38
if (!ASSERT_GT(cur, 0, "read success"))
tools/testing/selftests/bpf/prog_tests/file_reader.c
40
n += cur;
tools/testing/selftests/bpf/prog_tests/hashmap.c
263
struct hashmap_entry *cur;
tools/testing/selftests/bpf/prog_tests/hashmap.c
319
hashmap__for_each_entry(map, cur, bkt) {
tools/testing/selftests/bpf/prog_tests/hashmap.c
322
key = cur->pkey;
tools/testing/selftests/bpf/prog_tests/hashmap.c
323
value = cur->pvalue;
tools/testing/selftests/bpf/prog_tests/net_timestamping.c
38
static void validate_timestamp(struct timespec *cur, struct timespec *prev)
tools/testing/selftests/bpf/prog_tests/net_timestamping.c
42
cur_ns = timespec_to_ns64(cur);
tools/testing/selftests/bpf/veristat.c
1006
for (cur = &buf[pos]; cur > buf && cur[0] != '\n'; cur--, pos--) {
tools/testing/selftests/bpf/veristat.c
1013
if (cur[0] == '\n')
tools/testing/selftests/bpf/veristat.c
1014
cur++;
tools/testing/selftests/bpf/veristat.c
1016
if (1 == sscanf(cur, "verification time %ld usec\n", &s->stats[DURATION]))
tools/testing/selftests/bpf/veristat.c
1018
if (5 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld",
tools/testing/selftests/bpf/veristat.c
1026
if (1 == sscanf(cur, "stack depth %511s", stack))
tools/testing/selftests/bpf/veristat.c
1068
struct line_cnt *cur;
tools/testing/selftests/bpf/veristat.c
1104
cur = freq;
tools/testing/selftests/bpf/veristat.c
1105
cur->line = lines[0];
tools/testing/selftests/bpf/veristat.c
1106
cur->cnt = 1;
tools/testing/selftests/bpf/veristat.c
1108
if (strcmp(lines[i], cur->line) != 0) {
tools/testing/selftests/bpf/veristat.c
1109
cur++;
tools/testing/selftests/bpf/veristat.c
1110
cur->line = lines[i];
tools/testing/selftests/bpf/veristat.c
1111
cur->cnt = 0;
tools/testing/selftests/bpf/veristat.c
1113
cur->cnt++;
tools/testing/selftests/bpf/veristat.c
1115
unique_lines = cur - freq + 1;
tools/testing/selftests/bpf/veristat.c
1769
struct var_preset *cur;
tools/testing/selftests/bpf/veristat.c
1777
cur = &(*presets)[*cnt];
tools/testing/selftests/bpf/veristat.c
1778
memset(cur, 0, sizeof(*cur));
tools/testing/selftests/bpf/veristat.c
1788
err = parse_rvalue(val, &cur->value);
tools/testing/selftests/bpf/veristat.c
1792
cur->full_name = strdup(var);
tools/testing/selftests/bpf/veristat.c
1793
if (!cur->full_name)
tools/testing/selftests/bpf/veristat.c
1796
err = parse_var_atoms(var, cur);
tools/testing/selftests/bpf/veristat.c
998
const char *cur;
tools/testing/selftests/filesystems/statmount/listmount_test.c
36
for (size_t cur = 0; cur < nr_mounts; cur++) {
tools/testing/selftests/filesystems/statmount/listmount_test.c
37
if (cur < nr_mounts - 1)
tools/testing/selftests/filesystems/statmount/listmount_test.c
38
ASSERT_LT(list[cur], list[cur + 1]);
tools/testing/selftests/filesystems/statmount/listmount_test.c
39
last_mnt_id = list[cur];
tools/testing/selftests/filesystems/statmount/listmount_test.c
58
for (size_t cur = 0; cur < nr_mounts; cur++) {
tools/testing/selftests/filesystems/statmount/listmount_test.c
59
if (cur < nr_mounts - 1)
tools/testing/selftests/filesystems/statmount/listmount_test.c
60
ASSERT_GT(list[cur], list[cur + 1]);
tools/testing/selftests/filesystems/statmount/listmount_test.c
61
last_mnt_id = list[cur];
tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
852
uint32_t cur = sched_getcpu();
tools/testing/selftests/kvm/arm64/arch_timer_edge_cases.c
853
uint32_t next = cur;
tools/testing/selftests/kvm/dirty_log_test.c
340
struct kvm_dirty_gfn *cur;
tools/testing/selftests/kvm/dirty_log_test.c
344
cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
tools/testing/selftests/kvm/dirty_log_test.c
345
if (!dirty_gfn_is_dirtied(cur))
tools/testing/selftests/kvm/dirty_log_test.c
347
TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
tools/testing/selftests/kvm/dirty_log_test.c
348
"%u != %u", cur->slot, slot);
tools/testing/selftests/kvm/dirty_log_test.c
349
TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
tools/testing/selftests/kvm/dirty_log_test.c
350
"0x%llx >= 0x%x", cur->offset, num_pages);
tools/testing/selftests/kvm/dirty_log_test.c
351
__set_bit_le(cur->offset, bitmap);
tools/testing/selftests/kvm/dirty_log_test.c
352
dirty_ring_last_page = cur->offset;
tools/testing/selftests/kvm/dirty_log_test.c
353
dirty_gfn_set_collected(cur);
tools/testing/selftests/kvm/lib/kvm_util.c
872
struct rb_node **cur, *parent;
tools/testing/selftests/kvm/lib/kvm_util.c
874
for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
tools/testing/selftests/kvm/lib/kvm_util.c
877
cregion = container_of(*cur, typeof(*cregion), gpa_node);
tools/testing/selftests/kvm/lib/kvm_util.c
878
parent = *cur;
tools/testing/selftests/kvm/lib/kvm_util.c
881
cur = &(*cur)->rb_left;
tools/testing/selftests/kvm/lib/kvm_util.c
887
cur = &(*cur)->rb_right;
tools/testing/selftests/kvm/lib/kvm_util.c
891
rb_link_node(&region->gpa_node, parent, cur);
tools/testing/selftests/kvm/lib/kvm_util.c
898
struct rb_node **cur, *parent;
tools/testing/selftests/kvm/lib/kvm_util.c
900
for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
tools/testing/selftests/kvm/lib/kvm_util.c
903
cregion = container_of(*cur, typeof(*cregion), hva_node);
tools/testing/selftests/kvm/lib/kvm_util.c
904
parent = *cur;
tools/testing/selftests/kvm/lib/kvm_util.c
906
cur = &(*cur)->rb_left;
tools/testing/selftests/kvm/lib/kvm_util.c
912
cur = &(*cur)->rb_right;
tools/testing/selftests/kvm/lib/kvm_util.c
916
rb_link_node(&region->hva_node, parent, cur);
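The kvm_util.c inserts above use the canonical kernel rbtree idiom: cur points at the link slot being considered, parent trails one step behind, and once a NULL slot is found the node is linked and recolored. A hedged kernel-style sketch with a hypothetical struct item keyed by an integer:

struct item { struct rb_node node; unsigned long key; };

static void item_insert(struct rb_root *root, struct item *new)
{
    struct rb_node **cur = &root->rb_node, *parent = NULL;

    while (*cur) {
        struct item *it = container_of(*cur, struct item, node);

        parent = *cur;
        if (new->key < it->key)
            cur = &(*cur)->rb_left;
        else
            cur = &(*cur)->rb_right;
    }
    rb_link_node(&new->node, parent, cur);      /* attach as leaf */
    rb_insert_color(&new->node, root);          /* rebalance */
}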
tools/testing/selftests/mm/cow.c
144
ssize_t cur, total, transferred;
tools/testing/selftests/mm/cow.c
175
for (total = 0; total < transferred; total += cur) {
tools/testing/selftests/mm/cow.c
176
cur = read(fds[0], new + total, transferred - total);
tools/testing/selftests/mm/cow.c
177
if (cur < 0)
tools/testing/selftests/mm/cow.c
293
ssize_t cur, total, transferred = 0;
tools/testing/selftests/mm/cow.c
367
for (total = 0; total < transferred; total += cur) {
tools/testing/selftests/mm/cow.c
368
cur = read(fds[0], new + total, transferred - total);
tools/testing/selftests/mm/cow.c
369
if (cur < 0) {
tools/testing/selftests/mm/cow.c
417
ssize_t cur, total;
tools/testing/selftests/mm/cow.c
549
cur = pread(fd, tmp + total, size - total, total);
tools/testing/selftests/mm/cow.c
550
if (cur < 0) {
tools/testing/selftests/mm/cow.c
555
total += cur;
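The cow.c loops handle short reads: read() may return fewer bytes than requested, so the count accumulates in total until the request is satisfied, with 0 (EOF) and negative returns treated as failure. A runnable helper in the same shape:

#include <unistd.h>

static int read_full(int fd, char *buf, ssize_t size)
{
    ssize_t cur, total;

    for (total = 0; total < size; total += cur) {
        cur = read(fd, buf + total, size - total);
        if (cur <= 0)
            return -1;      /* error or premature EOF */
    }
    return 0;
}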
tools/testing/selftests/mm/mlock-random-test.c
137
struct rlimit cur;
tools/testing/selftests/mm/mlock-random-test.c
140
getrlimit(RLIMIT_MEMLOCK, &cur);
tools/testing/selftests/mm/mlock-random-test.c
141
if (cur.rlim_cur < alloc_size)
tools/testing/selftests/mm/mlock-random-test.c
143
alloc_size, (unsigned int)cur.rlim_cur);
tools/testing/selftests/mm/mlock-random-test.c
204
struct rlimit cur;
tools/testing/selftests/mm/mlock-random-test.c
206
getrlimit(RLIMIT_MEMLOCK, &cur);
tools/testing/selftests/mm/mlock-random-test.c
207
if (cur.rlim_cur >= alloc_size)
tools/testing/selftests/mm/mlock-random-test.c
209
alloc_size, (unsigned int)cur.rlim_cur);
tools/testing/selftests/mm/mlock-random-test.c
215
int lock_size = (rand() % (alloc_size - cur.rlim_cur))
tools/testing/selftests/mm/mlock-random-test.c
216
+ cur.rlim_cur;
tools/testing/selftests/net/tap.c
302
uint8_t *cur = buf;
tools/testing/selftests/net/tap.c
311
cur += sizeof(*vh);
tools/testing/selftests/net/tap.c
313
cur += build_eth(cur, ETH_P_IP);
tools/testing/selftests/net/tap.c
314
cur += build_ipv4_header(cur, payload_len);
tools/testing/selftests/net/tap.c
315
cur += build_udp_packet(cur, payload_len, true);
tools/testing/selftests/net/tap.c
317
return cur - buf;
tools/testing/selftests/net/tap.c
322
uint8_t *cur = buf;
tools/testing/selftests/net/tap.c
327
cur += sizeof(*vh);
tools/testing/selftests/net/tap.c
329
cur += build_eth(cur, ETH_P_IP);
tools/testing/selftests/net/tap.c
330
cur += build_ipv4_header(cur, payload_len);
tools/testing/selftests/net/tap.c
331
cur += build_udp_packet(cur, payload_len, false);
tools/testing/selftests/net/tap.c
333
return cur - buf;
tools/testing/selftests/net/tap.c
339
uint8_t *cur = buf;
tools/testing/selftests/net/tap.c
346
cur += sizeof(*vh);
tools/testing/selftests/net/tap.c
348
cur += build_eth(cur, 0);
tools/testing/selftests/net/tap.c
349
cur += sizeof(struct iphdr) + sizeof(struct udphdr);
tools/testing/selftests/net/tap.c
350
cur += build_ipv4_header(cur, payload_len);
tools/testing/selftests/net/tap.c
351
cur += build_udp_packet(cur, payload_len, true);
tools/testing/selftests/net/tap.c
352
cur += payload_len;
tools/testing/selftests/net/tap.c
354
return cur - buf;
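Each tap.c builder above assembles a frame by bumping cur past one header at a time; every build_*() helper returns the length it wrote, so cur - buf is the finished frame size. The pattern reassembled as one function (the build_*() helpers and their signatures are taken from the excerpts, not redefined here):

static int build_test_frame(uint8_t *buf, int payload_len)
{
    uint8_t *cur = buf;

    cur += build_eth(cur, ETH_P_IP);
    cur += build_ipv4_header(cur, payload_len);
    cur += build_udp_packet(cur, payload_len, true);
    return cur - buf;       /* total frame length */
}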
tools/testing/selftests/net/tun.c
410
static int validate_hdrlen(uint8_t **cur, int *len, int x)
tools/testing/selftests/net/tun.c
414
*cur += x;
tools/testing/selftests/net/tun.c
425
uint8_t *cur = buf;
tools/testing/selftests/net/tun.c
427
if (validate_hdrlen(&cur, &len, TUN_VNET_TNL_SIZE))
tools/testing/selftests/net/tun.c
431
if (validate_hdrlen(&cur, &len, ETH_HLEN))
tools/testing/selftests/net/tun.c
436
iph4 = (struct iphdr *)cur;
tools/testing/selftests/net/tun.c
437
if (validate_hdrlen(&cur, &len, sizeof(struct iphdr)))
tools/testing/selftests/net/tun.c
442
iph6 = (struct ipv6hdr *)cur;
tools/testing/selftests/net/tun.c
443
if (validate_hdrlen(&cur, &len, sizeof(struct ipv6hdr)))
tools/testing/selftests/net/tun.c
449
udph = (struct udphdr *)cur;
tools/testing/selftests/net/tun.c
450
if (validate_hdrlen(&cur, &len, sizeof(struct udphdr)))
tools/testing/selftests/net/tun.c
455
if (validate_hdrlen(&cur, &len, GENEVE_HLEN))
tools/testing/selftests/net/tun.c
457
if (validate_hdrlen(&cur, &len, ETH_HLEN))
tools/testing/selftests/net/tun.c
461
iph4 = (struct iphdr *)cur;
tools/testing/selftests/net/tun.c
462
if (validate_hdrlen(&cur, &len, sizeof(struct iphdr)))
tools/testing/selftests/net/tun.c
467
iph6 = (struct ipv6hdr *)cur;
tools/testing/selftests/net/tun.c
468
if (validate_hdrlen(&cur, &len, sizeof(struct ipv6hdr)))
tools/testing/selftests/net/tun.c
474
udph = (struct udphdr *)cur;
tools/testing/selftests/net/tun.c
475
if (validate_hdrlen(&cur, &len, sizeof(struct udphdr)))
tools/testing/selftests/net/tun.c
796
uint8_t *outer_udph, *cur = buf;
tools/testing/selftests/net/tun.c
806
cur += build_virtio_net_hdr_v1_hash_tunnel(cur, is_tap, hlen, gso_size,
tools/testing/selftests/net/tun.c
815
cur += build_eth(cur, proto, dmac, smac);
tools/testing/selftests/net/tun.c
820
cur += build_ipv4_header(cur, IPPROTO_UDP, pktlen, dip, sip);
tools/testing/selftests/net/tun.c
823
cur += build_ipv6_header(cur, IPPROTO_UDP, 0, pktlen, dip, sip);
tools/testing/selftests/net/tun.c
826
outer_udph = cur;
tools/testing/selftests/net/tun.c
831
cur += build_udp_header(cur, UDP_SRC_PORT, VN_PORT, pktlen);
tools/testing/selftests/net/tun.c
832
cur += build_geneve_header(cur, VN_ID);
tools/testing/selftests/net/tun.c
833
cur += build_eth(cur, proto, dmac, smac);
tools/testing/selftests/net/tun.c
837
cur += build_ipv4_header(cur, IPPROTO_UDP, pktlen, dip, sip);
tools/testing/selftests/net/tun.c
839
cur += build_ipv6_header(cur, IPPROTO_UDP, 0, pktlen, dip, sip);
tools/testing/selftests/net/tun.c
841
cur += build_udp_packet(cur, UDP_DST_PORT, UDP_SRC_PORT, payload_len,
tools/testing/selftests/net/tun.c
846
return cur - buf;
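Note: the second tun.c group (source lines 796-846) applies the same cursor idiom to an encapsulated frame: outer Ethernet, outer IP, outer UDP, GENEVE, then the full inner frame, with outer_udph retained so the outer UDP length and checksum can be patched once the total is known. A plausible implementation of a builder like the build_geneve_header() seen above, consistent with the RFC 8926 base header; the selftest's own version may differ:

#include <stdint.h>
#include <string.h>

/* 8-byte base GENEVE header (RFC 8926): version/opt-len, flags,
 * protocol type, 24-bit VNI, one reserved byte */
static size_t build_geneve_header(uint8_t *dst, uint32_t vni)
{
	memset(dst, 0, 8);		/* version 0, no options, no flags */
	dst[2] = 0x65;			/* protocol type 0x6558: */
	dst[3] = 0x58;			/* Transparent Ethernet Bridging */
	dst[4] = (vni >> 16) & 0xff;
	dst[5] = (vni >> 8) & 0xff;
	dst[6] = vni & 0xff;
	return 8;
}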
tools/testing/selftests/net/txtimestamp.c
157
static void validate_timestamp(struct timespec *cur, int min_delay)
tools/testing/selftests/net/txtimestamp.c
162
cur64 = timespec_to_us64(cur);
tools/testing/selftests/net/txtimestamp.c
182
static void __print_timestamp(const char *name, struct timespec *cur,
tools/testing/selftests/net/txtimestamp.c
187
if (!(cur->tv_sec | cur->tv_nsec))
tools/testing/selftests/net/txtimestamp.c
192
name, cur->tv_sec, cur->tv_nsec,
tools/testing/selftests/net/txtimestamp.c
196
name, cur->tv_sec, cur->tv_nsec / NSEC_PER_USEC,
tools/testing/selftests/net/txtimestamp.c
199
if (cur != &ts_usr) {
tools/testing/selftests/net/txtimestamp.c
200
ts_delta = timespec_to_ns64(cur) - timespec_to_ns64(&ts_usr);
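Note: __print_timestamp() compares two timestamps by first flattening each struct timespec into a 64-bit nanosecond count, after which the delta against the user-space timestamp is a plain subtraction. The conversion is presumably the obvious one; a sketch:

#include <stdint.h>
#include <time.h>

static int64_t timespec_to_ns64(const struct timespec *ts)
{
	/* widen tv_sec before multiplying to avoid 32-bit overflow */
	return (int64_t)ts->tv_sec * 1000000000LL + ts->tv_nsec;
}

/* delta between a hardware/software timestamp and the user-space one */
static int64_t delta_ns(const struct timespec *cur,
			const struct timespec *usr)
{
	return timespec_to_ns64(cur) - timespec_to_ns64(usr);
}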
tools/testing/selftests/net/txtimestamp.c
838
struct addrinfo *addrs, *cur;
tools/testing/selftests/net/txtimestamp.c
845
cur = addrs;
tools/testing/selftests/net/txtimestamp.c
846
while (cur && !have_ipv4 && !have_ipv6) {
tools/testing/selftests/net/txtimestamp.c
847
if (!have_ipv4 && cur->ai_family == AF_INET) {
tools/testing/selftests/net/txtimestamp.c
848
memcpy(&daddr, cur->ai_addr, sizeof(daddr));
tools/testing/selftests/net/txtimestamp.c
852
else if (!have_ipv6 && cur->ai_family == AF_INET6) {
tools/testing/selftests/net/txtimestamp.c
853
memcpy(&daddr6, cur->ai_addr, sizeof(daddr6));
tools/testing/selftests/net/txtimestamp.c
857
cur = cur->ai_next;
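Note: the txtimestamp.c walk over getaddrinfo() results is the standard pattern for picking one usable address per family: follow ai_next and copy the sockaddr for the first AF_INET and first AF_INET6 hit. A condensed sketch that scans the whole list rather than stopping early (host and hints are left as placeholders):

#include <netdb.h>
#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int pick_addrs(const char *host, struct sockaddr_in *v4,
		      struct sockaddr_in6 *v6)
{
	struct addrinfo *addrs, *cur;
	bool have_ipv4 = false, have_ipv6 = false;

	if (getaddrinfo(host, NULL, NULL, &addrs))
		return -1;

	for (cur = addrs; cur; cur = cur->ai_next) {
		if (!have_ipv4 && cur->ai_family == AF_INET) {
			memcpy(v4, cur->ai_addr, sizeof(*v4));
			have_ipv4 = true;
		} else if (!have_ipv6 && cur->ai_family == AF_INET6) {
			memcpy(v6, cur->ai_addr, sizeof(*v6));
			have_ipv6 = true;
		}
	}

	freeaddrinfo(addrs);
	return (have_ipv4 || have_ipv6) ? 0 : -1;
}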
tools/testing/selftests/net/udpgso_bench_rx.c
194
char cur = data[0];
tools/testing/selftests/net/udpgso_bench_rx.c
198
if (cur < 'a' || cur > 'z')
tools/testing/selftests/net/udpgso_bench_rx.c
202
if (cur == 'z')
tools/testing/selftests/net/udpgso_bench_rx.c
203
cur = 'a';
tools/testing/selftests/net/udpgso_bench_rx.c
205
cur++;
tools/testing/selftests/net/udpgso_bench_rx.c
207
if (data[i] != cur)
tools/testing/selftests/net/udpgso_bench_rx.c
211
sanitized_char(cur), cur);
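Note: udpgso_bench_rx.c validates payload integrity without keeping a copy of the expected buffer: the sender fills each datagram with a cyclic 'a'..'z' pattern, so the receiver only needs the first byte to regenerate and compare the rest. A sketch of that check:

#include <stddef.h>

/* data carries a cyclic 'a'..'z' pattern; knowing data[0] is enough
 * to predict every following byte. Returns 0 if the run is intact. */
static int check_pattern(const char *data, size_t len)
{
	char cur = data[0];
	size_t i;

	if (cur < 'a' || cur > 'z')
		return -1;

	for (i = 1; i < len; i++) {
		if (cur == 'z')
			cur = 'a';
		else
			cur++;
		if (data[i] != cur)
			return -1;
	}
	return 0;
}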
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
75
#define fifo_free_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
76
? (len)-((cur)+(used)) : 0)
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
77
#define fifo_free_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
78
? (cur) : (len)-(used))
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
80
#define fifo_used_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
81
? (used) : (len)-(cur))
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
82
#define fifo_used_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
83
? 0 : ((used)+(cur))-(len))
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
85
#define fifo_free_first_offset(cur, used) ((cur)+(used))
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
86
#define fifo_free_last_offset(cur, used, len) \
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
87
fifo_used_last_bytes(cur, used, len)
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
89
#define fifo_used_first_offset(cur) (cur)
tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
90
#define fifo_used_last_offset(cur) (0)
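Note: the gunz_test.c macros describe a circular buffer by the triple (cur, used, len): cur is the read offset, used the occupied byte count, len the capacity. Whether the occupied region wraps past the end is decided by the single test cur + used <= len, and every free/used first/last quantity falls out of that. A small demonstration of the two cases, reusing two of the macros above (values are illustrative):

#include <stdio.h>

#define fifo_free_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
					       ? (len)-((cur)+(used)) : 0)
#define fifo_used_last_bytes(cur, used, len)  ((((cur)+(used)) <= (len)) \
					       ? 0 : ((used)+(cur))-(len))

int main(void)
{
	/* unwrapped: data occupies [3, 8) of a 10-byte buffer */
	printf("free after data: %d, wrapped bytes: %d\n",
	       fifo_free_first_bytes(3, 5, 10),
	       fifo_used_last_bytes(3, 5, 10));	/* prints 2, 0 */

	/* wrapped: data occupies [8, 10) then [0, 3) */
	printf("free after data: %d, wrapped bytes: %d\n",
	       fifo_free_first_bytes(8, 5, 10),
	       fifo_used_last_bytes(8, 5, 10));	/* prints 0, 3 */
	return 0;
}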
tools/testing/selftests/x86/test_shadow_stack.c
563
struct node *head = NULL, *cur;
tools/testing/selftests/x86/test_shadow_stack.c
578
cur = malloc(sizeof(*cur));
tools/testing/selftests/x86/test_shadow_stack.c
579
cur->mapping = test_map;
tools/testing/selftests/x86/test_shadow_stack.c
581
cur->next = head;
tools/testing/selftests/x86/test_shadow_stack.c
582
head = cur;
tools/testing/selftests/x86/test_shadow_stack.c
586
cur = head;
tools/testing/selftests/x86/test_shadow_stack.c
587
head = cur->next;
tools/testing/selftests/x86/test_shadow_stack.c
588
munmap(cur->mapping, PAGE_SIZE);
tools/testing/selftests/x86/test_shadow_stack.c
589
free(cur);
tools/testing/selftests/x86/test_shadow_stack.c
606
struct node *head = NULL, *cur;
tools/testing/selftests/x86/test_shadow_stack.c
627
cur = malloc(sizeof(*cur));
tools/testing/selftests/x86/test_shadow_stack.c
628
cur->mapping = test_map;
tools/testing/selftests/x86/test_shadow_stack.c
630
cur->next = head;
tools/testing/selftests/x86/test_shadow_stack.c
631
head = cur;
tools/testing/selftests/x86/test_shadow_stack.c
641
cur = head;
tools/testing/selftests/x86/test_shadow_stack.c
642
head = cur->next;
tools/testing/selftests/x86/test_shadow_stack.c
643
munmap(cur->mapping, PAGE_SIZE);
tools/testing/selftests/x86/test_shadow_stack.c
644
free(cur);
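Note: both test_shadow_stack.c fragments use the same throwaway bookkeeping: push each mapping onto a singly linked list at its head, then pop the list to munmap and free everything. A sketch of that push/teardown idiom (struct node here mirrors the shape implied by the entries):

#include <stdlib.h>
#include <sys/mman.h>

struct node {
	struct node *next;
	void *mapping;
};

/* push: new nodes go in at the head, O(1) per mapping */
static struct node *track(struct node *head, void *map)
{
	struct node *cur = malloc(sizeof(*cur));

	if (!cur)
		return head;	/* sketch: real code should handle this */
	cur->mapping = map;
	cur->next = head;
	return cur;
}

/* teardown: pop every node, unmapping as we go */
static void release_all(struct node *head, size_t page_size)
{
	while (head) {
		struct node *cur = head;

		head = cur->next;
		munmap(cur->mapping, page_size);
		free(cur);
	}
}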
tools/tracing/latency/latency-collector.c
554
const bool *cur)
tools/tracing/latency/latency-collector.c
560
if (state->opt_valid[i] && state->opt[i] != cur[i]) {
tools/tracing/latency/latency-collector.c
593
static void restore_file(const char *file, char **saved, const char *cur)
tools/tracing/latency/latency-collector.c
595
if (*saved && was_changed(*saved, cur)) {
tools/tracing/latency/latency-collector.c
1340
static void write_file(const char *file, const char *cur, const char *new,
tools/tracing/latency/latency-collector.c
1347
if (cur && !needs_change(cur, new))
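Note: write_file()/restore_file() in latency-collector.c implement a save-if-changed, restore-on-exit protocol for tracing control files: the new value is written only when it differs from the current one, the old value is remembered, and on exit the saved value is written back only if the file was actually changed. A hedged sketch of that shape (needs_change() here is a plain strcmp wrapper standing in for both needs_change() and was_changed(), which may be simpler than the tool's own logic):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int needs_change(const char *cur, const char *new)
{
	return strcmp(cur, new) != 0;
}

/* write new into file, remembering the old value in *saved so the
 * caller can undo the change later */
static void write_file(const char *file, const char *cur,
		       const char *new, char **saved)
{
	FILE *f;

	if (cur && !needs_change(cur, new))
		return;		/* already holds the desired value */

	if (saved && cur)
		*saved = strdup(cur);

	f = fopen(file, "w");
	if (!f)
		return;		/* sketch: real code reports the error */
	fputs(new, f);
	fclose(f);
}

static void restore_file(const char *file, char **saved, const char *cur)
{
	if (*saved && needs_change(*saved, cur)) {
		write_file(file, cur, *saved, NULL);
		free(*saved);
		*saved = NULL;
	}
}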
virt/kvm/kvm_main.c
3731
ktime_t start, cur, poll_end;
virt/kvm/kvm_main.c
3741
start = cur = poll_end = ktime_get();
virt/kvm/kvm_main.c
3749
poll_end = cur = ktime_get();
virt/kvm/kvm_main.c
3750
} while (kvm_vcpu_can_poll(cur, stop));
virt/kvm/kvm_main.c
3755
cur = ktime_get();
virt/kvm/kvm_main.c
3758
ktime_to_ns(cur) - ktime_to_ns(poll_end);
virt/kvm/kvm_main.c
3760
ktime_to_ns(cur) - ktime_to_ns(poll_end));
virt/kvm/kvm_main.c
3764
halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
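Note: the kvm_main.c fragments bracket halt polling with three timestamps: start before polling begins, poll_end/cur refreshed on every poll iteration, and a final cur after the blocking wait, so poll time (poll_end - start) and total halt time (cur - start) can be attributed separately. A userspace analogue using clock_gettime(); work_pending() and the 200us poll budget are stand-ins for the kernel's wakeup check and kvm_vcpu_can_poll() logic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* stand-in for the real wakeup condition */
static bool work_pending(void) { return false; }

int main(void)
{
	int64_t start, cur, poll_end, stop;

	start = cur = poll_end = now_ns();
	stop = start + 200000;		/* illustrative 200us poll budget */

	do {
		if (work_pending())
			goto out;	/* poll succeeded: no block needed */
		poll_end = cur = now_ns();
	} while (cur < stop);

	/* polling failed; a real implementation would block here */
	cur = now_ns();
out:
	printf("polled %lld ns, halted %lld ns total\n",
	       (long long)(poll_end - start),
	       (long long)(cur - start));
	return 0;
}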