Symbol: iter
(each entry below: file path, line number within that file, matching source line)
arch/arm/kernel/crash_dump.c
19
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/arm/kernel/crash_dump.c
31
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/arm64/kernel/crash_dump.c
15
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/arm64/kernel/crash_dump.c
27
csize = copy_to_iter(vaddr + offset, csize, iter);
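The copy_oldmem_page() hits here, and the loongarch, mips, powerpc, riscv, sh, and x86 ones further down, all share one shape: map the old kernel's page, push the bytes into the caller's iterator with copy_to_iter(), and return how many bytes landed. A minimal sketch of that shape, following the arm64 version above (other arches map the page differently, e.g. with ioremap() or kmap_local_pfn()):

    #include <linux/crash_dump.h>
    #include <linux/io.h>
    #include <linux/uio.h>

    ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                             size_t csize, unsigned long offset)
    {
            void *vaddr;

            if (!csize)
                    return 0;

            /* Map one page of the previous kernel's memory. */
            vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
            if (!vaddr)
                    return -ENOMEM;

            /* copy_to_iter() returns the number of bytes actually copied. */
            csize = copy_to_iter(vaddr + offset, csize, iter);

            memunmap(vaddr);
            return csize;
    }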
arch/arm64/kvm/vgic/vgic-debug.c
105
struct vgic_state_iter *iter;
arch/arm64/kvm/vgic/vgic-debug.c
107
iter = kmalloc_obj(*iter);
arch/arm64/kvm/vgic/vgic-debug.c
108
if (!iter)
arch/arm64/kvm/vgic/vgic-debug.c
111
iter_init(kvm, iter, *pos);
arch/arm64/kvm/vgic/vgic-debug.c
113
if (end_of_vgic(iter)) {
arch/arm64/kvm/vgic/vgic-debug.c
114
kfree(iter);
arch/arm64/kvm/vgic/vgic-debug.c
115
iter = NULL;
arch/arm64/kvm/vgic/vgic-debug.c
118
return iter;
arch/arm64/kvm/vgic/vgic-debug.c
124
struct vgic_state_iter *iter = v;
arch/arm64/kvm/vgic/vgic-debug.c
127
iter_next(kvm, iter);
arch/arm64/kvm/vgic/vgic-debug.c
128
if (end_of_vgic(iter)) {
arch/arm64/kvm/vgic/vgic-debug.c
129
kfree(iter);
arch/arm64/kvm/vgic/vgic-debug.c
130
iter = NULL;
arch/arm64/kvm/vgic/vgic-debug.c
132
return iter;
arch/arm64/kvm/vgic/vgic-debug.c
137
struct vgic_state_iter *iter = v;
arch/arm64/kvm/vgic/vgic-debug.c
142
kfree(iter);
arch/arm64/kvm/vgic/vgic-debug.c
146
struct vgic_state_iter *iter)
arch/arm64/kvm/vgic/vgic-debug.c
237
struct vgic_state_iter *iter = v;
arch/arm64/kvm/vgic/vgic-debug.c
242
if (iter->dist_id == 0) {
arch/arm64/kvm/vgic/vgic-debug.c
243
print_dist_state(s, &kvm->arch.vgic, iter);
arch/arm64/kvm/vgic/vgic-debug.c
250
if (iter->vcpu_id < iter->nr_cpus)
arch/arm64/kvm/vgic/vgic-debug.c
251
vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
arch/arm64/kvm/vgic/vgic-debug.c
253
if (iter->intid < VGIC_NR_PRIVATE_IRQS)
arch/arm64/kvm/vgic/vgic-debug.c
254
irq = vgic_get_vcpu_irq(vcpu, iter->intid);
arch/arm64/kvm/vgic/vgic-debug.c
256
irq = vgic_get_irq(kvm, iter->intid);
arch/arm64/kvm/vgic/vgic-debug.c
313
static inline bool end_of_iter(struct vgic_its_iter *iter)
arch/arm64/kvm/vgic/vgic-debug.c
315
return !iter->dev && !iter->ite;
arch/arm64/kvm/vgic/vgic-debug.c
328
static void vgic_its_iter_next(struct vgic_its *its, struct vgic_its_iter *iter)
arch/arm64/kvm/vgic/vgic-debug.c
33
static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
arch/arm64/kvm/vgic/vgic-debug.c
330
struct its_device *dev = iter->dev;
arch/arm64/kvm/vgic/vgic-debug.c
331
struct its_ite *ite = iter->ite;
arch/arm64/kvm/vgic/vgic-debug.c
347
iter->dev = dev;
arch/arm64/kvm/vgic/vgic-debug.c
348
iter->ite = ite;
arch/arm64/kvm/vgic/vgic-debug.c
367
struct vgic_its_iter *iter;
arch/arm64/kvm/vgic/vgic-debug.c
37
if (iter->dist_id == 0) {
arch/arm64/kvm/vgic/vgic-debug.c
378
iter = kmalloc_obj(*iter);
arch/arm64/kvm/vgic/vgic-debug.c
379
if (!iter)
arch/arm64/kvm/vgic/vgic-debug.c
38
iter->dist_id++;
arch/arm64/kvm/vgic/vgic-debug.c
382
iter->dev = dev;
arch/arm64/kvm/vgic/vgic-debug.c
383
iter->ite = list_first_entry_or_null(&dev->itt_head,
arch/arm64/kvm/vgic/vgic-debug.c
386
while (!end_of_iter(iter) && offset--)
arch/arm64/kvm/vgic/vgic-debug.c
387
vgic_its_iter_next(its, iter);
arch/arm64/kvm/vgic/vgic-debug.c
389
if (end_of_iter(iter)) {
arch/arm64/kvm/vgic/vgic-debug.c
390
kfree(iter);
arch/arm64/kvm/vgic/vgic-debug.c
394
return iter;
arch/arm64/kvm/vgic/vgic-debug.c
412
struct vgic_its_iter *iter = v;
arch/arm64/kvm/vgic/vgic-debug.c
415
vgic_its_iter_next(its, iter);
arch/arm64/kvm/vgic/vgic-debug.c
417
if (end_of_iter(iter)) {
arch/arm64/kvm/vgic/vgic-debug.c
418
kfree(iter);
arch/arm64/kvm/vgic/vgic-debug.c
421
return iter;
arch/arm64/kvm/vgic/vgic-debug.c
434
struct vgic_its_iter *iter = v;
arch/arm64/kvm/vgic/vgic-debug.c
436
if (!IS_ERR_OR_NULL(iter))
arch/arm64/kvm/vgic/vgic-debug.c
437
kfree(iter);
arch/arm64/kvm/vgic/vgic-debug.c
453
struct vgic_its_iter *iter = v;
arch/arm64/kvm/vgic/vgic-debug.c
454
struct its_device *dev = iter->dev;
arch/arm64/kvm/vgic/vgic-debug.c
455
struct its_ite *ite = iter->ite;
arch/arm64/kvm/vgic/vgic-debug.c
46
if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1)) {
arch/arm64/kvm/vgic/vgic-debug.c
47
if (iter->intid == VGIC_LPI_MAX_INTID + 1)
arch/arm64/kvm/vgic/vgic-debug.c
51
if (!xa_find_after(&dist->lpi_xa, &iter->intid,
arch/arm64/kvm/vgic/vgic-debug.c
53
iter->intid = VGIC_LPI_MAX_INTID + 1;
arch/arm64/kvm/vgic/vgic-debug.c
58
iter->intid++;
arch/arm64/kvm/vgic/vgic-debug.c
59
if (iter->intid == VGIC_NR_PRIVATE_IRQS &&
arch/arm64/kvm/vgic/vgic-debug.c
60
++iter->vcpu_id < iter->nr_cpus)
arch/arm64/kvm/vgic/vgic-debug.c
61
iter->intid = 0;
arch/arm64/kvm/vgic/vgic-debug.c
79
static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
arch/arm64/kvm/vgic/vgic-debug.c
84
memset(iter, 0, sizeof(*iter));
arch/arm64/kvm/vgic/vgic-debug.c
86
iter->nr_cpus = nr_cpus;
arch/arm64/kvm/vgic/vgic-debug.c
87
iter->nr_spis = kvm->arch.vgic.nr_spis;
arch/arm64/kvm/vgic/vgic-debug.c
91
iter_next(kvm, iter);
arch/arm64/kvm/vgic/vgic-debug.c
94
static bool end_of_vgic(struct vgic_state_iter *iter)
arch/arm64/kvm/vgic/vgic-debug.c
96
return iter->dist_id > 0 &&
arch/arm64/kvm/vgic/vgic-debug.c
97
iter->vcpu_id == iter->nr_cpus &&
arch/arm64/kvm/vgic/vgic-debug.c
98
iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) &&
arch/arm64/kvm/vgic/vgic-debug.c
99
iter->intid > VGIC_LPI_MAX_INTID;
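The vgic-debug.c hits trace out the standard seq_file start/next/stop contract: ->start() allocates a private cursor, positions it at *pos, and returns NULL once past the end; ->next() advances it and frees it at the end; ->stop() frees whatever is left. A condensed sketch of that contract as these lines use it; the kmalloc_obj(*iter) spelling above is a typed-allocation helper, shown here in its classical kmalloc() form, and the locking and extra vgic state of the real file are elided:

    static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
    {
            struct kvm *kvm = s->private;
            struct vgic_state_iter *iter;

            iter = kmalloc(sizeof(*iter), GFP_KERNEL);
            if (!iter)
                    return NULL;

            iter_init(kvm, iter, *pos);
            if (end_of_vgic(iter)) {
                    kfree(iter);
                    iter = NULL;    /* NULL tells seq_file we are done */
            }
            return iter;
    }

    static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
    {
            struct kvm *kvm = s->private;
            struct vgic_state_iter *iter = v;

            ++*pos;
            iter_next(kvm, iter);
            if (end_of_vgic(iter)) {
                    kfree(iter);
                    iter = NULL;
            }
            return iter;
    }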
arch/arm64/kvm/vgic/vgic-mmio-v3.c
323
struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;
arch/arm64/kvm/vgic/vgic-mmio-v3.c
338
list_for_each_entry(iter, rd_regions, list) {
arch/arm64/kvm/vgic/vgic-mmio-v3.c
339
if (iter->base == end && iter->free_index > 0)
arch/loongarch/kernel/crash_dump.c
18
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/loongarch/kernel/crash_dump.c
6
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/m68k/emu/nfblock.c
64
struct bvec_iter iter;
arch/m68k/emu/nfblock.c
70
bio_for_each_segment(bvec, bio, iter) {
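bio_for_each_segment() walks a bio one struct bio_vec at a time through a by-value struct bvec_iter, leaving bio->bi_iter itself untouched; nfblock here and simdisk further down use it to shovel each segment to the backing store. A sketch of the loop shape (the per-segment body is illustrative):

    struct bio_vec bvec;
    struct bvec_iter iter;
    sector_t sec = bio->bi_iter.bi_sector;

    bio_for_each_segment(bvec, bio, iter) {
            void *buf = bvec_virt(&bvec);   /* kernel mapping of this segment */
            unsigned int len = bvec.bv_len;

            /* ... read or write 'len' bytes at sector 'sec' ... */
            sec += len >> SECTOR_SHIFT;
    }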
arch/mips/kernel/crash_dump.c
15
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/mips/kernel/crash_dump.c
6
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/mips/kernel/jump_label.c
103
for (iter = iter_start; iter < iter_stop; iter++) {
arch/mips/kernel/jump_label.c
105
if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
arch/mips/kernel/jump_label.c
106
arch_jump_label_transform(iter, JUMP_LABEL_NOP);
arch/mips/kernel/jump_label.c
97
struct jump_entry *iter;
arch/powerpc/kernel/cacheinfo.c
169
struct cache *iter;
arch/powerpc/kernel/cacheinfo.c
171
list_for_each_entry(iter, &cache_list, list)
arch/powerpc/kernel/cacheinfo.c
172
WARN_ONCE(iter->next_local == cache,
arch/powerpc/kernel/cacheinfo.c
174
iter->ofnode,
arch/powerpc/kernel/cacheinfo.c
175
cache_type_string(iter),
arch/powerpc/kernel/cacheinfo.c
307
struct cache *iter;
arch/powerpc/kernel/cacheinfo.c
313
list_for_each_entry(iter, &cache_list, list)
arch/powerpc/kernel/cacheinfo.c
314
if (iter->ofnode == cache->ofnode &&
arch/powerpc/kernel/cacheinfo.c
315
iter->group_id == cache->group_id &&
arch/powerpc/kernel/cacheinfo.c
316
iter->next_local == cache)
arch/powerpc/kernel/cacheinfo.c
317
return iter;
arch/powerpc/kernel/cacheinfo.c
327
struct cache *iter;
arch/powerpc/kernel/cacheinfo.c
329
list_for_each_entry(iter, &cache_list, list) {
arch/powerpc/kernel/cacheinfo.c
330
if (iter->ofnode != node ||
arch/powerpc/kernel/cacheinfo.c
331
iter->group_id != group_id)
arch/powerpc/kernel/cacheinfo.c
333
cache = cache_find_first_sibling(iter);
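Both cacheinfo hits use the same list-scan idiom: walk a global list with list_for_each_entry() and capture a match in a separate result pointer, so the loop cursor is never touched after the loop (many of the `iter` variables in this listing exist precisely because of the tree-wide cleanup that stopped using list cursors post-loop). A minimal sketch of the capture pattern, with the field names from the lines above:

    struct cache *iter, *found = NULL;

    list_for_each_entry(iter, &cache_list, list) {
            if (iter->ofnode == node && iter->group_id == group_id) {
                    found = cache_find_first_sibling(iter);
                    break;
            }
    }
    return found;       /* NULL when nothing matched */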
arch/powerpc/kernel/crash_dump.c
72
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/powerpc/kernel/crash_dump.c
86
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/powerpc/kernel/crash_dump.c
89
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/powerpc/kernel/nvram_64.c
650
static struct kmsg_dump_iter iter;
arch/powerpc/kernel/nvram_64.c
685
kmsg_dump_rewind(&iter);
arch/powerpc/kernel/nvram_64.c
686
kmsg_dump_get_buffer(&iter, false,
arch/powerpc/kernel/nvram_64.c
691
kmsg_dump_rewind(&iter);
arch/powerpc/kernel/nvram_64.c
692
kmsg_dump_get_buffer(&iter, false,
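The nvram_64 and xmon hits cover both kmsg_dump consumer styles: rewind the iterator, then either pull the tail of the log into one buffer (kmsg_dump_get_buffer()) or walk it line by line (kmsg_dump_get_line()). A sketch of the line-by-line form used by xmon and um above:

    static struct kmsg_dump_iter iter;
    static char line[256];
    size_t len;

    kmsg_dump_rewind(&iter);
    while (kmsg_dump_get_line(&iter, false, line, sizeof(line), &len)) {
            /* emit 'len' bytes of 'line' to the target console/NVRAM */
    }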
arch/powerpc/kernel/trace/ftrace.c
393
struct ftrace_rec_iter *iter;
arch/powerpc/kernel/trace/ftrace.c
397
for_ftrace_rec_iter(iter) {
arch/powerpc/kernel/trace/ftrace.c
398
rec = ftrace_rec_iter_record(iter);
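for_ftrace_rec_iter() hides ftrace's paged record storage behind an opaque cursor; ftrace_rec_iter_record() resolves the cursor to the current struct dyn_ftrace. The powerpc code here and the x86 code further down use it to patch every traced call site in one pass. A sketch of the loop:

    struct ftrace_rec_iter *iter;
    struct dyn_ftrace *rec;

    for_ftrace_rec_iter(iter) {
            rec = ftrace_rec_iter_record(iter);
            /* prepare or patch the call site at rec->ip */
    }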
arch/powerpc/kvm/book3s_hv_uvmem.c
364
struct kvmppc_uvmem_slot *p = NULL, *iter;
arch/powerpc/kvm/book3s_hv_uvmem.c
368
list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
arch/powerpc/kvm/book3s_hv_uvmem.c
369
if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
arch/powerpc/kvm/book3s_hv_uvmem.c
370
p = iter;
arch/powerpc/xmon/xmon.c
3057
struct kmsg_dump_iter iter;
arch/powerpc/xmon/xmon.c
3069
kmsg_dump_rewind(&iter);
arch/powerpc/xmon/xmon.c
3071
while (kmsg_dump_get_line(&iter, false, buf, sizeof(buf), &len)) {
arch/riscv/kernel/crash_dump.c
12
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/riscv/kernel/crash_dump.c
24
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/s390/include/asm/maccess.h
14
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count);
arch/s390/include/asm/sclp.h
192
size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count);
arch/s390/kernel/crash_dump.c
118
static size_t copy_oldmem_iter(struct iov_iter *iter, unsigned long src, size_t count)
arch/s390/kernel/crash_dump.c
126
copied = memcpy_hsa_iter(iter, src, len);
arch/s390/kernel/crash_dump.c
138
copied = memcpy_real_iter(iter, src, len);
arch/s390/kernel/crash_dump.c
151
struct iov_iter iter;
arch/s390/kernel/crash_dump.c
156
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
arch/s390/kernel/crash_dump.c
157
if (copy_oldmem_iter(&iter, src, count) < count)
arch/s390/kernel/crash_dump.c
165
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
arch/s390/kernel/crash_dump.c
171
return copy_oldmem_iter(iter, src, csize);
arch/s390/kvm/diag.c
23
struct kvm_memslot_iter iter;
arch/s390/kvm/diag.c
30
kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
arch/s390/kvm/diag.c
31
slot = iter.slot;
arch/s390/kvm/gmap.c
152
struct radix_tree_iter iter;
arch/s390/kvm/gmap.c
162
radix_tree_for_each_slot(slot, root, &iter, index) {
arch/s390/kvm/gmap.c
163
indices[nr] = iter.index;
arch/s390/kvm/interrupt.c
1659
struct kvm_s390_interrupt_info *iter;
arch/s390/kvm/interrupt.c
1664
list_for_each_entry(iter, isc_list, list) {
arch/s390/kvm/interrupt.c
1665
if (schid && (id != iter->io.subchannel_id ||
arch/s390/kvm/interrupt.c
1666
nr != iter->io.subchannel_nr))
arch/s390/kvm/interrupt.c
1669
list_del_init(&iter->list);
arch/s390/kvm/interrupt.c
1674
return iter;
arch/s390/mm/maccess.c
103
copied = copy_to_iter(chunk, len, iter);
arch/s390/mm/maccess.c
117
struct iov_iter iter;
arch/s390/mm/maccess.c
122
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
arch/s390/mm/maccess.c
123
if (memcpy_real_iter(&iter, src, count) < count)
arch/s390/mm/maccess.c
83
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
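The s390 maccess hits show the wrapper side of the iov_iter plumbing: a caller that only has a flat kernel buffer builds a one-element kvec iterator so the same iter-based copier serves both /proc/vmcore-style readers and plain buffer reads. A sketch of that wrapper, following the maccess.c lines above:

    int memcpy_real(void *dest, unsigned long src, size_t count)
    {
            struct iov_iter iter;
            struct kvec kvec = { .iov_base = dest, .iov_len = count };

            iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
            if (memcpy_real_iter(&iter, src, count) < count)
                    return -EFAULT;
            return 0;
    }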
arch/sh/kernel/crash_dump.c
14
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/sh/kernel/crash_dump.c
23
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/sh/mm/asids-debugfs.c
29
static int asids_debugfs_show(struct seq_file *file, void *iter)
arch/sh/mm/cache-debugfs.c
25
static int cache_debugfs_show(struct seq_file *file, void *iter)
arch/sh/mm/pmb.c
145
struct pmb_entry *pmbe, *iter;
arch/sh/mm/pmb.c
175
for (iter = pmbe->link; iter; iter = iter->link)
arch/sh/mm/pmb.c
176
span += iter->size;
arch/sh/mm/pmb.c
815
static int pmb_debugfs_show(struct seq_file *file, void *iter)
arch/sh/mm/tlb-debugfs.c
39
static int tlb_seq_show(struct seq_file *file, void *iter)
arch/sparc/prom/bootstr_32.c
19
int iter;
arch/sparc/prom/bootstr_32.c
31
for (iter = 1; iter < 8; iter++) {
arch/sparc/prom/bootstr_32.c
32
arg = (*(romvec->pv_v0bootargs))->argv[iter];
arch/um/drivers/ubd_kern.c
1195
struct req_iterator iter;
arch/um/drivers/ubd_kern.c
1204
rq_for_each_segment(bvec, req, iter) {
arch/um/kernel/kmsg_dump.c
13
static struct kmsg_dump_iter iter;
arch/um/kernel/kmsg_dump.c
45
kmsg_dump_rewind(&iter);
arch/um/kernel/kmsg_dump.c
48
while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) {
arch/x86/events/intel/pt.c
1222
struct topa *topa, *iter;
arch/x86/events/intel/pt.c
1227
list_for_each_entry_safe(topa, iter, &buf->tables, list) {
arch/x86/kernel/cpu/microcode/intel.c
831
static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
arch/x86/kernel/cpu/microcode/intel.c
839
while (iov_iter_count(iter)) {
arch/x86/kernel/cpu/microcode/intel.c
844
if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
arch/x86/kernel/cpu/microcode/intel.c
855
if (data_size > iov_iter_count(iter)) {
arch/x86/kernel/cpu/microcode/intel.c
871
if (!copy_from_iter_full(data, data_size, iter) ||
arch/x86/kernel/cpu/microcode/intel.c
892
if (iov_iter_count(iter))
arch/x86/kernel/cpu/microcode/intel.c
934
struct iov_iter iter;
arch/x86/kernel/cpu/microcode/intel.c
952
iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
arch/x86/kernel/cpu/microcode/intel.c
953
ret = parse_microcode_blobs(cpu, &iter);
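parse_microcode_blobs() treats the firmware image as a stream: iov_iter_count() bounds the loop, and copy_from_iter_full() either consumes exactly the requested bytes or fails with no partial progress, so a short read, a header claiming more data than remains, or leftover bytes after the loop all mean a malformed blob. A condensed sketch of the loop shape; the DEFAULT_UCODE_DATASIZE fallback stands in for the header-size arithmetic, and the keep/free bookkeeping is trimmed:

    while (iov_iter_count(iter)) {
            struct microcode_header_intel mc_header;
            unsigned int data_size;
            void *data;

            if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter))
                    break;          /* short read: truncated blob */

            /* datasize == 0 denotes the historical fixed-size payload */
            data_size = mc_header.datasize ?: DEFAULT_UCODE_DATASIZE;
            if (data_size > iov_iter_count(iter))
                    break;          /* header claims more bytes than remain */

            data = kmalloc(data_size, GFP_KERNEL);
            if (!data || !copy_from_iter_full(data, data_size, iter))
                    break;
            /* ... sanity-check this update, keep the best match or kfree ... */
    }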
arch/x86/kernel/crash_dump_32.c
31
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
arch/x86/kernel/crash_dump_32.c
43
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/x86/kernel/crash_dump_64.c
15
static ssize_t __copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
arch/x86/kernel/crash_dump_64.c
32
csize = copy_to_iter(vaddr + offset, csize, iter);
arch/x86/kernel/crash_dump_64.c
38
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize,
arch/x86/kernel/crash_dump_64.c
41
return __copy_oldmem_page(iter, pfn, csize, offset, false);
arch/x86/kernel/crash_dump_64.c
49
ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
arch/x86/kernel/crash_dump_64.c
52
return __copy_oldmem_page(iter, pfn, csize, offset, true);
arch/x86/kernel/crash_dump_64.c
58
struct iov_iter iter;
arch/x86/kernel/crash_dump_64.c
60
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
arch/x86/kernel/crash_dump_64.c
62
return read_from_oldmem(&iter, count, ppos,
arch/x86/kernel/ftrace.c
205
struct ftrace_rec_iter *iter;
arch/x86/kernel/ftrace.c
210
for_ftrace_rec_iter(iter) {
arch/x86/kernel/ftrace.c
211
rec = ftrace_rec_iter_record(iter);
arch/x86/kernel/ftrace.c
237
for_ftrace_rec_iter(iter) {
arch/x86/kernel/ftrace.c
238
rec = ftrace_rec_iter_record(iter);
arch/x86/kvm/mmu/mmu.c
1227
struct rmap_iterator *iter)
arch/x86/kvm/mmu/mmu.c
1235
iter->desc = NULL;
arch/x86/kvm/mmu/mmu.c
1239
iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
arch/x86/kvm/mmu/mmu.c
1240
iter->pos = 0;
arch/x86/kvm/mmu/mmu.c
1241
return iter->desc->sptes[iter->pos];
arch/x86/kvm/mmu/mmu.c
1249
static u64 *rmap_get_next(struct rmap_iterator *iter)
arch/x86/kvm/mmu/mmu.c
1251
if (iter->desc) {
arch/x86/kvm/mmu/mmu.c
1252
if (iter->pos < PTE_LIST_EXT - 1) {
arch/x86/kvm/mmu/mmu.c
1253
++iter->pos;
arch/x86/kvm/mmu/mmu.c
1254
if (iter->desc->sptes[iter->pos])
arch/x86/kvm/mmu/mmu.c
1255
return iter->desc->sptes[iter->pos];
arch/x86/kvm/mmu/mmu.c
1258
iter->desc = iter->desc->more;
arch/x86/kvm/mmu/mmu.c
1260
if (iter->desc) {
arch/x86/kvm/mmu/mmu.c
1261
iter->pos = 0;
arch/x86/kvm/mmu/mmu.c
1263
return iter->desc->sptes[iter->pos];
arch/x86/kvm/mmu/mmu.c
1335
struct rmap_iterator iter;
arch/x86/kvm/mmu/mmu.c
1338
for_each_rmap_spte(rmap_head, &iter, sptep)
arch/x86/kvm/mmu/mmu.c
1363
struct rmap_iterator iter;
arch/x86/kvm/mmu/mmu.c
1366
for_each_rmap_spte(rmap_head, &iter, sptep) {
arch/x86/kvm/mmu/mmu.c
1716
struct rmap_iterator iter;
arch/x86/kvm/mmu/mmu.c
1730
for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte) {
arch/x86/kvm/mmu/mmu.c
1862
struct rmap_iterator iter;
arch/x86/kvm/mmu/mmu.c
1864
for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
arch/x86/kvm/mmu/mmu.c
2650
struct rmap_iterator iter;
arch/x86/kvm/mmu/mmu.c
2652
while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
arch/x86/kvm/mmu/mmu.c
6892
struct kvm_memslot_iter iter;
arch/x86/kvm/mmu/mmu.c
6903
kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
arch/x86/kvm/mmu/mmu.c
6904
memslot = iter.slot;
arch/x86/kvm/mmu/mmu.c
7143
struct rmap_iterator iter;
arch/x86/kvm/mmu/mmu.c
7149
for_each_rmap_spte(rmap_head, &iter, huge_sptep) {
arch/x86/kvm/mmu/mmu.c
7259
struct rmap_iterator iter;
arch/x86/kvm/mmu/mmu.c
7264
for_each_rmap_spte(rmap_head, &iter, sptep) {
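rmap_get_first()/rmap_get_next() flatten the pte_list_desc chain behind a two-field cursor (desc plus pos), and for_each_rmap_spte() wraps the pair into a for loop. One detail from the lines above: the zapping site loops on rmap_get_first() instead, because its body removes entries and restarting from the head is the only safe way to continue. A sketch of the ordinary consumer:

    u64 *sptep;
    struct rmap_iterator iter;

    for_each_rmap_spte(rmap_head, &iter, sptep) {
            /* *sptep is one shadow PTE mapping this gfn */
            if (is_writable_pte(*sptep))
                    /* e.g. clear the writable bit for dirty logging */;
    }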
arch/x86/kvm/mmu/tdp_iter.c
100
tdp_iter_refresh_sptep(iter);
arch/x86/kvm/mmu/tdp_iter.c
112
static bool try_step_side(struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_iter.c
118
if (SPTE_INDEX((iter->gfn | iter->gfn_bits) << PAGE_SHIFT, iter->level) ==
arch/x86/kvm/mmu/tdp_iter.c
12
static void tdp_iter_refresh_sptep(struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_iter.c
122
iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
arch/x86/kvm/mmu/tdp_iter.c
123
iter->next_last_level_gfn = iter->gfn;
arch/x86/kvm/mmu/tdp_iter.c
124
iter->sptep++;
arch/x86/kvm/mmu/tdp_iter.c
125
iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
arch/x86/kvm/mmu/tdp_iter.c
135
static bool try_step_up(struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_iter.c
137
if (iter->level == iter->root_level)
arch/x86/kvm/mmu/tdp_iter.c
14
iter->sptep = iter->pt_path[iter->level - 1] +
arch/x86/kvm/mmu/tdp_iter.c
140
iter->level++;
arch/x86/kvm/mmu/tdp_iter.c
141
iter->gfn = gfn_round_for_level(iter->gfn, iter->level);
arch/x86/kvm/mmu/tdp_iter.c
142
tdp_iter_refresh_sptep(iter);
arch/x86/kvm/mmu/tdp_iter.c
15
SPTE_INDEX((iter->gfn | iter->gfn_bits) << PAGE_SHIFT, iter->level);
arch/x86/kvm/mmu/tdp_iter.c
16
iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
arch/x86/kvm/mmu/tdp_iter.c
163
void tdp_iter_next(struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_iter.c
165
if (iter->yielded) {
arch/x86/kvm/mmu/tdp_iter.c
166
tdp_iter_restart(iter);
arch/x86/kvm/mmu/tdp_iter.c
170
if (try_step_down(iter))
arch/x86/kvm/mmu/tdp_iter.c
174
if (try_step_side(iter))
arch/x86/kvm/mmu/tdp_iter.c
176
} while (try_step_up(iter));
arch/x86/kvm/mmu/tdp_iter.c
177
iter->valid = false;
arch/x86/kvm/mmu/tdp_iter.c
23
void tdp_iter_restart(struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_iter.c
25
iter->yielded = false;
arch/x86/kvm/mmu/tdp_iter.c
26
iter->yielded_gfn = iter->next_last_level_gfn;
arch/x86/kvm/mmu/tdp_iter.c
27
iter->level = iter->root_level;
arch/x86/kvm/mmu/tdp_iter.c
29
iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level);
arch/x86/kvm/mmu/tdp_iter.c
30
tdp_iter_refresh_sptep(iter);
arch/x86/kvm/mmu/tdp_iter.c
32
iter->valid = true;
arch/x86/kvm/mmu/tdp_iter.c
39
void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
arch/x86/kvm/mmu/tdp_iter.c
45
iter->valid = false;
arch/x86/kvm/mmu/tdp_iter.c
49
iter->next_last_level_gfn = next_last_level_gfn;
arch/x86/kvm/mmu/tdp_iter.c
50
iter->gfn_bits = gfn_bits;
arch/x86/kvm/mmu/tdp_iter.c
51
iter->root_level = root->role.level;
arch/x86/kvm/mmu/tdp_iter.c
52
iter->min_level = min_level;
arch/x86/kvm/mmu/tdp_iter.c
53
iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root->spt;
arch/x86/kvm/mmu/tdp_iter.c
54
iter->as_id = kvm_mmu_page_as_id(root);
arch/x86/kvm/mmu/tdp_iter.c
56
tdp_iter_restart(iter);
arch/x86/kvm/mmu/tdp_iter.c
80
static bool try_step_down(struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_iter.c
84
if (iter->level == iter->min_level)
arch/x86/kvm/mmu/tdp_iter.c
91
iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep);
arch/x86/kvm/mmu/tdp_iter.c
93
child_pt = spte_to_child_pt(iter->old_spte, iter->level);
arch/x86/kvm/mmu/tdp_iter.c
97
iter->level--;
arch/x86/kvm/mmu/tdp_iter.c
98
iter->pt_path[iter->level - 1] = child_pt;
arch/x86/kvm/mmu/tdp_iter.c
99
iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level);
arch/x86/kvm/mmu/tdp_iter.h
123
#define for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) \
arch/x86/kvm/mmu/tdp_iter.h
124
for (tdp_iter_start(&iter, root, min_level, start, kvm_gfn_root_bits(kvm, root)); \
arch/x86/kvm/mmu/tdp_iter.h
125
iter.valid && iter.gfn < end; \
arch/x86/kvm/mmu/tdp_iter.h
126
tdp_iter_next(&iter))
arch/x86/kvm/mmu/tdp_iter.h
128
#define for_each_tdp_pte_min_level_all(iter, root, min_level) \
arch/x86/kvm/mmu/tdp_iter.h
129
for (tdp_iter_start(&iter, root, min_level, 0, 0); \
arch/x86/kvm/mmu/tdp_iter.h
130
iter.valid && iter.gfn < tdp_mmu_max_gfn_exclusive(); \
arch/x86/kvm/mmu/tdp_iter.h
131
tdp_iter_next(&iter))
arch/x86/kvm/mmu/tdp_iter.h
133
#define for_each_tdp_pte(iter, kvm, root, start, end) \
arch/x86/kvm/mmu/tdp_iter.h
134
for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end)
arch/x86/kvm/mmu/tdp_iter.h
138
void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
arch/x86/kvm/mmu/tdp_iter.h
140
void tdp_iter_next(struct tdp_iter *iter);
arch/x86/kvm/mmu/tdp_iter.h
141
void tdp_iter_restart(struct tdp_iter *iter);
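tdp_iter is an explicit pre-order page-table walker: tdp_iter_start() seeds the cursor at the root, and tdp_iter_next() tries to step down, then sideways, then up, clearing iter.valid once the range is exhausted. The for_each_tdp_pte*() macros above wrap exactly that into a for loop; expanded, a min-level walk is:

    struct tdp_iter iter;

    for (tdp_iter_start(&iter, root, min_level, start,
                        kvm_gfn_root_bits(kvm, root));
         iter.valid && iter.gfn < end;
         tdp_iter_next(&iter)) {
            /* iter.sptep, iter.old_spte and iter.level describe this PTE */
    }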
arch/x86/kvm/mmu/tdp_mmu.c
1002
tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
arch/x86/kvm/mmu/tdp_mmu.c
1170
struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_mmu.c
1172
struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
arch/x86/kvm/mmu/tdp_mmu.c
1180
if (is_shadow_present_pte(iter->old_spte) &&
arch/x86/kvm/mmu/tdp_mmu.c
1181
(fault->prefetch || is_access_allowed(fault, iter->old_spte)) &&
arch/x86/kvm/mmu/tdp_mmu.c
1182
is_last_spte(iter->old_spte, iter->level)) {
arch/x86/kvm/mmu/tdp_mmu.c
1183
WARN_ON_ONCE(fault->pfn != spte_to_pfn(iter->old_spte));
arch/x86/kvm/mmu/tdp_mmu.c
1188
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
arch/x86/kvm/mmu/tdp_mmu.c
1190
wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
arch/x86/kvm/mmu/tdp_mmu.c
1191
fault->pfn, iter->old_spte, fault->prefetch,
arch/x86/kvm/mmu/tdp_mmu.c
1194
if (new_spte == iter->old_spte)
arch/x86/kvm/mmu/tdp_mmu.c
1196
else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1198
else if (is_shadow_present_pte(iter->old_spte) &&
arch/x86/kvm/mmu/tdp_mmu.c
1199
(!is_last_spte(iter->old_spte, iter->level) ||
arch/x86/kvm/mmu/tdp_mmu.c
1200
WARN_ON_ONCE(leaf_spte_change_needs_tlb_flush(iter->old_spte, new_spte))))
arch/x86/kvm/mmu/tdp_mmu.c
1201
kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
arch/x86/kvm/mmu/tdp_mmu.c
1214
trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
arch/x86/kvm/mmu/tdp_mmu.c
1218
trace_kvm_mmu_set_spte(iter->level, iter->gfn,
arch/x86/kvm/mmu/tdp_mmu.c
1219
rcu_dereference(iter->sptep));
arch/x86/kvm/mmu/tdp_mmu.c
1237
static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.c
1244
ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
arch/x86/kvm/mmu/tdp_mmu.c
1248
tdp_mmu_iter_set_spte(kvm, iter, spte);
arch/x86/kvm/mmu/tdp_mmu.c
1256
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.c
1267
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1279
for_each_tdp_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
arch/x86/kvm/mmu/tdp_mmu.c
1283
disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
arch/x86/kvm/mmu/tdp_mmu.c
1289
if (is_frozen_spte(iter.old_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1292
if (iter.level == fault->goal_level)
arch/x86/kvm/mmu/tdp_mmu.c
1296
if (is_shadow_present_pte(iter.old_spte) &&
arch/x86/kvm/mmu/tdp_mmu.c
1297
!is_large_pte(iter.old_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1305
tdp_mmu_init_child_sp(sp, &iter);
arch/x86/kvm/mmu/tdp_mmu.c
1311
if (is_shadow_present_pte(iter.old_spte)) {
arch/x86/kvm/mmu/tdp_mmu.c
1313
KVM_BUG_ON(is_mirror_sptep(iter.sptep), vcpu->kvm);
arch/x86/kvm/mmu/tdp_mmu.c
1314
r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
arch/x86/kvm/mmu/tdp_mmu.c
1316
r = tdp_mmu_link_sp(kvm, &iter, sp, true);
arch/x86/kvm/mmu/tdp_mmu.c
1329
fault->req_level >= iter.level) {
arch/x86/kvm/mmu/tdp_mmu.c
1341
WARN_ON_ONCE(iter.level == fault->goal_level);
arch/x86/kvm/mmu/tdp_mmu.c
1345
ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
arch/x86/kvm/mmu/tdp_mmu.c
1376
static void kvm_tdp_mmu_age_spte(struct kvm *kvm, struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_mmu.c
1380
if (spte_ad_enabled(iter->old_spte)) {
arch/x86/kvm/mmu/tdp_mmu.c
1381
iter->old_spte = tdp_mmu_clear_spte_bits_atomic(iter->sptep,
arch/x86/kvm/mmu/tdp_mmu.c
1383
new_spte = iter->old_spte & ~shadow_accessed_mask;
arch/x86/kvm/mmu/tdp_mmu.c
1385
new_spte = mark_spte_for_access_track(iter->old_spte);
arch/x86/kvm/mmu/tdp_mmu.c
1390
if (__tdp_mmu_set_spte_atomic(kvm, iter, new_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1394
trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
arch/x86/kvm/mmu/tdp_mmu.c
1395
iter->old_spte, new_spte);
arch/x86/kvm/mmu/tdp_mmu.c
1404
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1419
tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
arch/x86/kvm/mmu/tdp_mmu.c
1420
if (!is_accessed_spte(iter.old_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1427
kvm_tdp_mmu_age_spte(kvm, &iter);
arch/x86/kvm/mmu/tdp_mmu.c
1452
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1460
for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) {
arch/x86/kvm/mmu/tdp_mmu.c
1462
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
arch/x86/kvm/mmu/tdp_mmu.c
1465
if (!is_shadow_present_pte(iter.old_spte) ||
arch/x86/kvm/mmu/tdp_mmu.c
1466
!is_last_spte(iter.old_spte, iter.level) ||
arch/x86/kvm/mmu/tdp_mmu.c
1467
!(iter.old_spte & PT_WRITABLE_MASK))
arch/x86/kvm/mmu/tdp_mmu.c
1470
new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
arch/x86/kvm/mmu/tdp_mmu.c
1472
if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1520
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.c
1523
const u64 huge_spte = iter->old_spte;
arch/x86/kvm/mmu/tdp_mmu.c
1524
const int level = iter->level;
arch/x86/kvm/mmu/tdp_mmu.c
1542
ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
arch/x86/kvm/mmu/tdp_mmu.c
1554
trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
arch/x86/kvm/mmu/tdp_mmu.c
1564
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1579
for_each_tdp_pte_min_level(iter, kvm, root, target_level + 1, start, end) {
arch/x86/kvm/mmu/tdp_mmu.c
1581
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
arch/x86/kvm/mmu/tdp_mmu.c
1584
if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1603
trace_kvm_mmu_split_huge_page(iter.gfn,
arch/x86/kvm/mmu/tdp_mmu.c
1604
iter.old_spte,
arch/x86/kvm/mmu/tdp_mmu.c
1605
iter.level, -ENOMEM);
arch/x86/kvm/mmu/tdp_mmu.c
1611
iter.yielded = true;
arch/x86/kvm/mmu/tdp_mmu.c
1615
tdp_mmu_init_child_sp(sp, &iter);
arch/x86/kvm/mmu/tdp_mmu.c
1617
if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
arch/x86/kvm/mmu/tdp_mmu.c
1673
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1677
tdp_root_for_each_pte(iter, kvm, root, start, end) {
arch/x86/kvm/mmu/tdp_mmu.c
1679
if (!is_shadow_present_pte(iter.old_spte) ||
arch/x86/kvm/mmu/tdp_mmu.c
1680
!is_last_spte(iter.old_spte, iter.level))
arch/x86/kvm/mmu/tdp_mmu.c
1683
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
arch/x86/kvm/mmu/tdp_mmu.c
1687
spte_ad_need_write_protect(iter.old_spte));
arch/x86/kvm/mmu/tdp_mmu.c
1689
if (!(iter.old_spte & dbit))
arch/x86/kvm/mmu/tdp_mmu.c
1692
if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
arch/x86/kvm/mmu/tdp_mmu.c
1719
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1725
tdp_root_for_each_leaf_pte(iter, kvm, root, gfn + __ffs(mask),
arch/x86/kvm/mmu/tdp_mmu.c
1731
spte_ad_need_write_protect(iter.old_spte));
arch/x86/kvm/mmu/tdp_mmu.c
1733
if (iter.level > PG_LEVEL_4K ||
arch/x86/kvm/mmu/tdp_mmu.c
1734
!(mask & (1UL << (iter.gfn - gfn))))
arch/x86/kvm/mmu/tdp_mmu.c
1737
mask &= ~(1UL << (iter.gfn - gfn));
arch/x86/kvm/mmu/tdp_mmu.c
1739
if (!(iter.old_spte & dbit))
arch/x86/kvm/mmu/tdp_mmu.c
1742
iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
arch/x86/kvm/mmu/tdp_mmu.c
1743
iter.old_spte, dbit,
arch/x86/kvm/mmu/tdp_mmu.c
1744
iter.level);
arch/x86/kvm/mmu/tdp_mmu.c
1746
trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
arch/x86/kvm/mmu/tdp_mmu.c
1747
iter.old_spte,
arch/x86/kvm/mmu/tdp_mmu.c
1748
iter.old_spte & ~dbit);
arch/x86/kvm/mmu/tdp_mmu.c
1777
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1779
tdp_root_for_each_leaf_pte(iter, kvm, root, start, end) {
arch/x86/kvm/mmu/tdp_mmu.c
1789
*huge_spte = make_huge_spte(kvm, iter.old_spte, parent->level);
arch/x86/kvm/mmu/tdp_mmu.c
1802
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1813
for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_2M, start, end) {
arch/x86/kvm/mmu/tdp_mmu.c
1815
if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
arch/x86/kvm/mmu/tdp_mmu.c
1820
if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
arch/x86/kvm/mmu/tdp_mmu.c
1821
!is_shadow_present_pte(iter.old_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1829
if (is_last_spte(iter.old_spte, iter.level))
arch/x86/kvm/mmu/tdp_mmu.c
1839
if (iter.gfn < start || iter.gfn >= end)
arch/x86/kvm/mmu/tdp_mmu.c
1842
max_mapping_level = kvm_mmu_max_mapping_level(kvm, NULL, slot, iter.gfn);
arch/x86/kvm/mmu/tdp_mmu.c
1843
if (max_mapping_level < iter.level)
arch/x86/kvm/mmu/tdp_mmu.c
1846
r = tdp_mmu_make_huge_spte(kvm, &iter, &huge_spte);
arch/x86/kvm/mmu/tdp_mmu.c
1852
if (tdp_mmu_set_spte_atomic(kvm, &iter, huge_spte))
arch/x86/kvm/mmu/tdp_mmu.c
1886
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1894
for_each_tdp_pte_min_level(iter, kvm, root, min_level, gfn, gfn + 1) {
arch/x86/kvm/mmu/tdp_mmu.c
1895
if (!is_shadow_present_pte(iter.old_spte) ||
arch/x86/kvm/mmu/tdp_mmu.c
1896
!is_last_spte(iter.old_spte, iter.level))
arch/x86/kvm/mmu/tdp_mmu.c
1899
new_spte = iter.old_spte &
arch/x86/kvm/mmu/tdp_mmu.c
1902
if (new_spte == iter.old_spte)
arch/x86/kvm/mmu/tdp_mmu.c
1905
tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
arch/x86/kvm/mmu/tdp_mmu.c
1943
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1949
for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
arch/x86/kvm/mmu/tdp_mmu.c
1950
leaf = iter.level;
arch/x86/kvm/mmu/tdp_mmu.c
1951
sptes[leaf] = iter.old_spte;
arch/x86/kvm/mmu/tdp_mmu.c
1973
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
1976
for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
arch/x86/kvm/mmu/tdp_mmu.c
1977
*spte = iter.old_spte;
arch/x86/kvm/mmu/tdp_mmu.c
1978
sptep = iter.sptep;
arch/x86/kvm/mmu/tdp_mmu.c
240
struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_mmu.c
245
parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
arch/x86/kvm/mmu/tdp_mmu.c
250
tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
arch/x86/kvm/mmu/tdp_mmu.c
651
struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.c
660
WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));
arch/x86/kvm/mmu/tdp_mmu.c
662
if (is_mirror_sptep(iter->sptep) && !is_frozen_spte(new_spte)) {
arch/x86/kvm/mmu/tdp_mmu.c
672
ret = set_external_spte_present(kvm, iter->sptep, iter->gfn,
arch/x86/kvm/mmu/tdp_mmu.c
673
iter->old_spte, new_spte, iter->level);
arch/x86/kvm/mmu/tdp_mmu.c
677
u64 *sptep = rcu_dereference(iter->sptep);
arch/x86/kvm/mmu/tdp_mmu.c
687
if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
arch/x86/kvm/mmu/tdp_mmu.c
712
struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.c
719
ret = __tdp_mmu_set_spte_atomic(kvm, iter, new_spte);
arch/x86/kvm/mmu/tdp_mmu.c
723
handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
arch/x86/kvm/mmu/tdp_mmu.c
724
new_spte, iter->level, true);
arch/x86/kvm/mmu/tdp_mmu.c
772
static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.c
775
WARN_ON_ONCE(iter->yielded);
arch/x86/kvm/mmu/tdp_mmu.c
776
iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
arch/x86/kvm/mmu/tdp_mmu.c
777
iter->old_spte, new_spte,
arch/x86/kvm/mmu/tdp_mmu.c
778
iter->gfn, iter->level);
arch/x86/kvm/mmu/tdp_mmu.c
792
struct tdp_iter *iter)
arch/x86/kvm/mmu/tdp_mmu.c
798
return iter->next_last_level_gfn != iter->yielded_gfn;
arch/x86/kvm/mmu/tdp_mmu.c
816
struct tdp_iter *iter,
arch/x86/kvm/mmu/tdp_mmu.c
819
KVM_MMU_WARN_ON(iter->yielded);
arch/x86/kvm/mmu/tdp_mmu.c
821
if (!tdp_mmu_iter_need_resched(kvm, iter))
arch/x86/kvm/mmu/tdp_mmu.c
836
WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
arch/x86/kvm/mmu/tdp_mmu.c
838
iter->yielded = true;
arch/x86/kvm/mmu/tdp_mmu.c
856
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
858
for_each_tdp_pte_min_level_all(iter, root, zap_level) {
arch/x86/kvm/mmu/tdp_mmu.c
860
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
arch/x86/kvm/mmu/tdp_mmu.c
863
if (!is_shadow_present_pte(iter.old_spte))
arch/x86/kvm/mmu/tdp_mmu.c
866
if (iter.level > zap_level)
arch/x86/kvm/mmu/tdp_mmu.c
870
tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
arch/x86/kvm/mmu/tdp_mmu.c
871
else if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE))
arch/x86/kvm/mmu/tdp_mmu.c
926
struct tdp_iter iter = {
arch/x86/kvm/mmu/tdp_mmu.c
954
if ((tdp_ptep_t)sp->spt != spte_to_child_pt(iter.old_spte, iter.level))
arch/x86/kvm/mmu/tdp_mmu.c
965
if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE)) {
arch/x86/kvm/mmu/tdp_mmu.c
966
WARN_ON_ONCE((tdp_ptep_t)sp->spt == spte_to_child_pt(iter.old_spte, iter.level));
arch/x86/kvm/mmu/tdp_mmu.c
983
struct tdp_iter iter;
arch/x86/kvm/mmu/tdp_mmu.c
991
for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {
arch/x86/kvm/mmu/tdp_mmu.c
993
tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
arch/x86/kvm/mmu/tdp_mmu.c
998
if (!is_shadow_present_pte(iter.old_spte) ||
arch/x86/kvm/mmu/tdp_mmu.c
999
!is_last_spte(iter.old_spte, iter.level))
arch/xtensa/platforms/iss/simdisk.c
108
struct bvec_iter iter;
arch/xtensa/platforms/iss/simdisk.c
111
bio_for_each_segment(bvec, bio, iter) {
block/bio-integrity.c
203
struct iov_iter iter;
block/bio-integrity.c
212
iov_iter_bvec(&iter, ITER_SOURCE, bvec, nr_vecs, len);
block/bio-integrity.c
213
if (!copy_from_iter_full(buf, len, &iter)) {
block/bio-integrity.c
305
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
block/bio-integrity.c
311
size_t offset, bytes = iter->count;
block/bio-integrity.c
321
nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS + 1);
block/bio-integrity.c
331
copy = iov_iter_alignment(iter) &
block/bio-integrity.c
337
ret = iov_iter_extract_pages(iter, &pages, bytes, nr_vecs,
block/bio-integrity.c
397
it = meta->iter;
block/bio-integrity.c
413
iov_iter_advance(&meta->iter, integrity_bytes);
block/bio.c
1198
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
block/bio.c
1202
bio->bi_io_vec = (struct bio_vec *)iter->bvec;
block/bio.c
1204
bio->bi_iter.bi_bvec_done = iter->iov_offset;
block/bio.c
1205
bio->bi_iter.bi_size = iov_iter_count(iter);
block/bio.c
1214
static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
block/bio.c
1222
iov_iter_revert(iter, nbytes);
block/bio.c
1265
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
block/bio.c
1273
if (iov_iter_is_bvec(iter)) {
block/bio.c
1274
bio_iov_bvec_set(bio, iter);
block/bio.c
1275
iov_iter_advance(iter, bio->bi_iter.bi_size);
block/bio.c
1279
if (iov_iter_extract_will_pin(iter))
block/bio.c
1287
ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec,
block/bio.c
1296
} while (iov_iter_count(iter) && !bio_full(bio, 0));
block/bio.c
1300
return bio_iov_iter_align_down(bio, iter, len_align_mask);
block/bio.c
1330
static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter)
block/bio.c
1332
size_t total_len = iov_iter_count(iter);
block/bio.c
1356
if (copy_from_iter(folio_address(folio), this_len, iter) !=
block/bio.c
1370
static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter)
block/bio.c
1372
size_t len = min(iov_iter_count(iter), SZ_1M);
block/bio.c
1382
ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec + 1, len,
block/bio.c
1402
if (iov_iter_extract_will_pin(iter))
block/bio.c
1418
int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter)
block/bio.c
1421
return bio_iov_iter_bounce_write(bio, iter);
block/bio.c
1422
return bio_iov_iter_bounce_read(bio, iter);
block/bio.c
668
struct bvec_iter iter;
block/bio.c
670
__bio_for_each_segment(bv, bio, iter, start)
block/bio.c
688
struct bvec_iter iter;
block/bio.c
698
bio_for_each_segment(bv, bio, iter) {
block/blk-cgroup.c
1140
struct class_dev_iter iter;
block/blk-cgroup.c
1143
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
block/blk-cgroup.c
1144
while ((dev = class_dev_iter_next(&iter))) {
block/blk-cgroup.c
1175
class_dev_iter_exit(&iter);
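class_dev_iter follows the kernel's usual init/next/exit triad; blk-cgroup here, and early-lookup and genhd further down, all walk &block_class the same way. The iterator pins its position with a klist reference, so class_dev_iter_exit() must run even when the loop is left early. A sketch:

    struct class_dev_iter iter;
    struct device *dev;

    class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
    while ((dev = class_dev_iter_next(&iter))) {
            struct gendisk *disk = dev_to_disk(dev);

            /* ... inspect 'disk'; the device stays referenced while iterating ... */
    }
    class_dev_iter_exit(&iter);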
block/blk-crypto-fallback.c
374
struct bio_crypt_ctx *bc, struct bvec_iter iter,
block/blk-crypto-fallback.c
395
__bio_for_each_segment(bv, bio, iter, iter) {
block/blk-integrity.c
127
struct iov_iter iter;
block/blk-integrity.c
129
iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
block/blk-integrity.c
130
ret = bio_integrity_map_user(rq->bio, &iter);
block/blk-integrity.c
33
struct bvec_iter iter;
block/blk-integrity.c
36
bio_for_each_integrity_vec(iv, bio, iter) {
block/blk-map.c
109
&iter);
block/blk-map.c
111
if (!iov_iter_count(&iter))
block/blk-map.c
142
ret = bio_copy_to_iter(bio, bmd->iter);
block/blk-map.c
151
struct iov_iter *iter, gfp_t gfp_mask)
block/blk-map.c
158
unsigned int len = iter->count;
block/blk-map.c
161
bmd = bio_alloc_map_data(iter, gfp_mask);
block/blk-map.c
17
struct iov_iter iter;
block/blk-map.c
226
if (iov_iter_rw(iter) == WRITE &&
block/blk-map.c
228
ret = bio_copy_from_iter(bio, iter);
block/blk-map.c
232
struct iov_iter iter2 = *iter;
block/blk-map.c
242
iov_iter_advance(iter, bio->bi_iter.bi_size);
block/blk-map.c
260
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
block/blk-map.c
263
unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
block/blk-map.c
267
if (!iov_iter_count(iter))
block/blk-map.c
277
ret = bio_iov_iter_get_pages(bio, iter, 0);
block/blk-map.c
32
bmd->iter = *data;
block/blk-map.c
35
bmd->iter.__iov = bmd->iov;
block/blk-map.c
463
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
block/blk-map.c
469
if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
block/blk-map.c
476
bio_iov_bvec_set(bio, iter);
block/blk-map.c
501
const struct iov_iter *iter, gfp_t gfp_mask)
block/blk-map.c
511
else if (iov_iter_alignment(iter) & align)
block/blk-map.c
513
else if (iov_iter_is_bvec(iter))
block/blk-map.c
515
else if (!user_backed_iter(iter))
block/blk-map.c
518
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
block/blk-map.c
521
ret = blk_rq_map_user_bvec(rq, iter);
block/blk-map.c
530
i = *iter;
block/blk-map.c
578
struct iov_iter iter;
block/blk-map.c
581
UIO_FASTIOV, &iov, &iter);
block/blk-map.c
587
iov_iter_truncate(&iter, buf_len);
block/blk-map.c
588
if (check_iter_count && !iov_iter_count(&iter)) {
block/blk-map.c
594
ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
block/blk-map.c
67
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
block/blk-map.c
78
iter);
block/blk-map.c
80
if (!iov_iter_count(iter))
block/blk-map.c
98
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
block/blk-merge.c
27
struct bvec_iter iter = bio->bi_iter;
block/blk-merge.c
34
bio_advance_iter(bio, &iter, iter.bi_size);
block/blk-merge.c
343
struct bvec_iter iter;
block/blk-merge.c
351
bio_for_each_bvec(bv, bio, iter) {
block/blk-merge.c
36
if (!iter.bi_bvec_done)
block/blk-merge.c
37
idx = iter.bi_idx - 1;
block/blk-merge.c
39
idx = iter.bi_idx;
block/blk-merge.c
47
if (iter.bi_bvec_done)
block/blk-merge.c
48
bv->bv_len = iter.bi_bvec_done;
block/blk-merge.c
493
struct req_iterator iter;
block/blk-merge.c
516
rq_for_each_bvec(bv, rq, iter)
block/blk-mq-dma.c
101
iter->len = vec->len;
block/blk-mq-dma.c
106
struct dma_iova_state *state, struct blk_dma_iter *iter,
block/blk-mq-dma.c
11
if (iter->iter.bi_size)
block/blk-mq-dma.c
114
iter->addr = state->addr;
block/blk-mq-dma.c
115
iter->len = dma_iova_size(state);
block/blk-mq-dma.c
117
if (iter->p2pdma.map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
block/blk-mq-dma.c
126
} while (blk_map_iter_next(req, &iter->iter, vec));
block/blk-mq-dma.c
13
if (!iter->bio || !iter->bio->bi_next)
block/blk-mq-dma.c
136
iter->status = errno_to_blk_status(error);
block/blk-mq-dma.c
141
struct blk_map_iter *iter)
block/blk-mq-dma.c
146
*iter = (struct blk_map_iter) {
block/blk-mq-dma.c
148
.iter = {
block/blk-mq-dma.c
153
*iter = (struct blk_map_iter) {
block/blk-mq-dma.c
156
.iter = bio->bi_iter,
block/blk-mq-dma.c
16
iter->bio = iter->bio->bi_next;
block/blk-mq-dma.c
160
*iter = (struct blk_map_iter) {};
block/blk-mq-dma.c
165
struct dma_iova_state *state, struct blk_dma_iter *iter,
block/blk-mq-dma.c
17
if (iter->is_integrity) {
block/blk-mq-dma.c
170
memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
block/blk-mq-dma.c
171
iter->status = BLK_STS_OK;
block/blk-mq-dma.c
172
iter->p2pdma.map = PCI_P2PDMA_MAP_NONE;
block/blk-mq-dma.c
178
if (!blk_map_iter_next(req, &iter->iter, &vec))
block/blk-mq-dma.c
18
iter->iter = bio_integrity(iter->bio)->bip_iter;
block/blk-mq-dma.c
181
switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
block/blk-mq-dma.c
184
return blk_dma_map_bus(iter, &vec);
block/blk-mq-dma.c
19
iter->bvecs = bio_integrity(iter->bio)->bip_vec;
block/blk-mq-dma.c
193
iter->status = BLK_STS_INVAL;
block/blk-mq-dma.c
199
return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
block/blk-mq-dma.c
201
return blk_dma_map_direct(req, dma_dev, iter, &vec);
block/blk-mq-dma.c
21
iter->iter = iter->bio->bi_iter;
block/blk-mq-dma.c
22
iter->bvecs = iter->bio->bi_io_vec;
block/blk-mq-dma.c
227
struct dma_iova_state *state, struct blk_dma_iter *iter)
block/blk-mq-dma.c
229
blk_rq_map_iter_init(req, &iter->iter);
block/blk-mq-dma.c
230
return blk_dma_map_iter_start(req, dma_dev, state, iter,
block/blk-mq-dma.c
253
struct blk_dma_iter *iter)
block/blk-mq-dma.c
257
if (!blk_map_iter_next(req, &iter->iter, &vec))
block/blk-mq-dma.c
260
if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
block/blk-mq-dma.c
261
return blk_dma_map_bus(iter, &vec);
block/blk-mq-dma.c
262
return blk_dma_map_direct(req, dma_dev, iter, &vec);
block/blk-mq-dma.c
27
static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter,
block/blk-mq-dma.c
290
struct blk_map_iter iter;
block/blk-mq-dma.c
294
blk_rq_map_iter_init(rq, &iter);
block/blk-mq-dma.c
295
while (blk_map_iter_next(rq, &iter, &vec)) {
block/blk-mq-dma.c
33
if (!iter->iter.bi_size)
block/blk-mq-dma.c
343
struct blk_dma_iter *iter)
block/blk-mq-dma.c
349
iter->iter = (struct blk_map_iter) {
block/blk-mq-dma.c
351
.iter = bio_integrity(bio)->bip_iter,
block/blk-mq-dma.c
355
return blk_dma_map_iter_start(req, dma_dev, state, iter, len);
block/blk-mq-dma.c
36
bv = mp_bvec_iter_bvec(iter->bvecs, iter->iter);
block/blk-mq-dma.c
379
struct device *dma_dev, struct blk_dma_iter *iter)
block/blk-mq-dma.c
383
if (!blk_map_iter_next(req, &iter->iter, &vec))
block/blk-mq-dma.c
386
if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
block/blk-mq-dma.c
387
return blk_dma_map_bus(iter, &vec);
block/blk-mq-dma.c
388
return blk_dma_map_direct(req, dma_dev, iter, &vec);
block/blk-mq-dma.c
40
bvec_iter_advance_single(iter->bvecs, &iter->iter, bv.bv_len);
block/blk-mq-dma.c
410
struct blk_map_iter iter = {
block/blk-mq-dma.c
412
.iter = bio_integrity(bio)->bip_iter,
block/blk-mq-dma.c
417
while (blk_map_iter_next(rq, &iter, &vec)) {
block/blk-mq-dma.c
47
while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
block/blk-mq-dma.c
50
if (!__blk_map_iter_next(iter))
block/blk-mq-dma.c
53
next = mp_bvec_iter_bvec(iter->bvecs, iter->iter);
block/blk-mq-dma.c
59
bvec_iter_advance_single(iter->bvecs, &iter->iter, next.bv_len);
block/blk-mq-dma.c
80
static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
block/blk-mq-dma.c
82
iter->addr = pci_p2pdma_bus_addr_map(iter->p2pdma.mem, vec->paddr);
block/blk-mq-dma.c
83
iter->len = vec->len;
block/blk-mq-dma.c
88
struct blk_dma_iter *iter, struct phys_vec *vec)
block/blk-mq-dma.c
9
static bool __blk_map_iter_next(struct blk_map_iter *iter)
block/blk-mq-dma.c
92
if (iter->p2pdma.map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
block/blk-mq-dma.c
95
iter->addr = dma_map_phys(dma_dev, vec->paddr, vec->len,
block/blk-mq-dma.c
97
if (dma_mapping_error(dma_dev, iter->addr)) {
block/blk-mq-dma.c
98
iter->status = BLK_STS_RESOURCE;
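The blk-mq-dma hits above are pieces of one machine: blk_map_iter_next() coalesces a request's bvecs into physically contiguous phys_vec chunks, and blk_dma_map_direct()/blk_dma_map_bus() turn each chunk into a DMA address in iter->addr/iter->len, parking failures in iter->status as a blk_status_t. These helpers are internal to blk-mq-dma.c; the in-file consumer pattern, taken from the blk_rq_map_iter_init()/blk_map_iter_next() lines above, looks like:

    struct blk_map_iter iter;
    struct phys_vec vec;

    blk_rq_map_iter_init(rq, &iter);
    while (blk_map_iter_next(rq, &iter, &vec)) {
            /* vec.paddr / vec.len: one physically contiguous chunk of rq */
    }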
block/early-lookup.c
127
struct class_dev_iter iter;
block/early-lookup.c
130
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
block/early-lookup.c
131
while ((dev = class_dev_iter_next(&iter))) {
block/early-lookup.c
149
class_dev_iter_exit(&iter);
block/early-lookup.c
274
struct class_dev_iter iter;
block/early-lookup.c
277
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
block/early-lookup.c
278
while ((dev = class_dev_iter_next(&iter))) {
block/early-lookup.c
315
class_dev_iter_exit(&iter);
block/fops.c
175
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
block/fops.c
181
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
block/fops.c
206
if (is_read && user_backed_iter(iter))
block/fops.c
219
ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
block/fops.c
234
if (unlikely(iov_iter_count(iter))) {
block/fops.c
255
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
block/fops.c
322
struct iov_iter *iter,
block/fops.c
326
bool is_read = iov_iter_rw(iter) == READ;
block/fops.c
344
if (iov_iter_is_bvec(iter)) {
block/fops.c
351
bio_iov_bvec_set(bio, iter);
block/fops.c
353
ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
block/fops.c
360
if (user_backed_iter(iter)) {
block/fops.c
395
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
block/fops.c
40
struct iov_iter *iter)
block/fops.c
400
if (!iov_iter_count(iter))
block/fops.c
403
if (blkdev_dio_invalid(bdev, iocb, iter))
block/fops.c
406
if (iov_iter_rw(iter) == WRITE) {
block/fops.c
42
return (iocb->ki_pos | iov_iter_count(iter)) &
block/fops.c
426
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
block/fops.c
430
return __blkdev_direct_IO_simple(iocb, iter, bdev,
block/fops.c
432
return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
block/fops.c
436
return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
block/fops.c
47
struct iov_iter *iter, struct block_device *bdev)
block/fops.c
49
return bio_iov_iter_get_pages(bio, iter,
block/fops.c
56
struct iov_iter *iter, struct block_device *bdev,
block/fops.c
73
if (iov_iter_rw(iter) == READ) {
block/fops.c
75
if (user_backed_iter(iter))
block/fops.c
87
ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
block/fops.c
92
if (iov_iter_rw(iter) == WRITE)
block/genhd.c
914
struct class_dev_iter *iter;
block/genhd.c
917
iter = kmalloc_obj(*iter);
block/genhd.c
918
if (!iter)
block/genhd.c
921
seqf->private = iter;
block/genhd.c
922
class_dev_iter_init(iter, &block_class, NULL, &disk_type);
block/genhd.c
924
dev = class_dev_iter_next(iter);
block/genhd.c
946
struct class_dev_iter *iter = seqf->private;
block/genhd.c
949
if (iter) {
block/genhd.c
950
class_dev_iter_exit(iter);
block/genhd.c
951
kfree(iter);
block/sed-opal.c
1013
struct opal_resp_tok *iter;
block/sed-opal.c
1047
iter = resp->toks;
block/sed-opal.c
1052
token_length = response_parse_tiny(iter, pos);
block/sed-opal.c
1054
token_length = response_parse_short(iter, pos);
block/sed-opal.c
1056
token_length = response_parse_medium(iter, pos);
block/sed-opal.c
1058
token_length = response_parse_long(iter, pos);
block/sed-opal.c
1062
token_length = response_parse_token(iter, pos);
block/sed-opal.c
1072
iter++;
block/sed-opal.c
1238
struct opal_suspend_data *iter;
block/sed-opal.c
1240
list_for_each_entry(iter, &dev->unlk_lst, node) {
block/sed-opal.c
1241
if (iter->lr == sus->lr) {
block/sed-opal.c
1242
list_del(&iter->node);
block/sed-opal.c
1243
kfree(iter);
block/sed-opal.c
1570
int *iter,
block/sed-opal.c
1575
int n = *iter;
block/sed-opal.c
1609
*iter = n;
block/sed-opal.c
2853
struct opal_suspend_data *iter;
block/sed-opal.c
2874
list_for_each_entry(iter, &dev->unlk_lst, node) {
block/sed-opal.c
2875
if ((iter->unlk.flags & OPAL_SAVE_FOR_LOCK) &&
block/sed-opal.c
2876
iter->lr == lk_unlk->session.opal_key.lr &&
block/sed-opal.c
2877
iter->unlk.session.opal_key.key_len > 0) {
block/sed-opal.c
2879
iter->unlk.session.opal_key.key_len;
block/sed-opal.c
2881
iter->unlk.session.opal_key.key,
block/sed-opal.c
2882
iter->unlk.session.opal_key.key_len);
block/t10-pi.c
100
"(rcvd %04x, want %04x)\n", iter->disk_name,
block/t10-pi.c
101
(unsigned long long)iter->seed,
block/t10-pi.c
107
iter->data_buf += iter->interval;
block/t10-pi.c
108
iter->prot_buf += bi->metadata_size;
block/t10-pi.c
109
iter->seed++;
block/t10-pi.c
137
struct bvec_iter iter;
block/t10-pi.c
143
bip_for_each_vec(iv, bip, iter) {
block/t10-pi.c
189
struct bvec_iter iter;
block/t10-pi.c
191
bip_for_each_vec(iv, bip, iter) {
block/t10-pi.c
216
static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
block/t10-pi.c
222
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
block/t10-pi.c
223
struct crc64_pi_tuple *pi = iter->prot_buf + offset;
block/t10-pi.c
225
pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
block/t10-pi.c
228
iter->prot_buf, offset);
block/t10-pi.c
232
put_unaligned_be48(iter->seed, pi->ref_tag);
block/t10-pi.c
236
iter->data_buf += iter->interval;
block/t10-pi.c
237
iter->prot_buf += bi->metadata_size;
block/t10-pi.c
238
iter->seed++;
block/t10-pi.c
249
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
block/t10-pi.c
255
for (i = 0; i < iter->data_size; i += iter->interval) {
block/t10-pi.c
256
struct crc64_pi_tuple *pi = iter->prot_buf + offset;
block/t10-pi.c
265
seed = lower_48_bits(iter->seed);
block/t10-pi.c
268
iter->disk_name, seed, ref);
block/t10-pi.c
277
csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
block/t10-pi.c
279
csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
block/t10-pi.c
285
iter->disk_name, (unsigned long long)iter->seed,
block/t10-pi.c
291
iter->data_buf += iter->interval;
block/t10-pi.c
292
iter->prot_buf += bi->metadata_size;
block/t10-pi.c
293
iter->seed++;
block/t10-pi.c
311
struct bvec_iter iter;
block/t10-pi.c
317
bip_for_each_vec(iv, bip, iter) {
block/t10-pi.c
352
struct bvec_iter iter;
block/t10-pi.c
354
bip_for_each_vec(iv, bip, iter) {
block/t10-pi.c
37
static void t10_pi_generate(struct blk_integrity_iter *iter,
block/t10-pi.c
379
struct blk_integrity_iter iter;
block/t10-pi.c
383
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
block/t10-pi.c
384
iter.interval = 1 << bi->interval_exp;
block/t10-pi.c
385
iter.seed = bio->bi_iter.bi_sector;
block/t10-pi.c
386
iter.prot_buf = bvec_virt(bip->bip_vec);
block/t10-pi.c
390
iter.data_buf = kaddr;
block/t10-pi.c
391
iter.data_size = bv.bv_len;
block/t10-pi.c
394
ext_pi_crc64_generate(&iter, bi);
block/t10-pi.c
398
t10_pi_generate(&iter, bi);
block/t10-pi.c
411
struct blk_integrity_iter iter;
block/t10-pi.c
419
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
block/t10-pi.c
420
iter.interval = 1 << bi->interval_exp;
block/t10-pi.c
421
iter.seed = saved_iter->bi_sector;
block/t10-pi.c
422
iter.prot_buf = bvec_virt(bip->bip_vec);
block/t10-pi.c
427
iter.data_buf = kaddr;
block/t10-pi.c
428
iter.data_size = bv.bv_len;
block/t10-pi.c
43
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
block/t10-pi.c
431
ret = ext_pi_crc64_verify(&iter, bi);
block/t10-pi.c
435
ret = t10_pi_verify(&iter, bi);
block/t10-pi.c
44
struct t10_pi_tuple *pi = iter->prot_buf + offset;
block/t10-pi.c
46
pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
block/t10-pi.c
50
iter->prot_buf, offset, bi->csum_type);
block/t10-pi.c
54
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
block/t10-pi.c
58
iter->data_buf += iter->interval;
block/t10-pi.c
59
iter->prot_buf += bi->metadata_size;
block/t10-pi.c
60
iter->seed++;
block/t10-pi.c
64
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
block/t10-pi.c
70
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
block/t10-pi.c
71
struct t10_pi_tuple *pi = iter->prot_buf + offset;
block/t10-pi.c
79
lower_32_bits(iter->seed)) {
block/t10-pi.c
81
"(rcvd %u)\n", iter->disk_name,
block/t10-pi.c
83
iter->seed, be32_to_cpu(pi->ref_tag));
block/t10-pi.c
92
csum = t10_pi_csum(0, iter->data_buf, iter->interval,
block/t10-pi.c
95
csum = t10_pi_csum(csum, iter->prot_buf, offset,
drivers/accel/habanalabs/common/command_submission.c
508
struct hl_cs *next = NULL, *iter, *first_cs;
drivers/accel/habanalabs/common/command_submission.c
543
list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node)
drivers/accel/habanalabs/common/command_submission.c
544
if (cs_needs_timeout(iter)) {
drivers/accel/habanalabs/common/command_submission.c
545
next = iter;
drivers/accel/habanalabs/common/hldio.c
266
iov_iter_bvec(&io->iter, io->type, io->bv, 1, io->len_bytes);
drivers/accel/habanalabs/common/hldio.c
268
rc = io->f.filp->f_op->read_iter(&io->kio, &io->iter);
drivers/accel/habanalabs/common/hldio.c
55
struct iov_iter iter;
drivers/acpi/acpi_ipmi.c
359
struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
drivers/acpi/acpi_ipmi.c
363
list_for_each_entry_safe(iter, temp, &ipmi->tx_msg_list, head) {
drivers/acpi/acpi_ipmi.c
364
if (msg == iter) {
drivers/acpi/acpi_ipmi.c
365
tx_msg = iter;
drivers/acpi/acpi_ipmi.c
366
list_del(&iter->head);
drivers/acpi/acpi_ipmi.c
379
struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
drivers/acpi/acpi_ipmi.c
391
list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
drivers/acpi/acpi_ipmi.c
392
if (msg->msgid == iter->tx_msgid) {
drivers/acpi/acpi_ipmi.c
393
tx_msg = iter;
drivers/acpi/acpi_ipmi.c
394
list_del(&iter->head);
drivers/acpi/acpi_ipmi.c
488
struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp;
drivers/acpi/acpi_ipmi.c
491
list_for_each_entry_safe(iter, temp,
drivers/acpi/acpi_ipmi.c
493
if (iter->ipmi_ifnum != iface) {
drivers/acpi/acpi_ipmi.c
494
ipmi_device = iter;
drivers/acpi/acpi_ipmi.c
495
__ipmi_dev_kill(iter);
drivers/acpi/pfr_update.c
471
struct iov_iter iter;
drivers/acpi/pfr_update.c
487
iov_iter_init(&iter, ITER_SOURCE, &iov, 1, len);
drivers/acpi/pfr_update.c
497
if (!copy_from_iter_full(buf_ptr, len, &iter)) {
drivers/base/attribute_container.c
180
#define klist_for_each_entry(pos, head, member, iter) \
drivers/base/attribute_container.c
181
for (klist_iter_init(head, iter); (pos = ({ \
drivers/base/attribute_container.c
182
struct klist_node *n = klist_next(iter); \
drivers/base/attribute_container.c
184
({ klist_iter_exit(iter) ; NULL; }); \
drivers/base/attribute_container.c
214
struct klist_iter iter;
drivers/base/attribute_container.c
222
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
247
struct klist_iter iter;
drivers/base/attribute_container.c
252
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
257
klist_iter_exit(&iter);
drivers/base/attribute_container.c
269
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
271
klist_iter_exit(&iter);
drivers/base/attribute_container.c
360
struct klist_iter iter;
drivers/base/attribute_container.c
370
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
487
struct klist_iter iter;
drivers/base/attribute_container.c
489
klist_for_each_entry(ic, &cont->containers, node, &iter) {
drivers/base/attribute_container.c
493
klist_iter_exit(&iter);
drivers/base/bus.c
1169
static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp,
drivers/base/bus.c
1176
klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
drivers/base/bus.c
1177
iter->type = type;
drivers/base/bus.c
1192
static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
drivers/base/bus.c
1198
knode = klist_next(&iter->ki);
drivers/base/bus.c
1202
if (!iter->type || iter->type == dev->type)
drivers/base/bus.c
1214
static void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
drivers/base/bus.c
1216
klist_iter_exit(&iter->ki);
drivers/base/bus.c
1222
struct subsys_dev_iter iter;
drivers/base/bus.c
1240
subsys_dev_iter_init(&iter, sp, NULL, NULL);
drivers/base/bus.c
1241
while ((dev = subsys_dev_iter_next(&iter)))
drivers/base/bus.c
1243
subsys_dev_iter_exit(&iter);
drivers/base/bus.c
1254
struct subsys_dev_iter iter;
drivers/base/bus.c
1267
subsys_dev_iter_init(&iter, sp, NULL, NULL);
drivers/base/bus.c
1268
while ((dev = subsys_dev_iter_next(&iter)))
drivers/base/bus.c
1270
subsys_dev_iter_exit(&iter);
drivers/base/class.c
320
void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
drivers/base/class.c
326
memset(iter, 0, sizeof(*iter));
drivers/base/class.c
335
klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
drivers/base/class.c
336
iter->type = type;
drivers/base/class.c
337
iter->sp = sp;
drivers/base/class.c
353
struct device *class_dev_iter_next(struct class_dev_iter *iter)
drivers/base/class.c
358
if (!iter->sp)
drivers/base/class.c
362
knode = klist_next(&iter->ki);
drivers/base/class.c
366
if (!iter->type || iter->type == dev->type)
drivers/base/class.c
379
void class_dev_iter_exit(struct class_dev_iter *iter)
drivers/base/class.c
381
klist_iter_exit(&iter->ki);
drivers/base/class.c
382
subsys_put(iter->sp);
drivers/base/class.c
408
struct class_dev_iter iter;
drivers/base/class.c
420
class_dev_iter_init(&iter, class, start, NULL);
drivers/base/class.c
421
while ((dev = class_dev_iter_next(&iter))) {
drivers/base/class.c
426
class_dev_iter_exit(&iter);
drivers/base/class.c
457
struct class_dev_iter iter;
drivers/base/class.c
468
class_dev_iter_init(&iter, class, start, NULL);
drivers/base/class.c
469
while ((dev = class_dev_iter_next(&iter))) {
drivers/base/class.c
475
class_dev_iter_exit(&iter);
drivers/base/class.c
486
struct class_dev_iter iter;
drivers/base/class.c
505
class_dev_iter_init(&iter, parent, NULL, NULL);
drivers/base/class.c
506
while ((dev = class_dev_iter_next(&iter)))
drivers/base/class.c
508
class_dev_iter_exit(&iter);
drivers/base/class.c
520
struct class_dev_iter iter;
drivers/base/class.c
533
class_dev_iter_init(&iter, parent, NULL, NULL);
drivers/base/class.c
534
while ((dev = class_dev_iter_next(&iter)))
drivers/base/class.c
536
class_dev_iter_exit(&iter);
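
class.c exports the class_dev_iter trio that class_for_each_device() and class_find_device() are built on; note in the hits above how _init pins the subsystem (iter->sp) and _exit drops it. Typical direct use; find_first_child_dev and the match test are hypothetical:

#include <linux/device.h>
#include <linux/device/class.h>

static struct device *find_first_child_dev(const struct class *class)
{
	struct class_dev_iter iter;
	struct device *dev;

	/* NULL start and NULL type: visit every device of the class */
	class_dev_iter_init(&iter, class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (dev->parent) {		/* hypothetical match */
			get_device(dev);	/* keep a reference past _exit */
			break;
		}
	}
	class_dev_iter_exit(&iter);
	return dev;	/* NULL if nothing matched */
}
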
drivers/block/aoe/aoe.h
111
struct bvec_iter iter;
drivers/block/aoe/aoe.h
129
struct bvec_iter iter;
drivers/block/aoe/aoecmd.c
1025
bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
drivers/block/aoe/aoecmd.c
1030
iter.bi_size = cnt;
drivers/block/aoe/aoecmd.c
1032
__bio_for_each_segment(bv, bio, iter, iter) {
drivers/block/aoe/aoecmd.c
1131
if (n > f->iter.bi_size) {
drivers/block/aoe/aoecmd.c
1135
n, f->iter.bi_size);
drivers/block/aoe/aoecmd.c
1139
bvcpy(skb, f->buf->bio, f->iter, n);
drivers/block/aoe/aoecmd.c
1183
if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
drivers/block/aoe/aoecmd.c
1639
buf->iter.bi_size = 0;
drivers/block/aoe/aoecmd.c
198
memset(&f->iter, 0, sizeof(f->iter));
drivers/block/aoe/aoecmd.c
296
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
drivers/block/aoe/aoecmd.c
301
__bio_for_each_segment(bv, bio, iter, iter)
drivers/block/aoe/aoecmd.c
342
ah->scnt = f->iter.bi_size >> 9;
drivers/block/aoe/aoecmd.c
343
put_lba(ah, f->iter.bi_sector);
drivers/block/aoe/aoecmd.c
352
skb_fillup(skb, f->buf->bio, f->iter);
drivers/block/aoe/aoecmd.c
354
skb->len += f->iter.bi_size;
drivers/block/aoe/aoecmd.c
355
skb->data_len = f->iter.bi_size;
drivers/block/aoe/aoecmd.c
356
skb->truesize += f->iter.bi_size;
drivers/block/aoe/aoecmd.c
385
f->iter = buf->iter;
drivers/block/aoe/aoecmd.c
386
f->iter.bi_size = min_t(unsigned long,
drivers/block/aoe/aoecmd.c
388
f->iter.bi_size);
drivers/block/aoe/aoecmd.c
389
bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
drivers/block/aoe/aoecmd.c
391
if (!buf->iter.bi_size)
drivers/block/aoe/aoecmd.c
575
nf->iter = f->iter;
drivers/block/aoe/aoecmd.c
606
f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
drivers/block/aoe/aoecmd.c
609
for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
drivers/block/aoe/aoecmd.c
616
skb->len += f->iter.bi_size;
drivers/block/aoe/aoecmd.c
617
skb->data_len = f->iter.bi_size;
drivers/block/aoe/aoecmd.c
618
skb->truesize += f->iter.bi_size;
drivers/block/aoe/aoecmd.c
839
buf->iter = bio->bi_iter;
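
aoecmd.c leans on struct bvec_iter being a small value type: buf->iter is copied into each frame, the copy's bi_size is clamped to the frame's byte budget, and the master iterator is advanced past that span with bio_advance_iter(). A sketch of the split (carve and budget are hypothetical names):

#include <linux/bio.h>

/* Hand the next 'budget' bytes of bio progress to a private iterator. */
static struct bvec_iter carve(struct bio *bio, struct bvec_iter *pos,
			      unsigned int budget)
{
	struct bvec_iter frame = *pos;		/* plain struct copy */

	frame.bi_size = min(budget, pos->bi_size);
	bio_advance_iter(bio, pos, frame.bi_size);
	return frame;		/* can be walked later, independently */
}
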
drivers/block/drbd/drbd_main.c
1571
struct bvec_iter iter;
drivers/block/drbd/drbd_main.c
1574
bio_for_each_segment(bvec, bio, iter) {
drivers/block/drbd/drbd_main.c
1579
bio_iter_last(bvec, iter)
drivers/block/drbd/drbd_main.c
1590
struct bvec_iter iter;
drivers/block/drbd/drbd_main.c
1593
bio_for_each_segment(bvec, bio, iter) {
drivers/block/drbd/drbd_main.c
1598
bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
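
drbd_main.c walks each bio segment by segment and uses bio_iter_last() to detect the final bvec, so it can drop MSG_MORE on the last send. The minimal shape of such a loop, with the actual network send stubbed out:

#include <linux/bio.h>

static void send_bio_segments(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		bool last = bio_iter_last(bvec, iter);

		/*
		 * send_page(bvec.bv_page, bvec.bv_offset, bvec.bv_len,
		 *	     last ? 0 : MSG_MORE);   -- hypothetical sink
		 */
		(void)last;
	}
}
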
drivers/block/drbd/drbd_receiver.c
1798
struct bvec_iter iter;
drivers/block/drbd/drbd_receiver.c
1820
bio_for_each_segment(bvec, bio, iter) {
drivers/block/drbd/drbd_req.c
332
struct drbd_request *iter = req;
drivers/block/drbd/drbd_req.c
339
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
drivers/block/drbd/drbd_req.c
340
const unsigned int s = iter->rq_state;
drivers/block/drbd/drbd_req.c
343
req = iter;
drivers/block/drbd/drbd_req.c
362
struct drbd_request *iter = req;
drivers/block/drbd/drbd_req.c
369
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
drivers/block/drbd/drbd_req.c
370
const unsigned int s = iter->rq_state;
drivers/block/drbd/drbd_req.c
373
req = iter;
drivers/block/drbd/drbd_req.c
392
struct drbd_request *iter = req;
drivers/block/drbd/drbd_req.c
399
list_for_each_entry_continue(iter, &connection->transfer_log, tl_requests) {
drivers/block/drbd/drbd_req.c
400
const unsigned int s = iter->rq_state;
drivers/block/drbd/drbd_req.c
403
req = iter;
drivers/block/drbd/drbd_worker.c
320
struct bvec_iter iter;
drivers/block/drbd/drbd_worker.c
326
bio_for_each_segment(bvec, bio, iter) {
drivers/block/floppy.c
2434
struct req_iterator iter;
drivers/block/floppy.c
2464
rq_for_each_segment(bv, current_req, iter) {
drivers/block/loop.c
343
struct iov_iter iter;
drivers/block/loop.c
386
iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
drivers/block/loop.c
387
iter.iov_offset = offset;
drivers/block/loop.c
402
ret = file->f_op->write_iter(&cmd->iocb, &iter);
drivers/block/loop.c
404
ret = file->f_op->read_iter(&cmd->iocb, &iter);
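
loop.c services block requests against the backing file by wrapping the request's bvec array in an iov_iter and calling the file's ->read_iter/->write_iter directly; setting iter.iov_offset afterwards starts the walk partway into the first bvec. Sketch of the write side (the kiocb is assumed to be prepared as loop.c does):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t write_bvecs(struct file *file, struct kiocb *iocb,
			   struct bio_vec *bvec, unsigned int nr_bvec,
			   size_t bytes, size_t offset)
{
	struct iov_iter iter;

	iov_iter_bvec(&iter, ITER_SOURCE, bvec, nr_bvec, bytes);
	iter.iov_offset = offset;	/* begin mid-bvec, as loop.c does */
	return file->f_op->write_iter(iocb, &iter);
}
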
drivers/block/n64cart.c
90
struct bvec_iter iter;
drivers/block/n64cart.c
94
bio_for_each_segment(bvec, bio, iter) {
drivers/block/nbd.c
552
struct iov_iter *iter, int msg_flags, int *sent)
drivers/block/nbd.c
565
msg.msg_iter = *iter;
drivers/block/nbd.c
600
struct iov_iter *iter, int msg_flags, int *sent)
drivers/block/nbd.c
605
return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
drivers/block/nbd.c
743
struct bvec_iter iter;
drivers/block/nbd.c
746
bio_for_each_segment(bvec, bio, iter) {
drivers/block/nbd.c
747
bool is_last = !next && bio_iter_last(bvec, iter);
drivers/block/nbd.c
937
struct req_iterator iter;
drivers/block/nbd.c
941
rq_for_each_segment(bvec, req, iter) {
drivers/block/null_blk/main.c
1292
struct req_iterator iter;
drivers/block/null_blk/main.c
1296
rq_for_each_segment(bvec, rq, iter) {
drivers/block/ps3disk.c
111
struct req_iterator iter;
drivers/block/ps3disk.c
113
rq_for_each_segment(bv, req, iter)
drivers/block/ps3disk.c
84
struct req_iterator iter;
drivers/block/ps3disk.c
87
rq_for_each_segment(bvec, req, iter) {
drivers/block/ps3disk.c
89
__func__, __LINE__, bio_sectors(iter.bio),
drivers/block/ps3disk.c
90
iter.bio->bi_iter.bi_sector);
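
ps3disk, nbd, and null_blk iterate whole requests with rq_for_each_segment(); struct req_iterator carries both the current bio (iter.bio) and the inner bvec_iter (iter.iter), which is what the ps3disk debug print above reaches into. Shape of the loop:

#include <linux/blk-mq.h>

static size_t count_req_bytes(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	size_t total = 0;

	rq_for_each_segment(bvec, req, iter)
		total += bvec.bv_len;	/* one contiguous page fragment */

	return total;
}
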
drivers/block/ps3vram.c
537
struct bvec_iter iter;
drivers/block/ps3vram.c
540
bio_for_each_segment(bvec, bio, iter) {
drivers/block/rbd.c
2170
rbd_assert(obj_req->bvec_pos.iter.bi_size ==
drivers/block/rbd.c
2465
union rbd_img_fill_iter iter;
drivers/block/rbd.c
2510
fctx->iter = *fctx->pos;
drivers/block/rbd.c
2517
fctx->set_pos_fn, &fctx->iter);
drivers/block/rbd.c
2562
fctx->iter = *fctx->pos;
drivers/block/rbd.c
2569
fctx->count_fn, &fctx->iter);
drivers/block/rbd.c
2586
fctx->iter = *fctx->pos;
drivers/block/rbd.c
2592
fctx->copy_fn, &fctx->iter);
drivers/block/rbd.c
2646
obj_req->bvec_pos.iter.bi_size += bv.bv_len;
drivers/block/rbd.c
2671
struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
drivers/block/rbd.c
2706
obj_req->bvec_pos.iter.bi_size += bv.bv_len;
drivers/block/rbd.c
2734
.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
drivers/block/rbd.c
3002
.iter = { .bi_size = bytes },
drivers/block/rbd.c
758
struct rbd_client *rbdc = NULL, *iter;
drivers/block/rbd.c
764
list_for_each_entry(iter, &rbd_client_list, node) {
drivers/block/rbd.c
765
if (!ceph_compare_options(ceph_opts, iter->client)) {
drivers/block/rbd.c
766
__rbd_get_client(iter);
drivers/block/rbd.c
768
rbdc = iter;
drivers/block/ublk_drv.c
1306
struct req_iterator iter;
drivers/block/ublk_drv.c
1310
rq_for_each_segment(bv, req, iter) {
drivers/block/ublk_drv.c
1323
struct bvec_iter iter;
drivers/block/ublk_drv.c
1329
bio_for_each_integrity_vec(iv, bio, iter) {
drivers/block/ublk_drv.c
1370
struct iov_iter iter;
drivers/block/ublk_drv.c
1373
import_ubuf(dir, u64_to_user_ptr(io->buf.addr), rq_bytes, &iter);
drivers/block/ublk_drv.c
1374
return ublk_copy_user_pages(req, 0, &iter, dir);
drivers/block/ublk_drv.c
1389
struct iov_iter iter;
drivers/block/ublk_drv.c
1394
import_ubuf(dir, u64_to_user_ptr(io->buf.addr), io->res, &iter);
drivers/block/ublk_drv.c
1395
return ublk_copy_user_pages(req, 0, &iter, dir);
drivers/block/ublk_drv.c
3490
struct ublk_batch_io_iter *iter,
drivers/block/ublk_drv.c
3500
for (i = 0; i < bytes; i += iter->elem_bytes) {
drivers/block/ublk_drv.c
3502
(const struct ublk_elem_header *)&iter->buf[i];
drivers/block/ublk_drv.c
3514
iter->done += i;
drivers/block/ublk_drv.c
3518
static int ublk_walk_cmd_buf(struct ublk_batch_io_iter *iter,
drivers/block/ublk_drv.c
3527
while (iter->done < iter->total) {
drivers/block/ublk_drv.c
3528
unsigned int len = min(sizeof(iter->buf), iter->total - iter->done);
drivers/block/ublk_drv.c
3530
if (copy_from_user(iter->buf, iter->uaddr + iter->done, len)) {
drivers/block/ublk_drv.c
3536
ret = __ublk_walk_cmd_buf(ubq, iter, data, len, cb);
drivers/block/ublk_drv.c
3568
static void ublk_batch_revert_prep_cmd(struct ublk_batch_io_iter *iter,
drivers/block/ublk_drv.c
3574
iter->total = iter->done;
drivers/block/ublk_drv.c
3575
iter->done = 0;
drivers/block/ublk_drv.c
3577
ret = ublk_walk_cmd_buf(iter, data, ublk_batch_unprep_io);
drivers/block/ublk_drv.c
3616
struct ublk_batch_io_iter iter = {
drivers/block/ublk_drv.c
3624
ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_prep_io);
drivers/block/ublk_drv.c
3626
if (ret && iter.done)
drivers/block/ublk_drv.c
3627
ublk_batch_revert_prep_cmd(&iter, data);
drivers/block/ublk_drv.c
3697
struct ublk_batch_io_iter iter = {
drivers/block/ublk_drv.c
3706
ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_commit_io);
drivers/block/ublk_drv.c
3711
return iter.done == 0 ? ret : iter.done;
drivers/block/ublk_drv.c
3934
ublk_user_copy(struct kiocb *iocb, struct iov_iter *iter, int dir)
drivers/block/ublk_drv.c
3947
if (!user_backed_iter(iter))
drivers/block/ublk_drv.c
4005
ret = ublk_copy_user_integrity(req, buf_off, iter, dir);
drivers/block/ublk_drv.c
4007
ret = ublk_copy_user_pages(req, buf_off, iter, dir);
drivers/block/zloop.c
393
struct iov_iter iter;
drivers/block/zloop.c
512
iov_iter_bvec(&iter, rw, cmd->bvec, nr_bvec, blk_rq_bytes(rq));
drivers/block/zloop.c
519
iov_iter_bvec(&iter, rw,
drivers/block/zloop.c
522
iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
drivers/block/zloop.c
533
ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
drivers/block/zloop.c
535
ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
drivers/block/zram/zram_drv.c
2721
struct bvec_iter iter = bio->bi_iter;
drivers/block/zram/zram_drv.c
2724
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
drivers/block/zram/zram_drv.c
2725
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
drivers/block/zram/zram_drv.c
2727
struct bio_vec bv = bio_iter_iovec(bio, iter);
drivers/block/zram/zram_drv.c
2742
bio_advance_iter_single(bio, &iter, bv.bv_len);
drivers/block/zram/zram_drv.c
2743
} while (iter.bi_size);
drivers/block/zram/zram_drv.c
2752
struct bvec_iter iter = bio->bi_iter;
drivers/block/zram/zram_drv.c
2755
u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
drivers/block/zram/zram_drv.c
2756
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
drivers/block/zram/zram_drv.c
2758
struct bio_vec bv = bio_iter_iovec(bio, iter);
drivers/block/zram/zram_drv.c
2772
bio_advance_iter_single(bio, &iter, bv.bv_len);
drivers/block/zram/zram_drv.c
2773
} while (iter.bi_size);
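
zram_drv.c cannot use bio_for_each_segment() here because it recomputes index/offset per page, so it walks by hand: copy bio->bi_iter, pull the current bvec with bio_iter_iovec(), advance with bio_advance_iter_single(), and stop when bi_size reaches zero. The skeleton:

#include <linux/bio.h>

static void walk_bio_by_hand(struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;	/* private copy, bio untouched */

	do {
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		/* use bv.bv_page / bv.bv_offset / bv.bv_len here */
		bio_advance_iter_single(bio, &iter, bv.bv_len);
	} while (iter.bi_size);
}
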
drivers/char/mem.c
456
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
drivers/char/mem.c
460
while (iov_iter_count(iter)) {
drivers/char/mem.c
461
size_t chunk = iov_iter_count(iter), n;
drivers/char/mem.c
465
n = iov_iter_zero(chunk, iter);
drivers/char/mem.c
466
if (!n && iov_iter_count(iter))
drivers/char/misc.c
121
struct miscdevice *c = NULL, *iter;
drivers/char/misc.c
127
list_for_each_entry(iter, &misc_list, list) {
drivers/char/misc.c
128
if (iter->minor != minor)
drivers/char/misc.c
130
c = iter;
drivers/char/misc.c
131
new_fops = fops_get(iter->fops);
drivers/char/misc.c
141
list_for_each_entry(iter, &misc_list, list) {
drivers/char/misc.c
142
if (iter->minor != minor)
drivers/char/misc.c
144
c = iter;
drivers/char/misc.c
145
new_fops = fops_get(iter->fops);
drivers/char/random.c
1386
struct iov_iter iter;
drivers/char/random.c
1407
ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
drivers/char/random.c
1410
return get_random_bytes_user(&iter);
drivers/char/random.c
1419
static ssize_t write_pool_user(struct iov_iter *iter)
drivers/char/random.c
1425
if (unlikely(!iov_iter_count(iter)))
drivers/char/random.c
1429
copied = copy_from_iter(block, sizeof(block), iter);
drivers/char/random.c
1432
if (!iov_iter_count(iter) || copied != sizeof(block))
drivers/char/random.c
1447
static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
drivers/char/random.c
1449
return write_pool_user(iter);
drivers/char/random.c
1452
static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
drivers/char/random.c
1469
current->comm, iov_iter_count(iter));
drivers/char/random.c
1473
return get_random_bytes_user(iter);
drivers/char/random.c
1476
static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
drivers/char/random.c
1488
return get_random_bytes_user(iter);
drivers/char/random.c
1512
struct iov_iter iter;
drivers/char/random.c
1524
ret = import_ubuf(ITER_SOURCE, p, len, &iter);
drivers/char/random.c
1527
ret = write_pool_user(&iter);
drivers/char/random.c
434
static ssize_t get_random_bytes_user(struct iov_iter *iter)
drivers/char/random.c
440
if (unlikely(!iov_iter_count(iter)))
drivers/char/random.c
455
if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
drivers/char/random.c
456
ret = copy_to_iter(&chacha_state.x[4], CHACHA_KEY_SIZE, iter);
drivers/char/random.c
465
copied = copy_to_iter(block, sizeof(block), iter);
drivers/char/random.c
467
if (!iov_iter_count(iter) || copied != sizeof(block))
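
random.c funnels every user buffer through an iov_iter (import_ubuf on the syscall paths) so one worker, get_random_bytes_user(), can serve read, urandom, and getrandom() alike; a short copy_to_iter() ends the loop. A reduced sketch of that worker, using the real get_random_bytes() as the block source:

#include <linux/random.h>
#include <linux/string.h>
#include <linux/uio.h>

static ssize_t fill_user(void __user *ubuf, size_t len)
{
	struct iov_iter iter;
	ssize_t ret, copied = 0;
	u8 block[64];

	ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
	if (unlikely(ret))
		return ret;

	while (iov_iter_count(&iter)) {
		size_t n;

		get_random_bytes(block, sizeof(block));
		n = copy_to_iter(block, sizeof(block), &iter);
		copied += n;
		if (n != sizeof(block))	/* tail of the buffer, or a fault */
			break;
	}
	memzero_explicit(block, sizeof(block));
	return copied ? copied : -EFAULT;
}
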
drivers/char/xillybus/xillybus_class.c
179
struct xilly_unit *unit = NULL, *iter;
drivers/char/xillybus/xillybus_class.c
183
list_for_each_entry(iter, &unit_list, list_entry)
drivers/char/xillybus/xillybus_class.c
184
if (iter->private_data == private_data) {
drivers/char/xillybus/xillybus_class.c
185
unit = iter;
drivers/char/xillybus/xillybus_class.c
220
struct xilly_unit *unit = NULL, *iter;
drivers/char/xillybus/xillybus_class.c
224
list_for_each_entry(iter, &unit_list, list_entry)
drivers/char/xillybus/xillybus_class.c
225
if (iter->major == major &&
drivers/char/xillybus/xillybus_class.c
226
minor >= iter->lowest_minor &&
drivers/char/xillybus/xillybus_class.c
227
minor < (iter->lowest_minor + iter->num_nodes)) {
drivers/char/xillybus/xillybus_class.c
228
unit = iter;
drivers/clk/ti/clkctrl.c
228
struct omap_clkctrl_clk *entry = NULL, *iter;
drivers/clk/ti/clkctrl.c
236
list_for_each_entry(iter, &provider->clocks, node) {
drivers/clk/ti/clkctrl.c
237
if (iter->reg_offset == clkspec->args[0] &&
drivers/clk/ti/clkctrl.c
238
iter->bit_offset == clkspec->args[1]) {
drivers/clk/ti/clkctrl.c
239
entry = iter;
drivers/crypto/cavium/nitrox/nitrox_main.c
271
struct nitrox_device *ndev = NULL, *iter;
drivers/crypto/cavium/nitrox/nitrox_main.c
274
list_for_each_entry(iter, &ndevlist, list) {
drivers/crypto/cavium/nitrox/nitrox_main.c
275
if (nitrox_ready(iter)) {
drivers/crypto/cavium/nitrox/nitrox_main.c
276
ndev = iter;
drivers/crypto/marvell/cesa/cesa.h
550
struct sg_mapping_iter iter;
drivers/crypto/marvell/cesa/cesa.h
790
static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
drivers/crypto/marvell/cesa/cesa.h
793
iter->len = len;
drivers/crypto/marvell/cesa/cesa.h
794
iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE);
drivers/crypto/marvell/cesa/cesa.h
795
iter->offset = 0;
drivers/crypto/marvell/cesa/cesa.h
798
static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter,
drivers/crypto/marvell/cesa/cesa.h
802
iter->op_offset = 0;
drivers/crypto/marvell/cesa/cesa.h
803
iter->offset = 0;
drivers/crypto/marvell/cesa/cesa.h
804
iter->sg = sg;
drivers/crypto/marvell/cesa/cesa.h
805
iter->dir = dir;
drivers/crypto/marvell/cesa/cesa.h
809
mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter,
drivers/crypto/marvell/cesa/cesa.h
812
return min(iter->op_len - sgiter->op_offset,
drivers/crypto/marvell/cesa/cesa.h
820
static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
drivers/crypto/marvell/cesa/cesa.h
822
iter->offset += iter->op_len;
drivers/crypto/marvell/cesa/cesa.h
823
iter->op_len = min(iter->len - iter->offset,
drivers/crypto/marvell/cesa/cesa.h
826
return iter->op_len;
drivers/crypto/marvell/cesa/cipher.c
321
struct mv_cesa_skcipher_dma_iter iter;
drivers/crypto/marvell/cesa/cipher.c
348
mv_cesa_skcipher_req_iter_init(&iter, req);
drivers/crypto/marvell/cesa/cipher.c
361
mv_cesa_set_crypt_op_len(op, iter.base.op_len);
drivers/crypto/marvell/cesa/cipher.c
364
ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
drivers/crypto/marvell/cesa/cipher.c
365
&iter.src, flags);
drivers/crypto/marvell/cesa/cipher.c
375
ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
drivers/crypto/marvell/cesa/cipher.c
376
&iter.dst, flags);
drivers/crypto/marvell/cesa/cipher.c
380
} while (mv_cesa_skcipher_req_iter_next_op(&iter));
drivers/crypto/marvell/cesa/cipher.c
41
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
drivers/crypto/marvell/cesa/cipher.c
44
mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
drivers/crypto/marvell/cesa/cipher.c
45
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
drivers/crypto/marvell/cesa/cipher.c
46
mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
drivers/crypto/marvell/cesa/cipher.c
50
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
drivers/crypto/marvell/cesa/cipher.c
52
iter->src.op_offset = 0;
drivers/crypto/marvell/cesa/cipher.c
53
iter->dst.op_offset = 0;
drivers/crypto/marvell/cesa/cipher.c
55
return mv_cesa_req_dma_iter_next_op(&iter->base);
drivers/crypto/marvell/cesa/hash.c
27
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
drivers/crypto/marvell/cesa/hash.c
36
mv_cesa_req_dma_iter_init(&iter->base, len);
drivers/crypto/marvell/cesa/hash.c
37
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
drivers/crypto/marvell/cesa/hash.c
38
iter->src.op_offset = creq->cache_ptr;
drivers/crypto/marvell/cesa/hash.c
42
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
drivers/crypto/marvell/cesa/hash.c
44
iter->src.op_offset = 0;
drivers/crypto/marvell/cesa/hash.c
46
return mv_cesa_req_dma_iter_next_op(&iter->base);
drivers/crypto/marvell/cesa/hash.c
631
struct mv_cesa_ahash_dma_iter iter;
drivers/crypto/marvell/cesa/hash.c
654
mv_cesa_ahash_req_iter_init(&iter, req);
drivers/crypto/marvell/cesa/hash.c
664
if (iter.base.len > iter.src.op_offset) {
drivers/crypto/marvell/cesa/hash.c
672
&iter.base,
drivers/crypto/marvell/cesa/hash.c
673
&iter.src, flags);
drivers/crypto/marvell/cesa/hash.c
677
frag_len = iter.base.op_len;
drivers/crypto/marvell/cesa/hash.c
679
if (!mv_cesa_ahash_req_iter_next_op(&iter))
drivers/crypto/marvell/cesa/hash.c
692
frag_len = iter.base.op_len;
drivers/crypto/marvell/cesa/hash.c
701
op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
drivers/crypto/marvell/cesa/hash.c
729
iter.base.len;
drivers/crypto/marvell/cesa/tdma.c
15
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
drivers/crypto/marvell/cesa/tdma.c
31
if (sgiter->op_offset == iter->op_len)
drivers/crypto/talitos.c
467
int tail, iter;
drivers/crypto/talitos.c
480
iter = tail;
drivers/crypto/talitos.c
481
while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
drivers/crypto/talitos.c
482
priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
drivers/crypto/talitos.c
483
iter = (iter + 1) & (priv->fifo_len - 1);
drivers/crypto/talitos.c
484
if (iter == tail) {
drivers/crypto/talitos.c
490
if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
drivers/crypto/talitos.c
493
edesc = container_of(priv->chan[ch].fifo[iter].desc,
drivers/crypto/talitos.c
499
return priv->chan[ch].fifo[iter].desc->hdr;
drivers/cxl/core/port.c
1019
struct cxl_port *iter = port;
drivers/cxl/core/port.c
1021
while (iter && !is_cxl_root(iter))
drivers/cxl/core/port.c
1022
iter = to_cxl_port(iter->dev.parent);
drivers/cxl/core/port.c
1024
if (!iter)
drivers/cxl/core/port.c
1026
get_device(&iter->dev);
drivers/cxl/core/port.c
1027
return to_cxl_root(iter);
drivers/cxl/core/port.c
1800
struct device *iter;
drivers/cxl/core/port.c
1820
for (iter = dev; iter; iter = grandparent(iter)) {
drivers/cxl/core/port.c
1821
struct device *dport_dev = grandparent(iter);
drivers/cxl/core/port.c
1831
dev_name(iter), dev_name(dport_dev));
drivers/cxl/core/port.c
1836
dev_name(iter), dev_name(dport_dev),
drivers/cxl/core/port.c
2370
struct cxl_port *iter = port;
drivers/cxl/core/port.c
2394
dport = iter->parent_dport;
drivers/cxl/core/port.c
2395
iter = to_cxl_port(iter->dev.parent);
drivers/cxl/core/port.c
2396
is_cxl_root = parent_port_is_cxl_root(iter);
drivers/cxl/core/port.c
2410
dport = iter->parent_dport;
drivers/cxl/core/port.c
710
struct cxl_port *iter;
drivers/cxl/core/port.c
721
iter = port;
drivers/cxl/core/port.c
722
while (!iter->host_bridge &&
drivers/cxl/core/port.c
723
!is_cxl_root(to_cxl_port(iter->dev.parent)))
drivers/cxl/core/port.c
724
iter = to_cxl_port(iter->dev.parent);
drivers/cxl/core/port.c
725
if (iter->host_bridge)
drivers/cxl/core/port.c
726
port->host_bridge = iter->host_bridge;
drivers/cxl/core/port.c
730
port->host_bridge = iter->uport_dev;
drivers/cxl/core/region.c
1391
struct cxl_port *iter = port;
drivers/cxl/core/region.c
1422
cxl_rr_iter = cxl_rr_load(iter, cxlr);
drivers/cxl/core/region.c
1424
iter = to_cxl_port(iter->dev.parent);
drivers/cxl/core/region.c
1425
} while (!is_cxl_root(iter));
drivers/cxl/core/region.c
1612
struct cxl_port *iter;
drivers/cxl/core/region.c
1631
iter = cxled_to_port(cxled);
drivers/cxl/core/region.c
1632
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
drivers/cxl/core/region.c
1633
iter = to_cxl_port(iter->dev.parent);
drivers/cxl/core/region.c
1635
for (ep = cxl_ep_load(iter, cxlmd); iter;
drivers/cxl/core/region.c
1636
iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
drivers/cxl/core/region.c
1637
cxl_port_reset_targets(iter, cxlr);
drivers/cxl/core/region.c
1648
struct cxl_port *iter;
drivers/cxl/core/region.c
1664
iter = cxled_to_port(cxled);
drivers/cxl/core/region.c
1665
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
drivers/cxl/core/region.c
1666
iter = to_cxl_port(iter->dev.parent);
drivers/cxl/core/region.c
1672
for (ep = cxl_ep_load(iter, cxlmd); iter;
drivers/cxl/core/region.c
1673
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
drivers/cxl/core/region.c
1674
rc = cxl_port_setup_targets(iter, cxlr, cxled);
drivers/cxl/core/region.c
1748
struct cxl_port *iter;
drivers/cxl/core/region.c
1758
for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
drivers/cxl/core/region.c
1759
iter = to_cxl_port(iter->dev.parent)) {
drivers/cxl/core/region.c
1760
rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
drivers/cxl/core/region.c
1768
for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
drivers/cxl/core/region.c
1769
iter = to_cxl_port(iter->dev.parent))
drivers/cxl/core/region.c
1770
cxl_port_detach_region(iter, cxlr, cxled);
drivers/cxl/core/region.c
1899
struct cxl_port *iter, *port = cxled_to_port(cxled);
drivers/cxl/core/region.c
1934
for (iter = port; iter; iter = parent_port_of(iter)) {
drivers/cxl/core/region.c
1935
if (is_cxl_root(iter))
drivers/cxl/core/region.c
1938
rc = find_pos_and_ways(iter, hpa_range, &parent_pos,
drivers/cxl/core/region.c
2192
for (struct cxl_port *iter = cxled_to_port(cxled); !is_cxl_root(iter);
drivers/cxl/core/region.c
2193
iter = to_cxl_port(iter->dev.parent))
drivers/cxl/core/region.c
2194
cxl_port_detach_region(iter, cxlr, cxled);
drivers/cxl/core/region.c
264
struct cxl_port *iter = cxled_to_port(cxled);
drivers/cxl/core/region.c
271
while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
drivers/cxl/core/region.c
272
iter = to_cxl_port(iter->dev.parent);
drivers/cxl/core/region.c
274
for (ep = cxl_ep_load(iter, cxlmd); iter;
drivers/cxl/core/region.c
275
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
drivers/cxl/core/region.c
279
cxl_rr = cxl_rr_load(iter, cxlr);
drivers/cxl/core/region.c
321
struct cxl_port *iter;
drivers/cxl/core/region.c
325
for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
drivers/cxl/core/region.c
326
iter = to_cxl_port(iter->dev.parent)) {
drivers/cxl/core/region.c
327
cxl_rr = cxl_rr_load(iter, cxlr);
drivers/cxl/core/region.c
336
for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
drivers/cxl/core/region.c
337
iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
drivers/cxl/core/region.c
338
cxl_rr = cxl_rr_load(iter, cxlr);
drivers/cxl/core/region.c
4001
struct cxl_region_ref *iter;
drivers/cxl/core/region.c
4009
xa_for_each(&endpoint->regions, index, iter) {
drivers/cxl/core/region.c
4010
struct cxl_region_params *p = &iter->region->params;
drivers/cxl/core/region.c
980
struct cxl_region_ref *cxl_rr, *iter;
drivers/cxl/core/region.c
984
xa_for_each(&port->regions, index, iter) {
drivers/cxl/core/region.c
985
struct cxl_region_params *ip = &iter->region->params;
drivers/cxl/core/region.c
991
if (auto_order_ok(port, iter->region, cxld))
drivers/cxl/core/region.c
996
dev_name(&iter->region->dev), ip->res, p->res);
drivers/cxl/port.c
290
struct cxl_port *endpoint, *iter, *down;
drivers/cxl/port.c
297
for (iter = parent_port, down = NULL; !is_cxl_root(iter);
drivers/cxl/port.c
298
down = iter, iter = to_cxl_port(iter->dev.parent)) {
drivers/cxl/port.c
301
ep = cxl_ep_load(iter, cxlmd);
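
The cxl/core hits repeat one topology idiom: climb iter = to_cxl_port(iter->dev.parent) until is_cxl_root() is true, relying on each port's device parent being the next port up. The generic shape (the per-port visit is a hypothetical placeholder):

#include "cxl.h"	/* drivers/cxl internal header */

/* Walk from a port up to, but not including, the CXL root. */
static void walk_up(struct cxl_port *port)
{
	struct cxl_port *iter;

	for (iter = port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		/* visit(iter); -- hypothetical per-port work */
	}
}
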
drivers/dax/super.c
224
void *addr, size_t bytes, struct iov_iter *iter)
drivers/dax/super.c
228
return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
drivers/dma-buf/dma-buf.c
498
struct dma_fence_unwrap iter;
drivers/dma-buf/dma-buf.c
519
dma_fence_unwrap_for_each(f, &iter, fence)
drivers/dma-buf/dma-buf.c
527
dma_fence_unwrap_for_each(f, &iter, fence)
drivers/dma-buf/dma-fence-unwrap.c
120
struct dma_fence_unwrap *iter)
drivers/dma-buf/dma-fence-unwrap.c
130
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
drivers/dma-buf/dma-fence-unwrap.c
164
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
drivers/dma-buf/st-dma-fence-unwrap.c
126
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
147
dma_fence_unwrap_for_each(fence, &iter, array) {
drivers/dma-buf/st-dma-fence-unwrap.c
170
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
191
dma_fence_unwrap_for_each(fence, &iter, chain) {
drivers/dma-buf/st-dma-fence-unwrap.c
214
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
239
dma_fence_unwrap_for_each(fence, &iter, chain) {
drivers/dma-buf/st-dma-fence-unwrap.c
262
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
285
dma_fence_unwrap_for_each(fence, &iter, f3) {
drivers/dma-buf/st-dma-fence-unwrap.c
314
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
329
dma_fence_unwrap_for_each(fence, &iter, f2) {
drivers/dma-buf/st-dma-fence-unwrap.c
353
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
388
dma_fence_unwrap_for_each(fence, &iter, f4) {
drivers/dma-buf/st-dma-fence-unwrap.c
419
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
454
dma_fence_unwrap_for_each(fence, &iter, a2) {
drivers/dma-buf/st-dma-fence-unwrap.c
485
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
515
dma_fence_unwrap_for_each(fence, &iter, f5) {
drivers/dma-buf/st-dma-fence-unwrap.c
548
struct dma_fence_unwrap iter;
drivers/dma-buf/st-dma-fence-unwrap.c
592
dma_fence_unwrap_for_each(fence, &iter, f7) {
drivers/dma-buf/sync_file.c
299
struct dma_fence_unwrap iter;
drivers/dma-buf/sync_file.c
313
dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
drivers/dma-buf/sync_file.c
338
dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
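
The dma-buf hits use dma_fence_unwrap_for_each() to flatten fence containers (arrays, chains) into their leaf fences, with struct dma_fence_unwrap as pure on-stack cursor state; sync_file.c above counts leaves exactly this way. For instance:

#include <linux/dma-fence-unwrap.h>

static unsigned int count_leaf_fences(struct dma_fence *fence)
{
	struct dma_fence_unwrap iter;
	struct dma_fence *f;
	unsigned int count = 0;

	dma_fence_unwrap_for_each(f, &iter, fence)
		count++;	/* f visits each leaf; containers are skipped */

	return count;
}
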
drivers/dma/at_xdmac.c
1542
struct at_xdmac_desc *desc, *_desc, *iter;
drivers/dma/at_xdmac.c
1658
list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
drivers/dma/at_xdmac.c
1659
dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
drivers/dma/at_xdmac.c
1660
residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
drivers/dma/at_xdmac.c
1661
if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
drivers/dma/at_xdmac.c
1662
desc = iter;
drivers/dma/fsl-edma-common.c
601
u16 soff, doff, iter;
drivers/dma/fsl-edma-common.c
636
iter = period_len / nbytes;
drivers/dma/fsl-edma-common.c
668
fsl_chan->attr, soff, nbytes, 0, iter,
drivers/dma/fsl-edma-common.c
669
iter, doff, last_sg, major_int, false, true);
drivers/dma/fsl-edma-common.c
685
u16 soff, doff, iter;
drivers/dma/fsl-edma-common.c
760
iter = sg_dma_len(sg) / nbytes;
drivers/dma/fsl-edma-common.c
765
nbytes, 0, iter, iter, doff, last_sg,
drivers/dma/fsl-edma-common.c
771
nbytes, 0, iter, iter, doff, last_sg,
drivers/dma/mpc512x_dma.c
697
int iter, i;
drivers/dma/mpc512x_dma.c
778
iter = len / tcd->nbytes;
drivers/dma/mpc512x_dma.c
779
if (iter >= 1 << 15) {
drivers/dma/mpc512x_dma.c
784
tcd->biter = iter & 0x1ff;
drivers/dma/mpc512x_dma.c
785
tcd->biter_linkch = iter >> 9;
drivers/dma/mv_xor.c
217
struct mv_xor_desc_slot *iter, *_iter;
drivers/dma/mv_xor.c
220
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
drivers/dma/mv_xor.c
223
if (async_tx_test_ack(&iter->async_tx)) {
drivers/dma/mv_xor.c
224
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
225
if (!list_empty(&iter->sg_tx_list)) {
drivers/dma/mv_xor.c
226
list_splice_tail_init(&iter->sg_tx_list,
drivers/dma/mv_xor.c
265
struct mv_xor_desc_slot *iter, *_iter;
drivers/dma/mv_xor.c
280
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
drivers/dma/mv_xor.c
284
hw_desc = iter->hw_desc;
drivers/dma/mv_xor.c
286
cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
drivers/dma/mv_xor.c
290
mv_desc_clean_slot(iter, mv_chan);
drivers/dma/mv_xor.c
293
if (iter->async_tx.phys == current_desc) {
drivers/dma/mv_xor.c
298
if (iter->async_tx.phys == current_desc) {
drivers/dma/mv_xor.c
311
iter = list_entry(mv_chan->chain.next,
drivers/dma/mv_xor.c
314
mv_chan_start_new_chain(mv_chan, iter);
drivers/dma/mv_xor.c
316
if (!list_is_last(&iter->node, &mv_chan->chain)) {
drivers/dma/mv_xor.c
321
iter = list_entry(iter->node.next,
drivers/dma/mv_xor.c
324
mv_chan_start_new_chain(mv_chan, iter);
drivers/dma/mv_xor.c
351
struct mv_xor_desc_slot *iter;
drivers/dma/mv_xor.c
356
iter = list_first_entry(&mv_chan->free_slots,
drivers/dma/mv_xor.c
360
list_move_tail(&iter->node, &mv_chan->allocated_slots);
drivers/dma/mv_xor.c
365
async_tx_ack(&iter->async_tx);
drivers/dma/mv_xor.c
366
iter->async_tx.cookie = -EBUSY;
drivers/dma/mv_xor.c
368
return iter;
drivers/dma/mv_xor.c
630
struct mv_xor_desc_slot *iter, *_iter;
drivers/dma/mv_xor.c
637
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
drivers/dma/mv_xor.c
640
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
642
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
drivers/dma/mv_xor.c
645
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
647
list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
drivers/dma/mv_xor.c
650
list_move_tail(&iter->node, &mv_chan->free_slots);
drivers/dma/mv_xor.c
653
iter, _iter, &mv_chan->free_slots, node) {
drivers/dma/mv_xor.c
654
list_del(&iter->node);
drivers/dma/mv_xor.c
655
kfree(iter);
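
mv_xor.c (and ppc4xx/adma.c below) manage descriptor slots by shuttling them between chain, completed, allocated, and free lists; the _safe variant is what makes list_move_tail() legal inside the walk. Distilled, with struct my_slot as a hypothetical stand-in:

#include <linux/list.h>

struct my_slot {
	struct list_head node;
};

/* Return every node on 'busy' to the tail of 'free'. */
static void recycle_all(struct list_head *busy, struct list_head *free)
{
	struct my_slot *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, busy, node)
		list_move_tail(&iter->node, free);
}
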
drivers/dma/ppc4xx/adma.c
1097
struct ppc440spe_adma_desc_slot *iter;
drivers/dma/ppc4xx/adma.c
1110
iter = chan_last_sub[chan->device->id];
drivers/dma/ppc4xx/adma.c
1111
BUG_ON(!iter);
drivers/dma/ppc4xx/adma.c
1114
iter = chan_first_cdb[chan->device->id];
drivers/dma/ppc4xx/adma.c
1115
BUG_ON(!iter);
drivers/dma/ppc4xx/adma.c
1116
ppc440spe_dma_put_desc(chan, iter);
drivers/dma/ppc4xx/adma.c
1121
if (!iter->hw_next)
drivers/dma/ppc4xx/adma.c
1125
list_for_each_entry_continue(iter, &chan->chain, chain_node) {
drivers/dma/ppc4xx/adma.c
1126
ppc440spe_dma_put_desc(chan, iter);
drivers/dma/ppc4xx/adma.c
1127
if (!iter->hw_next)
drivers/dma/ppc4xx/adma.c
1423
struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
drivers/dma/ppc4xx/adma.c
1432
list_for_each_entry(iter, &tdesc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
1436
return iter;
drivers/dma/ppc4xx/adma.c
1535
struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
drivers/dma/ppc4xx/adma.c
1554
list_for_each_entry_safe(iter, _iter, &chan->chain,
drivers/dma/ppc4xx/adma.c
1559
iter->async_tx.cookie, iter->idx, busy, iter->phys,
drivers/dma/ppc4xx/adma.c
1560
ppc440spe_desc_get_link(iter, chan), current_desc,
drivers/dma/ppc4xx/adma.c
1561
async_tx_test_ack(&iter->async_tx));
drivers/dma/ppc4xx/adma.c
1576
if (iter->phys == current_desc) {
drivers/dma/ppc4xx/adma.c
1578
if (busy || ppc440spe_desc_get_link(iter, chan)) {
drivers/dma/ppc4xx/adma.c
1588
slot_cnt = iter->slot_cnt;
drivers/dma/ppc4xx/adma.c
1589
slots_per_op = iter->slots_per_op;
drivers/dma/ppc4xx/adma.c
1598
group_start = iter;
drivers/dma/ppc4xx/adma.c
1642
cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
drivers/dma/ppc4xx/adma.c
1645
if (ppc440spe_adma_clean_slot(iter, chan))
drivers/dma/ppc4xx/adma.c
1687
struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
drivers/dma/ppc4xx/adma.c
1701
iter = chan->last_used;
drivers/dma/ppc4xx/adma.c
1703
iter = list_entry(&chan->all_slots,
drivers/dma/ppc4xx/adma.c
1706
list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
drivers/dma/ppc4xx/adma.c
1710
if (iter->slots_per_op) {
drivers/dma/ppc4xx/adma.c
1717
alloc_start = iter;
drivers/dma/ppc4xx/adma.c
1723
iter = alloc_start;
drivers/dma/ppc4xx/adma.c
1728
async_tx_ack(&iter->async_tx);
drivers/dma/ppc4xx/adma.c
1730
list_add_tail(&iter->chain_node, &chain);
drivers/dma/ppc4xx/adma.c
1731
alloc_tail = iter;
drivers/dma/ppc4xx/adma.c
1732
iter->async_tx.cookie = 0;
drivers/dma/ppc4xx/adma.c
1733
iter->hw_next = NULL;
drivers/dma/ppc4xx/adma.c
1734
iter->flags = 0;
drivers/dma/ppc4xx/adma.c
1735
iter->slot_cnt = num_slots;
drivers/dma/ppc4xx/adma.c
1736
iter->xor_check_result = NULL;
drivers/dma/ppc4xx/adma.c
1738
iter->slots_per_op = slots_per_op - i;
drivers/dma/ppc4xx/adma.c
1739
last_used = iter;
drivers/dma/ppc4xx/adma.c
174
struct ppc440spe_adma_desc_slot *iter)
drivers/dma/ppc4xx/adma.c
1740
iter = list_entry(iter->slot_node.next,
drivers/dma/ppc4xx/adma.c
176
for (; iter; iter = iter->hw_next)
drivers/dma/ppc4xx/adma.c
177
print_cb(chan, iter->hw_desc);
drivers/dma/ppc4xx/adma.c
2100
struct ppc440spe_adma_desc_slot *iter;
drivers/dma/ppc4xx/adma.c
2110
iter = list_first_entry(&sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
2113
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
2115
iter->hw_next = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2118
clear_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2119
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
2122
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2124
ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
drivers/dma/ppc4xx/adma.c
2125
ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
drivers/dma/ppc4xx/adma.c
2127
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
drivers/dma/ppc4xx/adma.c
2128
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2134
iter = list_first_entry(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
2137
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
2138
iter->hw_next = NULL;
drivers/dma/ppc4xx/adma.c
2140
set_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2142
clear_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2144
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
2146
ppc440spe_desc_set_src_addr(iter, chan, 0,
drivers/dma/ppc4xx/adma.c
2148
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2151
ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
drivers/dma/ppc4xx/adma.c
2153
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
drivers/dma/ppc4xx/adma.c
2154
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2186
struct ppc440spe_adma_desc_slot *iter;
drivers/dma/ppc4xx/adma.c
2194
iter = list_first_entry(&sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
2197
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
2198
iter->hw_next = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2201
clear_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2202
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
2205
ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
drivers/dma/ppc4xx/adma.c
2207
ppc440spe_desc_set_dest_addr(iter, chan, 0,
drivers/dma/ppc4xx/adma.c
2209
ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
drivers/dma/ppc4xx/adma.c
2211
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
drivers/dma/ppc4xx/adma.c
2212
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2216
iter = list_first_entry(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
2219
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
2221
iter->hw_next = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2225
set_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2227
clear_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2229
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
2231
ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
drivers/dma/ppc4xx/adma.c
2233
ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
drivers/dma/ppc4xx/adma.c
2235
ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
drivers/dma/ppc4xx/adma.c
2237
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
drivers/dma/ppc4xx/adma.c
2238
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2244
iter = list_first_entry(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
2247
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
2248
iter->hw_next = NULL;
drivers/dma/ppc4xx/adma.c
2250
set_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2252
clear_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
2254
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
2256
ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
drivers/dma/ppc4xx/adma.c
2258
ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
drivers/dma/ppc4xx/adma.c
2260
ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
drivers/dma/ppc4xx/adma.c
2262
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
drivers/dma/ppc4xx/adma.c
2263
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2278
struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
drivers/dma/ppc4xx/adma.c
2405
list_for_each_entry(iter, &sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
2407
ppc440spe_desc_set_byte_count(iter,
drivers/dma/ppc4xx/adma.c
2409
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2423
struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
drivers/dma/ppc4xx/adma.c
2445
list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
2446
ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
drivers/dma/ppc4xx/adma.c
2448
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
drivers/dma/ppc4xx/adma.c
2450
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2452
ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
drivers/dma/ppc4xx/adma.c
2453
iter->rxor_cursor.len = len;
drivers/dma/ppc4xx/adma.c
2454
iter->descs_per_op = descs_per_op;
drivers/dma/ppc4xx/adma.c
2457
list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
2460
ppc440spe_adma_init_dma2rxor_slot(iter, src,
drivers/dma/ppc4xx/adma.c
2462
if (likely(!list_is_last(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
2465
iter->hw_next =
drivers/dma/ppc4xx/adma.c
2466
list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2469
ppc440spe_xor_set_link(iter, iter->hw_next);
drivers/dma/ppc4xx/adma.c
2472
iter->hw_next = NULL;
drivers/dma/ppc4xx/adma.c
2588
struct ppc440spe_adma_desc_slot *sw_desc, *iter;
drivers/dma/ppc4xx/adma.c
2626
list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
2627
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
drivers/dma/ppc4xx/adma.c
2629
iter->unmap_len = len;
drivers/dma/ppc4xx/adma.c
2636
iter = sw_desc->group_head;
drivers/dma/ppc4xx/adma.c
2637
chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
drivers/dma/ppc4xx/adma.c
2638
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
2639
iter->hw_next = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2642
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
2644
iter->src_cnt = 0;
drivers/dma/ppc4xx/adma.c
2645
iter->dst_cnt = 0;
drivers/dma/ppc4xx/adma.c
2646
ppc440spe_desc_set_dest_addr(iter, chan, 0,
drivers/dma/ppc4xx/adma.c
2648
ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
drivers/dma/ppc4xx/adma.c
2649
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
drivers/dma/ppc4xx/adma.c
2651
iter->unmap_len = 0;
drivers/dma/ppc4xx/adma.c
2659
iter = list_first_entry(&sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
2662
chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
drivers/dma/ppc4xx/adma.c
2665
iter = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2670
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
2671
iter->hw_next = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2674
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
2676
iter->src_cnt = 0;
drivers/dma/ppc4xx/adma.c
2677
iter->dst_cnt = 0;
drivers/dma/ppc4xx/adma.c
2678
ppc440spe_desc_set_dest_addr(iter, chan, 0,
drivers/dma/ppc4xx/adma.c
2680
ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
drivers/dma/ppc4xx/adma.c
2681
ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
drivers/dma/ppc4xx/adma.c
2683
iter->unmap_len = 0;
drivers/dma/ppc4xx/adma.c
2693
list_for_each_entry_reverse(iter, &sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
2703
&iter->flags);
drivers/dma/ppc4xx/adma.c
2706
&iter->flags);
drivers/dma/ppc4xx/adma.c
2711
&iter->flags);
drivers/dma/ppc4xx/adma.c
2714
&iter->flags);
drivers/dma/ppc4xx/adma.c
2717
iter->xor_check_result = pqres;
drivers/dma/ppc4xx/adma.c
2723
*iter->xor_check_result = 0;
drivers/dma/ppc4xx/adma.c
2724
ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
drivers/dma/ppc4xx/adma.c
2732
list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
2737
chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
drivers/dma/ppc4xx/adma.c
2738
ppc440spe_desc_set_src_addr(iter, chan, 0,
drivers/dma/ppc4xx/adma.c
2744
ppc440spe_desc_set_src_mult(iter, chan,
drivers/dma/ppc4xx/adma.c
2808
static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
drivers/dma/ppc4xx/adma.c
2815
ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
drivers/dma/ppc4xx/adma.c
2818
ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
drivers/dma/ppc4xx/adma.c
2821
ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
drivers/dma/ppc4xx/adma.c
2832
struct ppc440spe_adma_desc_slot *iter;
drivers/dma/ppc4xx/adma.c
2866
iter = ppc440spe_get_group_entry(sw_desc, index);
drivers/dma/ppc4xx/adma.c
2869
list_for_each_entry_from(iter,
drivers/dma/ppc4xx/adma.c
2871
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2875
list_for_each_entry_from(iter,
drivers/dma/ppc4xx/adma.c
2877
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2879
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2891
iter = ppc440spe_get_group_entry(
drivers/dma/ppc4xx/adma.c
2893
ppc440spe_adma_pq_zero_op(iter, chan,
drivers/dma/ppc4xx/adma.c
2899
iter = ppc440spe_get_group_entry(
drivers/dma/ppc4xx/adma.c
2901
ppc440spe_adma_pq_zero_op(iter, chan,
drivers/dma/ppc4xx/adma.c
2923
iter = ppc440spe_get_group_entry(sw_desc, index++);
drivers/dma/ppc4xx/adma.c
2924
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2929
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
2931
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2939
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
2943
list_for_each_entry_from(iter,
drivers/dma/ppc4xx/adma.c
2947
iter, chan,
drivers/dma/ppc4xx/adma.c
2953
list_for_each_entry_from(iter,
drivers/dma/ppc4xx/adma.c
2957
iter, chan,
drivers/dma/ppc4xx/adma.c
2961
iter, chan,
drivers/dma/ppc4xx/adma.c
2987
iter = ppc440spe_get_group_entry(sw_desc, 0);
drivers/dma/ppc4xx/adma.c
2989
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
2992
iter = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
2999
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
3002
ppc440spe_desc_set_dest_addr(iter,
drivers/dma/ppc4xx/adma.c
3004
iter = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
3022
struct ppc440spe_adma_desc_slot *iter, *end;
drivers/dma/ppc4xx/adma.c
3041
iter = ppc440spe_get_group_entry(sw_desc, idx);
drivers/dma/ppc4xx/adma.c
3045
list_for_each_entry_from(iter, &sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
3047
if (unlikely(iter == end))
drivers/dma/ppc4xx/adma.c
3049
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
3051
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
3057
list_for_each_entry_from(iter, &sw_desc->group_list,
drivers/dma/ppc4xx/adma.c
3059
if (unlikely(iter == end))
drivers/dma/ppc4xx/adma.c
3061
ppc440spe_desc_set_dest_addr(iter, chan,
drivers/dma/ppc4xx/adma.c
3100
struct ppc440spe_adma_desc_slot *iter = NULL;
drivers/dma/ppc4xx/adma.c
3138
iter = ppc440spe_get_group_entry(sw_desc, 0);
drivers/dma/ppc4xx/adma.c
3144
iter = NULL;
drivers/dma/ppc4xx/adma.c
3150
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
3165
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
3169
if (likely(iter)) {
drivers/dma/ppc4xx/adma.c
3170
ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
drivers/dma/ppc4xx/adma.c
3178
iter = ppc440spe_get_group_entry(sw_desc, 1);
drivers/dma/ppc4xx/adma.c
3179
ppc440spe_desc_set_src_addr(iter, chan, 0,
drivers/dma/ppc4xx/adma.c
3187
iter = sw_desc->group_head;
drivers/dma/ppc4xx/adma.c
3188
if (iter->dst_cnt == 2) {
drivers/dma/ppc4xx/adma.c
3190
ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
drivers/dma/ppc4xx/adma.c
3193
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
3196
ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
drivers/dma/ppc4xx/adma.c
322
struct ppc440spe_adma_desc_slot *iter;
drivers/dma/ppc4xx/adma.c
336
list_for_each_entry(iter, &desc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
337
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
338
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
340
if (likely(!list_is_last(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
343
iter->hw_next = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
345
clear_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
3451
struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
drivers/dma/ppc4xx/adma.c
3464
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
3474
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
3493
iter = ppc440spe_get_group_entry(sw_desc, index + znum);
drivers/dma/ppc4xx/adma.c
3498
if (likely(iter)) {
drivers/dma/ppc4xx/adma.c
3499
ppc440spe_desc_set_src_mult(iter, chan,
drivers/dma/ppc4xx/adma.c
3514
iter = sw_desc->group_head;
drivers/dma/ppc4xx/adma.c
3517
ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
drivers/dma/ppc4xx/adma.c
352
iter->hw_next = NULL;
drivers/dma/ppc4xx/adma.c
3520
iter = ppc440spe_get_group_entry(sw_desc,
drivers/dma/ppc4xx/adma.c
3523
ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
drivers/dma/ppc4xx/adma.c
3534
struct ppc440spe_adma_desc_slot *iter, *_iter;
drivers/dma/ppc4xx/adma.c
354
set_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
3541
list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
drivers/dma/ppc4xx/adma.c
3544
list_del(&iter->chain_node);
drivers/dma/ppc4xx/adma.c
3546
list_for_each_entry_safe_reverse(iter, _iter,
drivers/dma/ppc4xx/adma.c
3548
list_del(&iter->slot_node);
drivers/dma/ppc4xx/adma.c
3549
kfree(iter);
drivers/dma/ppc4xx/adma.c
356
clear_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
367
iter = list_first_entry(&desc->group_list,
drivers/dma/ppc4xx/adma.c
3700
struct ppc440spe_adma_desc_slot *sw_desc, *iter;
drivers/dma/ppc4xx/adma.c
3718
list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
3719
ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
drivers/dma/ppc4xx/adma.c
372
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
3720
iter->unmap_len = PAGE_SIZE;
drivers/dma/ppc4xx/adma.c
374
iter = list_first_entry(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
380
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
382
iter = list_first_entry(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
387
list_for_each_entry_from(iter, &desc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
388
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
398
iter = list_first_entry(&desc->group_list,
drivers/dma/ppc4xx/adma.c
401
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
405
iter = list_first_entry(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
408
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
414
iter = list_first_entry(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
417
list_for_each_entry_from(iter, &desc->group_list,
drivers/dma/ppc4xx/adma.c
419
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
435
struct ppc440spe_adma_desc_slot *iter;
drivers/dma/ppc4xx/adma.c
445
iter = list_first_entry(&desc->group_list,
drivers/dma/ppc4xx/adma.c
447
iter = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
451
iter = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
455
list_for_each_entry_from(iter, &desc->group_list, chain_node) {
drivers/dma/ppc4xx/adma.c
456
hw_desc = iter->hw_desc;
drivers/dma/ppc4xx/adma.c
457
memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
drivers/dma/ppc4xx/adma.c
458
iter->src_cnt = 0;
drivers/dma/ppc4xx/adma.c
459
iter->dst_cnt = 0;
drivers/dma/ppc4xx/adma.c
475
if (likely(!list_is_last(&iter->chain_node,
drivers/dma/ppc4xx/adma.c
478
iter->hw_next = list_entry(iter->chain_node.next,
drivers/dma/ppc4xx/adma.c
487
iter->hw_next = NULL;
drivers/dma/ppc4xx/adma.c
491
set_bit(PPC440SPE_DESC_INT, &iter->flags);
drivers/dma/ppc4xx/adma.c
938
struct ppc440spe_adma_desc_slot *iter;
drivers/dma/ppc4xx/adma.c
945
list_for_each_entry(iter, &chan->chain,
drivers/dma/ppc4xx/adma.c
947
if (iter->phys == phys)
drivers/dma/ppc4xx/adma.c
954
BUG_ON(&iter->chain_node == &chan->chain);
drivers/dma/ppc4xx/adma.c
956
if (iter->xor_check_result) {
drivers/dma/ppc4xx/adma.c
958
&iter->flags)) {
drivers/dma/ppc4xx/adma.c
959
*iter->xor_check_result |=
drivers/dma/ppc4xx/adma.c
963
&iter->flags)) {
drivers/dma/ppc4xx/adma.c
964
*iter->xor_check_result |=
drivers/dma/qcom/hidma_ll.c
68
#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size) \
drivers/dma/qcom/hidma_ll.c
70
iter += size; \
drivers/dma/qcom/hidma_ll.c
71
if (iter >= ring_size) \
drivers/dma/qcom/hidma_ll.c
72
iter -= ring_size; \
drivers/firewire/core-transaction.c
105
t = find_and_pop_transaction_entry(card, iter == transaction);
drivers/firewire/core-transaction.c
1183
iter->node_id == source && iter->tlabel == tlabel);
drivers/firewire/core-transaction.c
85
struct fw_transaction *iter, *t = NULL; \
drivers/firewire/core-transaction.c
86
list_for_each_entry(iter, &card->transactions.list, link) { \
drivers/firewire/core-transaction.c
88
t = iter; \
drivers/firewire/sbp2.c
412
struct sbp2_orb *orb = NULL, *iter;
drivers/firewire/sbp2.c
437
list_for_each_entry(iter, &lu->orb_list, link) {
drivers/firewire/sbp2.c
439
STATUS_GET_ORB_LOW(status) == iter->request_bus) {
drivers/firewire/sbp2.c
440
iter->rcode = RCODE_COMPLETE;
drivers/firewire/sbp2.c
441
list_del(&iter->link);
drivers/firewire/sbp2.c
442
orb = iter;
drivers/firmware/arm_scmi/clock.c
303
void *iter;
drivers/firmware/arm_scmi/clock.c
306
iter = ph->hops->iter_response_init(ph, &ops, 0,
drivers/firmware/arm_scmi/clock.c
310
if (IS_ERR(iter))
drivers/firmware/arm_scmi/clock.c
311
return PTR_ERR(iter);
drivers/firmware/arm_scmi/clock.c
313
ret = ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/clock.c
515
void *iter;
drivers/firmware/arm_scmi/clock.c
527
iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
drivers/firmware/arm_scmi/clock.c
531
if (IS_ERR(iter))
drivers/firmware/arm_scmi/clock.c
532
return PTR_ERR(iter);
drivers/firmware/arm_scmi/clock.c
534
ret = ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/driver.c
1815
static int scmi_iterator_run(void *iter)
drivers/firmware/arm_scmi/driver.c
1821
struct scmi_iterator *i = iter;
drivers/firmware/arm_scmi/perf.c
469
void *iter;
drivers/firmware/arm_scmi/perf.c
476
iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
drivers/firmware/arm_scmi/perf.c
480
if (IS_ERR(iter))
drivers/firmware/arm_scmi/perf.c
481
return PTR_ERR(iter);
drivers/firmware/arm_scmi/perf.c
483
ret = ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/pinctrl.c
291
void *iter;
drivers/firmware/arm_scmi/pinctrl.c
310
iter = ph->hops->iter_response_init(ph, &ops, size,
drivers/firmware/arm_scmi/pinctrl.c
314
if (IS_ERR(iter))
drivers/firmware/arm_scmi/pinctrl.c
315
return PTR_ERR(iter);
drivers/firmware/arm_scmi/pinctrl.c
317
return ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/pinctrl.c
400
void *iter;
drivers/firmware/arm_scmi/pinctrl.c
425
iter = ph->hops->iter_response_init(ph, &ops, max_configs,
drivers/firmware/arm_scmi/pinctrl.c
429
if (IS_ERR(iter))
drivers/firmware/arm_scmi/pinctrl.c
430
return PTR_ERR(iter);
drivers/firmware/arm_scmi/pinctrl.c
432
return ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/protocols.h
278
int (*iter_response_run)(void *iter);
drivers/firmware/arm_scmi/sensors.c
356
void *iter;
drivers/firmware/arm_scmi/sensors.c
367
iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,
drivers/firmware/arm_scmi/sensors.c
371
if (IS_ERR(iter))
drivers/firmware/arm_scmi/sensors.c
372
return PTR_ERR(iter);
drivers/firmware/arm_scmi/sensors.c
374
return ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/sensors.c
494
void *iter;
drivers/firmware/arm_scmi/sensors.c
505
iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
drivers/firmware/arm_scmi/sensors.c
509
if (IS_ERR(iter))
drivers/firmware/arm_scmi/sensors.c
510
return PTR_ERR(iter);
drivers/firmware/arm_scmi/sensors.c
516
ret = ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/sensors.c
529
void *iter;
drivers/firmware/arm_scmi/sensors.c
545
iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
drivers/firmware/arm_scmi/sensors.c
549
if (IS_ERR(iter))
drivers/firmware/arm_scmi/sensors.c
550
return PTR_ERR(iter);
drivers/firmware/arm_scmi/sensors.c
552
ret = ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/sensors.c
694
void *iter;
drivers/firmware/arm_scmi/sensors.c
701
iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors,
drivers/firmware/arm_scmi/sensors.c
704
if (IS_ERR(iter))
drivers/firmware/arm_scmi/sensors.c
705
return PTR_ERR(iter);
drivers/firmware/arm_scmi/sensors.c
707
return ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
440
void *iter;
drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
445
iter = ph->hops->iter_response_init(ph, &ops, *size, SCMI_IMX_MISC_SYSLOG_GET,
drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
448
if (IS_ERR(iter))
drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
449
return PTR_ERR(iter);
drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
452
return ph->hops->iter_response_run(iter);
drivers/firmware/arm_scmi/voltage.c
184
void *iter;
drivers/firmware/arm_scmi/voltage.c
195
iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
drivers/firmware/arm_scmi/voltage.c
199
if (IS_ERR(iter))
drivers/firmware/arm_scmi/voltage.c
200
return PTR_ERR(iter);
drivers/firmware/arm_scmi/voltage.c
202
ret = ph->hops->iter_response_run(iter);
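
The SCMI entries above (clock, perf, pinctrl, sensors, imx misc, voltage) all follow one protocol-agnostic multi-part command pattern: the caller supplies three callbacks through struct scmi_iterator_ops, gets an opaque iterator from iter_response_init(), and lets iter_response_run() loop over the chunked firmware replies. A minimal sketch, assuming the callback prototypes from drivers/firmware/arm_scmi/protocols.h (treat them as an assumption and check that header); the message layout, MAX_FOO_DESC, and FOO_DESCRIBE message id are hypothetical placeholders:

    /* kernel-internal sketch; real users live in drivers/firmware/arm_scmi/ */
    struct scmi_msg_foo {                   /* hypothetical request layout */
        __le32 index;
    };

    struct scmi_resp_foo {                  /* hypothetical reply layout */
        __le32 num_returned;
        __le32 num_remaining;
    };

    static void iter_foo_prepare_message(void *message, unsigned int desc_index,
                                         const void *priv)
    {
        struct scmi_msg_foo *msg = message;

        msg->index = cpu_to_le32(desc_index);   /* resume point for this chunk */
    }

    static int iter_foo_update_state(struct scmi_iterator_state *st,
                                     const void *response, void *priv)
    {
        const struct scmi_resp_foo *r = response;

        st->num_returned = le32_to_cpu(r->num_returned);
        st->num_remaining = le32_to_cpu(r->num_remaining);
        return 0;
    }

    static int iter_foo_process_response(const struct scmi_protocol_handle *ph,
                                         const void *response,
                                         struct scmi_iterator_state *st,
                                         void *priv)
    {
        /* consume one descriptor of this chunk; st->loop_idx selects it */
        return 0;
    }

    static int foo_describe_all(const struct scmi_protocol_handle *ph, void *priv)
    {
        struct scmi_iterator_ops ops = {
            .prepare_message = iter_foo_prepare_message,
            .update_state = iter_foo_update_state,
            .process_response = iter_foo_process_response,
        };
        void *iter;

        iter = ph->hops->iter_response_init(ph, &ops, MAX_FOO_DESC,
                                            FOO_DESCRIBE,   /* message id */
                                            sizeof(struct scmi_msg_foo), priv);
        if (IS_ERR(iter))
            return PTR_ERR(iter);

        return ph->hops->iter_response_run(iter);
    }
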
drivers/firmware/efi/embedded-firmware.c
124
struct efi_embedded_fw *iter, *fw = NULL;
drivers/firmware/efi/embedded-firmware.c
132
list_for_each_entry(iter, &efi_embedded_fw_list, list) {
drivers/firmware/efi/embedded-firmware.c
133
if (strcmp(name, iter->name) == 0) {
drivers/firmware/efi/embedded-firmware.c
134
fw = iter;
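
The embedded-firmware lines show the stock find-by-name idiom: iterate with a named cursor, remember the match in a second pointer, and use that pointer after the loop so the result is well-defined even when nothing matched. A generic sketch (the struct is illustrative, not the real efi_embedded_fw):

    #include <linux/list.h>
    #include <linux/string.h>

    struct fw_entry {                       /* stand-in for efi_embedded_fw */
        struct list_head list;
        const char *name;
    };

    static struct fw_entry *fw_lookup(struct list_head *head, const char *name)
    {
        struct fw_entry *iter, *found = NULL;

        list_for_each_entry(iter, head, list) {
            if (strcmp(name, iter->name) == 0) {
                found = iter;   /* 'iter' alone is unsafe after a full walk */
                break;
            }
        }
        return found;
    }
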
drivers/firmware/qcom/qcom_tzmem.c
305
struct radix_tree_iter iter;
drivers/firmware/qcom/qcom_tzmem.c
315
radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
drivers/firmware/qcom/qcom_tzmem.c
488
struct radix_tree_iter iter;
drivers/firmware/qcom/qcom_tzmem.c
494
radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1603
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1618
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1619
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1623
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1638
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
967
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
973
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
975
&iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
993
drm_connector_list_iter_end(&iter);
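
All of the amdgpu/DC entries below repeat one pattern, implemented in drm_connector.c further down: begin/iterate/end, where the iterator takes a temporary reference on each connector so the walk is safe against concurrent hotplug. A minimal sketch:

    #include <drm/drm_connector.h>

    static int count_connectors(struct drm_device *dev)
    {
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        int n = 0;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter)
            n++;
        drm_connector_list_iter_end(&iter);     /* drops the last reference */

        return n;
    }
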
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
204
struct drm_print_iterator iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
209
iter.data = buffer;
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
210
iter.offset = 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
211
iter.start = offset;
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
212
iter.remain = count;
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
214
p = drm_coredump_printer(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
320
return count - iter.remain;
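
The dev_coredump reader above builds a drm_printer on top of a drm_print_iterator so ordinary drm_printf() output is windowed into the caller's buffer; the return value is how many bytes landed in it. Sketch of the callback shape (names hypothetical):

    #include <drm/drm_print.h>

    static ssize_t foo_coredump_read(char *buffer, loff_t offset, size_t count,
                                     void *data, size_t datalen)
    {
        struct drm_print_iterator iter;
        struct drm_printer p;

        iter.data = buffer;
        iter.start = offset;    /* first byte the reader asked for */
        iter.offset = 0;        /* bytes of output produced so far */
        iter.remain = count;

        p = drm_coredump_printer(&iter);
        drm_printf(&p, "**** device state ****\n");

        return count - iter.remain;     /* bytes copied into 'buffer' */
    }
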
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1761
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1768
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1769
drm_for_each_connector_iter(connector, &iter)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1772
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1809
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1837
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1838
drm_for_each_connector_iter(connector, &iter)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1841
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
413
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
417
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
419
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
483
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
74
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
77
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
78
drm_for_each_connector_iter(connector, &iter)
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
80
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2755
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2765
drm_connector_list_iter_begin(drm_dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2766
drm_for_each_connector_iter(list_connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2772
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2794
drm_connector_list_iter_begin(drm_dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2795
drm_for_each_connector_iter(list_connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2802
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
100
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
110
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
113
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
114
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
121
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
39
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
44
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
46
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
59
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
67
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
69
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
70
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
80
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
89
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
92
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
93
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3549
struct radix_tree_iter iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3554
radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
3558
radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
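
qcom_tzmem and amdgpu_ras both drain a radix tree with the slot iterator; radix_tree_iter_delete() keeps the walk consistent while entries are removed underneath it. Generic sketch (the caller is assumed to hold whatever lock guards the tree):

    #include <linux/radix-tree.h>
    #include <linux/slab.h>

    static void drain_tree(struct radix_tree_root *root)
    {
        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, root, &iter, 0) {
            void *entry = radix_tree_deref_slot(slot);

            kfree(entry);       /* payload type is up to the user */
            radix_tree_iter_delete(root, &iter, slot);
        }
    }
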
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
769
struct dma_fence_unwrap iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
781
dma_fence_unwrap_for_each(f, &iter, fence)
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
879
struct dma_fence_unwrap iter;
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
891
dma_fence_unwrap_for_each(f, &iter, fence) {
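
dma_fence_unwrap_for_each(), used here and in imagination/pvr_sync.c further down, peels fence containers (arrays, chains) so the body sees every leaf fence exactly once; no extra references are taken. Sketch:

    #include <linux/dma-fence-unwrap.h>

    static bool all_leaves_signaled(struct dma_fence *fence)
    {
        struct dma_fence_unwrap iter;
        struct dma_fence *f;

        dma_fence_unwrap_for_each(f, &iter, fence) {
            if (!dma_fence_is_signaled(f))
                return false;   /* safe: the loop holds no references */
        }
        return true;
    }
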
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1232
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1240
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1241
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1247
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1278
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1287
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1288
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1294
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1334
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1357
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1358
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1364
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
332
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
335
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
336
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
373
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
388
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
391
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
392
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
405
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1204
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1209
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1210
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1216
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1248
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1254
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1255
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1261
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1310
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1335
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1336
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1342
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1722
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1730
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1731
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1737
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
311
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
314
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
315
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
342
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
357
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
360
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
361
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
373
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1188
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1197
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1198
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1204
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1249
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1260
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1261
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1267
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1303
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1328
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1329
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1335
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
295
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
298
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
299
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
326
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
341
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
344
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
345
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
357
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
12826
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
12829
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
12830
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
12839
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2686
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2689
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2690
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2713
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2861
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2864
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2865
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2896
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3476
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3600
drm_connector_list_iter_begin(ddev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3601
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3648
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3653
drm_connector_list_iter_begin(ddev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3654
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3674
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
832
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
860
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
861
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
880
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
102
drm_connector_list_iter_begin(ddev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
103
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
122
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
96
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
171
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
180
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
181
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
191
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
4015
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
4022
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
4023
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
4040
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
4041
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
1018
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
901
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
913
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
914
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
965
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
983
struct drm_connector_list_iter iter;
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
986
drm_connector_list_iter_begin(dev, &iter);
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
987
drm_for_each_connector_iter(connector, &iter) {
drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
96
static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter)
drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
98
return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index);
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
145
void *iter = buf;
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
148
radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) {
drivers/gpu/drm/amd/ras/rascore/ras_umc.c
149
data = ras_radix_tree_delete_iter(&ras_umc->root, iter);
drivers/gpu/drm/ast/ast_mode.c
557
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/ast/ast_mode.c
568
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/ast/ast_mode.c
569
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
100
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
78
struct drm_connector_list_iter iter;
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
96
drm_connector_list_iter_begin(ddev, &iter);
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
97
drm_for_each_connector_iter(connector, &iter)
drivers/gpu/drm/display/drm_dp_aux_dev.c
253
struct drm_dp_aux_dev *iter, *aux_dev = NULL;
drivers/gpu/drm/display/drm_dp_aux_dev.c
262
idr_for_each_entry(&aux_idr, iter, id) {
drivers/gpu/drm/display/drm_dp_aux_dev.c
263
if (iter->aux == aux) {
drivers/gpu/drm/display/drm_dp_aux_dev.c
264
aux_dev = iter;
drivers/gpu/drm/drm_bridge.c
1286
struct drm_bridge *iter;
drivers/gpu/drm/drm_bridge.c
1298
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
drivers/gpu/drm/drm_bridge.c
1308
drm_atomic_bridge_propagate_bus_flags(iter, conn,
drivers/gpu/drm/drm_bridge.c
1311
ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
drivers/gpu/drm/drm_bridge.c
1315
if (iter == bridge)
drivers/gpu/drm/drm_bridge.c
774
struct drm_bridge *iter;
drivers/gpu/drm/drm_bridge.c
780
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
drivers/gpu/drm/drm_bridge.c
781
if (iter->funcs->atomic_disable) {
drivers/gpu/drm/drm_bridge.c
782
iter->funcs->atomic_disable(iter, state);
drivers/gpu/drm/drm_bridge.c
783
} else if (iter->funcs->disable) {
drivers/gpu/drm/drm_bridge.c
784
iter->funcs->disable(iter);
drivers/gpu/drm/drm_bridge.c
787
if (iter == bridge)
drivers/gpu/drm/drm_bridge.c
924
struct drm_bridge *iter, *next, *limit;
drivers/gpu/drm/drm_bridge.c
931
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
drivers/gpu/drm/drm_bridge.c
932
if (iter->pre_enable_prev_first) {
drivers/gpu/drm/drm_bridge.c
933
next = iter;
drivers/gpu/drm/drm_bridge.c
954
if (next == iter)
drivers/gpu/drm/drm_bridge.c
964
drm_atomic_bridge_call_pre_enable(iter, state);
drivers/gpu/drm/drm_bridge.c
966
if (iter->pre_enable_prev_first)
drivers/gpu/drm/drm_bridge.c
968
iter = limit;
drivers/gpu/drm/drm_bridge.c
970
if (iter == bridge)
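
The drm_bridge.c walks above go from the sink end of the encoder's bridge chain back toward a given bridge and stop once it has been handled; the bound is the '== bridge' check, not the list head. Minimal shape:

    #include <drm/drm_bridge.h>

    static void foo_walk_back_to(struct drm_encoder *encoder,
                                 struct drm_bridge *bridge)
    {
        struct drm_bridge *iter;

        list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
            /* act on 'iter' here, e.g. call its disable hook */
            if (iter == bridge)
                break;          /* 'bridge' was the last one visited */
        }
    }
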
drivers/gpu/drm/drm_buddy.c
241
struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
drivers/gpu/drm/drm_buddy.c
243
while (iter) {
drivers/gpu/drm/drm_buddy.c
247
block = rbtree_get_free_block(iter);
drivers/gpu/drm/drm_buddy.c
248
iter = rb_prev(iter);
drivers/gpu/drm/drm_buddy.c
270
if (iter == &buddy->rb)
drivers/gpu/drm/drm_buddy.c
271
iter = rb_prev(iter);
drivers/gpu/drm/drm_buddy.c
930
struct rb_node *iter;
drivers/gpu/drm/drm_buddy.c
936
iter = rb_last(root);
drivers/gpu/drm/drm_buddy.c
937
while (iter) {
drivers/gpu/drm/drm_buddy.c
938
block = rbtree_get_free_block(iter);
drivers/gpu/drm/drm_buddy.c
965
iter = rb_prev(iter);
drivers/gpu/drm/drm_connector.c
1050
struct drm_connector_list_iter *iter)
drivers/gpu/drm/drm_connector.c
1052
iter->dev = dev;
drivers/gpu/drm/drm_connector.c
1053
iter->conn = NULL;
drivers/gpu/drm/drm_connector.c
1085
drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
drivers/gpu/drm/drm_connector.c
1087
struct drm_connector *old_conn = iter->conn;
drivers/gpu/drm/drm_connector.c
1088
struct drm_mode_config *config = &iter->dev->mode_config;
drivers/gpu/drm/drm_connector.c
1097
iter->conn = NULL;
drivers/gpu/drm/drm_connector.c
1102
iter->conn = list_entry(lhead, struct drm_connector, head);
drivers/gpu/drm/drm_connector.c
1105
} while (!kref_get_unless_zero(&iter->conn->base.refcount));
drivers/gpu/drm/drm_connector.c
1111
return iter->conn;
drivers/gpu/drm/drm_connector.c
1124
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
drivers/gpu/drm/drm_connector.c
1126
struct drm_mode_config *config = &iter->dev->mode_config;
drivers/gpu/drm/drm_connector.c
1129
iter->dev = NULL;
drivers/gpu/drm/drm_connector.c
1130
if (iter->conn) {
drivers/gpu/drm/drm_connector.c
1132
__drm_connector_put_safe(iter->conn);
drivers/gpu/drm/drm_damage_helper.c
225
drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
drivers/gpu/drm/drm_damage_helper.c
230
memset(iter, 0, sizeof(*iter));
drivers/gpu/drm/drm_damage_helper.c
235
iter->clips = (struct drm_rect *)drm_plane_get_damage_clips(state);
drivers/gpu/drm/drm_damage_helper.c
236
iter->num_clips = drm_plane_get_damage_clips_count(state);
drivers/gpu/drm/drm_damage_helper.c
241
iter->plane_src.x1 = src.x1 >> 16;
drivers/gpu/drm/drm_damage_helper.c
242
iter->plane_src.y1 = src.y1 >> 16;
drivers/gpu/drm/drm_damage_helper.c
243
iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
drivers/gpu/drm/drm_damage_helper.c
244
iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
drivers/gpu/drm/drm_damage_helper.c
246
if (!iter->clips || state->ignore_damage_clips ||
drivers/gpu/drm/drm_damage_helper.c
248
iter->clips = NULL;
drivers/gpu/drm/drm_damage_helper.c
249
iter->num_clips = 0;
drivers/gpu/drm/drm_damage_helper.c
250
iter->full_update = true;
drivers/gpu/drm/drm_damage_helper.c
272
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
drivers/gpu/drm/drm_damage_helper.c
277
if (iter->full_update) {
drivers/gpu/drm/drm_damage_helper.c
278
*rect = iter->plane_src;
drivers/gpu/drm/drm_damage_helper.c
279
iter->full_update = false;
drivers/gpu/drm/drm_damage_helper.c
283
while (iter->curr_clip < iter->num_clips) {
drivers/gpu/drm/drm_damage_helper.c
284
*rect = iter->clips[iter->curr_clip];
drivers/gpu/drm/drm_damage_helper.c
285
iter->curr_clip++;
drivers/gpu/drm/drm_damage_helper.c
287
if (drm_rect_intersect(rect, &iter->plane_src)) {
drivers/gpu/drm/drm_damage_helper.c
316
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/drm_damage_helper.c
325
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drivers/gpu/drm/drm_damage_helper.c
326
drm_atomic_for_each_plane_damage(&iter, &clip) {
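
drm_damage_helper.c implements the iterator consumed by ast above and by gud/hyperv/mgag200 below: after init, each step yields one damage clip already intersected with the plane source, or a single full-plane rect when there are no usable clips. Usage shape:

    #include <drm/drm_damage_helper.h>

    static void foo_flush_damage(struct drm_plane_state *old_state,
                                 struct drm_plane_state *new_state)
    {
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect clip;

        drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
        drm_atomic_for_each_plane_damage(&iter, &clip) {
            /* copy only clip.x1..x2 by clip.y1..y2 to the device */
        }
    }
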
drivers/gpu/drm/drm_displayid.c
100
iter->length = iter->idx + sizeof(*base) + base->bytes;
drivers/gpu/drm/drm_displayid.c
106
struct displayid_iter *iter)
drivers/gpu/drm/drm_displayid.c
108
memset(iter, 0, sizeof(*iter));
drivers/gpu/drm/drm_displayid.c
110
iter->drm_edid = drm_edid;
drivers/gpu/drm/drm_displayid.c
111
iter->quirks = get_quirks(drm_edid);
drivers/gpu/drm/drm_displayid.c
115
displayid_iter_block(const struct displayid_iter *iter)
drivers/gpu/drm/drm_displayid.c
119
if (!iter->section)
drivers/gpu/drm/drm_displayid.c
122
block = (const struct displayid_block *)&iter->section[iter->idx];
drivers/gpu/drm/drm_displayid.c
124
if (iter->idx + sizeof(*block) <= iter->length &&
drivers/gpu/drm/drm_displayid.c
125
iter->idx + sizeof(*block) + block->num_bytes <= iter->length)
drivers/gpu/drm/drm_displayid.c
132
__displayid_iter_next(struct displayid_iter *iter)
drivers/gpu/drm/drm_displayid.c
136
if (!iter->drm_edid)
drivers/gpu/drm/drm_displayid.c
139
if (iter->section) {
drivers/gpu/drm/drm_displayid.c
141
block = displayid_iter_block(iter);
drivers/gpu/drm/drm_displayid.c
143
iter->section = NULL;
drivers/gpu/drm/drm_displayid.c
144
iter->drm_edid = NULL;
drivers/gpu/drm/drm_displayid.c
149
iter->idx += sizeof(*block) + block->num_bytes;
drivers/gpu/drm/drm_displayid.c
151
block = displayid_iter_block(iter);
drivers/gpu/drm/drm_displayid.c
158
bool base_section = !iter->section;
drivers/gpu/drm/drm_displayid.c
160
iter->section = find_next_displayid_extension(iter);
drivers/gpu/drm/drm_displayid.c
161
if (!iter->section) {
drivers/gpu/drm/drm_displayid.c
162
iter->drm_edid = NULL;
drivers/gpu/drm/drm_displayid.c
170
base = displayid_get_header(iter->section, iter->length,
drivers/gpu/drm/drm_displayid.c
171
iter->idx);
drivers/gpu/drm/drm_displayid.c
173
iter->version = base->rev;
drivers/gpu/drm/drm_displayid.c
174
iter->primary_use = base->prod_id;
drivers/gpu/drm/drm_displayid.c
178
iter->idx += sizeof(struct displayid_header);
drivers/gpu/drm/drm_displayid.c
180
block = displayid_iter_block(iter);
drivers/gpu/drm/drm_displayid.c
186
void displayid_iter_end(struct displayid_iter *iter)
drivers/gpu/drm/drm_displayid.c
188
memset(iter, 0, sizeof(*iter));
drivers/gpu/drm/drm_displayid.c
192
u8 displayid_version(const struct displayid_iter *iter)
drivers/gpu/drm/drm_displayid.c
194
return iter->version;
drivers/gpu/drm/drm_displayid.c
201
u8 displayid_primary_use(const struct displayid_iter *iter)
drivers/gpu/drm/drm_displayid.c
203
return iter->primary_use;
drivers/gpu/drm/drm_displayid.c
82
static const u8 *find_next_displayid_extension(struct displayid_iter *iter)
drivers/gpu/drm/drm_displayid.c
86
bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM);
drivers/gpu/drm/drm_displayid.c
88
displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index);
drivers/gpu/drm/drm_displayid.c
93
iter->length = EDID_LENGTH - 1;
drivers/gpu/drm/drm_displayid.c
94
iter->idx = 1;
drivers/gpu/drm/drm_displayid.c
96
base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum);
drivers/gpu/drm/drm_displayid_internal.h
175
struct displayid_iter *iter);
drivers/gpu/drm/drm_displayid_internal.h
177
__displayid_iter_next(struct displayid_iter *iter);
drivers/gpu/drm/drm_displayid_internal.h
180
void displayid_iter_end(struct displayid_iter *iter);
drivers/gpu/drm/drm_displayid_internal.h
182
u8 displayid_version(const struct displayid_iter *iter);
drivers/gpu/drm/drm_displayid_internal.h
183
u8 displayid_primary_use(const struct displayid_iter *iter);
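
The DisplayID iterator is private to the EDID parser (drm_displayid_internal.h), but its shape is the usual begin/for-each/end triple over validated blocks. A sketch, only meaningful inside drm_edid.c and friends:

    /* assumes drm_displayid_internal.h, i.e. code living next to drm_edid.c */
    static void walk_displayid(const struct drm_edid *drm_edid)
    {
        const struct displayid_block *block;
        struct displayid_iter iter;

        displayid_iter_edid_begin(drm_edid, &iter);
        displayid_iter_for_each(block, &iter) {
            /* inspect block->tag / block->num_bytes here */
        }
        displayid_iter_end(&iter);
    }
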
drivers/gpu/drm/drm_edid.c
1740
struct drm_edid_iter *iter)
drivers/gpu/drm/drm_edid.c
1742
memset(iter, 0, sizeof(*iter));
drivers/gpu/drm/drm_edid.c
1744
iter->drm_edid = drm_edid;
drivers/gpu/drm/drm_edid.c
1747
static const void *__drm_edid_iter_next(struct drm_edid_iter *iter)
drivers/gpu/drm/drm_edid.c
1751
if (!iter->drm_edid)
drivers/gpu/drm/drm_edid.c
1754
if (iter->index < drm_edid_block_count(iter->drm_edid))
drivers/gpu/drm/drm_edid.c
1755
block = drm_edid_block_data(iter->drm_edid, iter->index++);
drivers/gpu/drm/drm_edid.c
1763
static void drm_edid_iter_end(struct drm_edid_iter *iter)
drivers/gpu/drm/drm_edid.c
1765
memset(iter, 0, sizeof(*iter));
drivers/gpu/drm/drm_edid.c
4208
struct displayid_iter iter;
drivers/gpu/drm/drm_edid.c
4227
displayid_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
4228
displayid_iter_for_each(block, &iter) {
drivers/gpu/drm/drm_edid.c
4234
displayid_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
5034
struct cea_db_iter *iter)
drivers/gpu/drm/drm_edid.c
5036
memset(iter, 0, sizeof(*iter));
drivers/gpu/drm/drm_edid.c
5038
drm_edid_iter_begin(drm_edid, &iter->edid_iter);
drivers/gpu/drm/drm_edid.c
5039
displayid_iter_edid_begin(drm_edid, &iter->displayid_iter);
drivers/gpu/drm/drm_edid.c
5043
__cea_db_iter_current_block(const struct cea_db_iter *iter)
drivers/gpu/drm/drm_edid.c
5047
if (!iter->collection)
drivers/gpu/drm/drm_edid.c
5050
db = (const struct cea_db *)&iter->collection[iter->index];
drivers/gpu/drm/drm_edid.c
5052
if (iter->index + sizeof(*db) <= iter->end &&
drivers/gpu/drm/drm_edid.c
5053
iter->index + sizeof(*db) + cea_db_payload_len(db) <= iter->end)
drivers/gpu/drm/drm_edid.c
5078
static const void *__cea_db_iter_edid_next(struct cea_db_iter *iter)
drivers/gpu/drm/drm_edid.c
5082
drm_edid_iter_for_each(ext, &iter->edid_iter) {
drivers/gpu/drm/drm_edid.c
5093
iter->index = 4;
drivers/gpu/drm/drm_edid.c
5094
iter->end = iter->index + size;
drivers/gpu/drm/drm_edid.c
5110
static const void *__cea_db_iter_displayid_next(struct cea_db_iter *iter)
drivers/gpu/drm/drm_edid.c
5114
displayid_iter_for_each(block, &iter->displayid_iter) {
drivers/gpu/drm/drm_edid.c
5122
iter->index = sizeof(*block);
drivers/gpu/drm/drm_edid.c
5123
iter->end = iter->index + block->num_bytes;
drivers/gpu/drm/drm_edid.c
5131
static const struct cea_db *__cea_db_iter_next(struct cea_db_iter *iter)
drivers/gpu/drm/drm_edid.c
5135
if (iter->collection) {
drivers/gpu/drm/drm_edid.c
5137
db = __cea_db_iter_current_block(iter);
drivers/gpu/drm/drm_edid.c
5139
iter->collection = NULL;
drivers/gpu/drm/drm_edid.c
5144
iter->index += sizeof(*db) + cea_db_payload_len(db);
drivers/gpu/drm/drm_edid.c
5146
db = __cea_db_iter_current_block(iter);
drivers/gpu/drm/drm_edid.c
5160
iter->collection = __cea_db_iter_edid_next(iter);
drivers/gpu/drm/drm_edid.c
5161
if (!iter->collection)
drivers/gpu/drm/drm_edid.c
5162
iter->collection = __cea_db_iter_displayid_next(iter);
drivers/gpu/drm/drm_edid.c
5164
if (!iter->collection)
drivers/gpu/drm/drm_edid.c
5167
db = __cea_db_iter_current_block(iter);
drivers/gpu/drm/drm_edid.c
5176
static void cea_db_iter_end(struct cea_db_iter *iter)
drivers/gpu/drm/drm_edid.c
5178
displayid_iter_end(&iter->displayid_iter);
drivers/gpu/drm/drm_edid.c
5179
drm_edid_iter_end(&iter->edid_iter);
drivers/gpu/drm/drm_edid.c
5181
memset(iter, 0, sizeof(*iter));
drivers/gpu/drm/drm_edid.c
5328
struct cea_db_iter iter;
drivers/gpu/drm/drm_edid.c
5334
cea_db_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
5335
cea_db_iter_for_each(db, &iter) {
drivers/gpu/drm/drm_edid.c
5347
cea_db_iter_end(&iter);
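
cea_db_iter is a composite: it layers a drm_edid_iter over CTA extension blocks with a displayid_iter over CTA sections embedded in DisplayID, so callers see one flat stream of data blocks. All the drm_edid.c call sites below reduce to this drm_edid.c-internal shape:

    static void walk_cea_data_blocks(const struct drm_edid *drm_edid)
    {
        const struct cea_db *db;
        struct cea_db_iter iter;

        cea_db_iter_edid_begin(drm_edid, &iter);
        cea_db_iter_for_each(db, &iter) {
            /* dispatch on the data block tag here */
        }
        cea_db_iter_end(&iter);
    }
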
drivers/gpu/drm/drm_edid.c
5679
struct cea_db_iter iter;
drivers/gpu/drm/drm_edid.c
5704
cea_db_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
5705
cea_db_iter_for_each(db, &iter) {
drivers/gpu/drm/drm_edid.c
5733
cea_db_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
5757
struct cea_db_iter iter;
drivers/gpu/drm/drm_edid.c
5760
cea_db_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
5761
cea_db_iter_for_each(db, &iter) {
drivers/gpu/drm/drm_edid.c
5776
cea_db_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
5806
struct cea_db_iter iter;
drivers/gpu/drm/drm_edid.c
5809
cea_db_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
5810
cea_db_iter_for_each(db, &iter) {
drivers/gpu/drm/drm_edid.c
5821
cea_db_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
5893
struct cea_db_iter iter;
drivers/gpu/drm/drm_edid.c
5900
cea_db_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
5901
cea_db_iter_for_each(db, &iter) {
drivers/gpu/drm/drm_edid.c
5907
cea_db_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
5935
struct cea_db_iter iter;
drivers/gpu/drm/drm_edid.c
5954
cea_db_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
5955
cea_db_iter_for_each(db, &iter) {
drivers/gpu/drm/drm_edid.c
5967
cea_db_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
6410
struct cea_db_iter iter;
drivers/gpu/drm/drm_edid.c
6440
cea_db_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
6441
cea_db_iter_for_each(db, &iter) {
drivers/gpu/drm/drm_edid.c
6465
cea_db_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
6594
struct displayid_iter iter;
drivers/gpu/drm/drm_edid.c
6596
displayid_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
6597
displayid_iter_for_each(block, &iter) {
drivers/gpu/drm/drm_edid.c
6601
displayid_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
6651
struct displayid_iter iter;
drivers/gpu/drm/drm_edid.c
6653
displayid_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
6654
displayid_iter_for_each(block, &iter) {
drivers/gpu/drm/drm_edid.c
6658
displayid_version(&iter),
drivers/gpu/drm/drm_edid.c
6659
displayid_primary_use(&iter));
drivers/gpu/drm/drm_edid.c
6660
if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
drivers/gpu/drm/drm_edid.c
6661
(displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
drivers/gpu/drm/drm_edid.c
6662
displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
drivers/gpu/drm/drm_edid.c
6671
displayid_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
6926
struct displayid_iter iter;
drivers/gpu/drm/drm_edid.c
6929
displayid_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
6930
displayid_iter_for_each(block, &iter) {
drivers/gpu/drm/drm_edid.c
6938
displayid_iter_end(&iter);
drivers/gpu/drm/drm_edid.c
7552
static bool displayid_is_tiled_block(const struct displayid_iter *iter,
drivers/gpu/drm/drm_edid.c
7555
return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 &&
drivers/gpu/drm/drm_edid.c
7557
(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
drivers/gpu/drm/drm_edid.c
7565
struct displayid_iter iter;
drivers/gpu/drm/drm_edid.c
7569
displayid_iter_edid_begin(drm_edid, &iter);
drivers/gpu/drm/drm_edid.c
7570
displayid_iter_for_each(block, &iter) {
drivers/gpu/drm/drm_edid.c
7571
if (displayid_is_tiled_block(&iter, block))
drivers/gpu/drm/drm_edid.c
7574
displayid_iter_end(&iter);
drivers/gpu/drm/drm_fb_dma_helper.c
128
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/drm_fb_dma_helper.c
141
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drivers/gpu/drm/drm_fb_dma_helper.c
143
drm_atomic_for_each_plane_damage(&iter, &clip) {
drivers/gpu/drm/drm_panic.c
556
struct kmsg_dump_iter iter;
drivers/gpu/drm/drm_panic.c
570
kmsg_dump_rewind(&iter);
drivers/gpu/drm/drm_panic.c
571
while (kmsg_dump_get_buffer(&iter, false, kmsg_buf, sizeof(kmsg_buf), &kmsg_len)) {
drivers/gpu/drm/drm_panic.c
653
struct kmsg_dump_iter iter;
drivers/gpu/drm/drm_panic.c
666
kmsg_dump_rewind(&iter);
drivers/gpu/drm/drm_panic.c
667
kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len);
drivers/gpu/drm/drm_panic.c
710
struct kmsg_dump_iter iter;
drivers/gpu/drm/drm_panic.c
715
kmsg_dump_rewind(&iter);
drivers/gpu/drm/drm_panic.c
716
kmsg_dump_get_buffer(&iter, false, qrbuf1, max_kmsg_size, &kmsg_len);
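
drm_panic pulls the kernel log through the kmsg dump iterator: rewind to the oldest record, then let kmsg_dump_get_buffer() fill the buffer until the log is exhausted. Sketch:

    #include <linux/kmsg_dump.h>

    static void foo_render_kmsg(char *buf, size_t bufsz)
    {
        struct kmsg_dump_iter iter;
        size_t len;

        kmsg_dump_rewind(&iter);
        /* false = raw kernel format, no syslog prefixes */
        while (kmsg_dump_get_buffer(&iter, false, buf, bufsz, &len)) {
            /* 'len' bytes of log text are in buf; draw or copy them */
        }
    }
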
drivers/gpu/drm/drm_syncobj.c
1685
struct dma_fence *iter, *last_signaled =
drivers/gpu/drm/drm_syncobj.c
1692
dma_fence_chain_for_each(iter, fence) {
drivers/gpu/drm/drm_syncobj.c
1693
if (iter->context != fence->context) {
drivers/gpu/drm/drm_syncobj.c
1694
dma_fence_put(iter);
drivers/gpu/drm/drm_syncobj.c
1700
last_signaled = dma_fence_get(iter);
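
dma_fence_chain_for_each() hands the body a referenced fence on every step and drops it when advancing, so leaving the loop early must either put the fence (as drm_syncobj.c does above) or deliberately keep it, as in this sketch:

    #include <linux/dma-fence-chain.h>

    static struct dma_fence *first_foreign_fence(struct dma_fence *head)
    {
        struct dma_fence *iter;

        dma_fence_chain_for_each(iter, head) {
            if (iter->context != head->context)
                return iter;    /* reference deliberately kept */
        }
        return NULL;            /* loop already dropped all refs */
    }
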
drivers/gpu/drm/drm_vma_manager.c
146
struct rb_node *iter;
drivers/gpu/drm/drm_vma_manager.c
149
iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
drivers/gpu/drm/drm_vma_manager.c
152
while (likely(iter)) {
drivers/gpu/drm/drm_vma_manager.c
153
node = rb_entry(iter, struct drm_mm_node, rb);
drivers/gpu/drm/drm_vma_manager.c
156
iter = iter->rb_right;
drivers/gpu/drm/drm_vma_manager.c
161
iter = iter->rb_left;
drivers/gpu/drm/drm_vma_manager.c
247
struct rb_node **iter;
drivers/gpu/drm/drm_vma_manager.c
260
iter = &node->vm_files.rb_node;
drivers/gpu/drm/drm_vma_manager.c
262
while (likely(*iter)) {
drivers/gpu/drm/drm_vma_manager.c
263
parent = *iter;
drivers/gpu/drm/drm_vma_manager.c
264
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
drivers/gpu/drm/drm_vma_manager.c
271
iter = &(*iter)->rb_right;
drivers/gpu/drm/drm_vma_manager.c
273
iter = &(*iter)->rb_left;
drivers/gpu/drm/drm_vma_manager.c
284
rb_link_node(&new->vm_rb, parent, iter);
drivers/gpu/drm/drm_vma_manager.c
362
struct rb_node *iter;
drivers/gpu/drm/drm_vma_manager.c
366
iter = node->vm_files.rb_node;
drivers/gpu/drm/drm_vma_manager.c
367
while (likely(iter)) {
drivers/gpu/drm/drm_vma_manager.c
368
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
drivers/gpu/drm/drm_vma_manager.c
376
iter = iter->rb_right;
drivers/gpu/drm/drm_vma_manager.c
378
iter = iter->rb_left;
drivers/gpu/drm/drm_vma_manager.c
403
struct rb_node *iter;
drivers/gpu/drm/drm_vma_manager.c
407
iter = node->vm_files.rb_node;
drivers/gpu/drm/drm_vma_manager.c
408
while (likely(iter)) {
drivers/gpu/drm/drm_vma_manager.c
409
entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
drivers/gpu/drm/drm_vma_manager.c
413
iter = iter->rb_right;
drivers/gpu/drm/drm_vma_manager.c
415
iter = iter->rb_left;
drivers/gpu/drm/drm_vma_manager.c
420
return iter;
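
drm_buddy.c and drm_vma_manager.c use open-coded rb-tree cursors rather than a for-each helper; the lookup above (vm_files keyed by struct file pointer) is the classic descent, roughly:

    #include <linux/rbtree.h>
    #include <drm/drm_vma_manager.h>

    static struct drm_vma_offset_file *vma_file_lookup(struct rb_root *root,
                                                       struct file *filp)
    {
        struct rb_node *iter = root->rb_node;

        while (iter) {
            struct drm_vma_offset_file *entry =
                rb_entry(iter, struct drm_vma_offset_file, vm_rb);

            if (filp == entry->vm_filp)
                return entry;
            else if (filp > entry->vm_filp)     /* keyed on pointer value */
                iter = iter->rb_right;
            else
                iter = iter->rb_left;
        }
        return NULL;
    }
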
drivers/gpu/drm/etnaviv/etnaviv_dump.c
100
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
103
etnaviv_iommu_dump(mmu, iter->data);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
105
etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
108
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
111
memcpy(iter->data, ptr, size);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
113
iter->hdr->iova = cpu_to_le64(iova);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
115
etnaviv_core_dump_header(iter, type, iter->data + size);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
121
struct core_dump_iterator iter;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
159
file_size += sizeof(*iter.hdr) * n_obj;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
162
iter.start = __vmalloc(file_size, GFP_NOWAIT);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
163
if (!iter.start) {
drivers/gpu/drm/etnaviv/etnaviv_dump.c
170
iter.hdr = iter.start;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
171
iter.data = &iter.hdr[n_obj];
drivers/gpu/drm/etnaviv/etnaviv_dump.c
173
memset(iter.hdr, 0, iter.data - iter.start);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
175
etnaviv_core_dump_registers(&iter, gpu);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
176
etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
177
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
182
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
191
bomap_start = bomap = iter.data;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
193
etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
214
iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));
drivers/gpu/drm/etnaviv/etnaviv_dump.c
220
iter.hdr->iova = cpu_to_le64(vram->iova);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
224
memcpy(iter.data, vaddr, obj->base.size);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
226
etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
drivers/gpu/drm/etnaviv/etnaviv_dump.c
230
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
232
dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_NOWAIT);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
67
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
70
struct etnaviv_dump_object_header *hdr = iter->hdr;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
74
hdr->file_offset = cpu_to_le32(iter->data - iter->start);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
75
hdr->file_size = cpu_to_le32(data_end - iter->data);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
77
iter->hdr++;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
78
iter->data += le32_to_cpu(hdr->file_size);
drivers/gpu/drm/etnaviv/etnaviv_dump.c
81
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
drivers/gpu/drm/etnaviv/etnaviv_dump.c
84
struct etnaviv_dump_registers *reg = iter->data;
drivers/gpu/drm/etnaviv/etnaviv_dump.c
97
etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
509
if (domain->iter >= nr_domains)
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
512
dom = pm_domain(gpu, domain->iter);
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
516
domain->id = domain->iter;
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
520
domain->iter++;
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
521
if (domain->iter == nr_domains)
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
522
domain->iter = 0xff;
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
541
if (signal->iter >= dom->nr_signals)
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
544
sig = &dom->signal[signal->iter];
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
546
signal->id = signal->iter;
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
549
signal->iter++;
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
550
if (signal->iter == dom->nr_signals)
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
551
signal->iter = 0xffff;
drivers/gpu/drm/gud/gud_pipe.c
627
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/gud/gud_pipe.c
650
drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
drivers/gpu/drm/gud/gud_pipe.c
651
drm_atomic_for_each_plane_damage(&iter, &damage)
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
179
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
181
drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
182
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/i915/display/intel_display.h
283
#define for_each_intel_connector_iter(intel_connector, iter) \
drivers/gpu/drm/i915/display/intel_display.h
284
while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
drivers/gpu/drm/i915/display/skl_watermark.c
1394
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
drivers/gpu/drm/i915/display/skl_watermark.c
1401
if (data_rate && iter->data_rate) {
drivers/gpu/drm/i915/display/skl_watermark.c
1402
extra = min_t(u16, iter->size,
drivers/gpu/drm/i915/display/skl_watermark.c
1403
DIV64_U64_ROUND_UP(iter->size * data_rate,
drivers/gpu/drm/i915/display/skl_watermark.c
1404
iter->data_rate));
drivers/gpu/drm/i915/display/skl_watermark.c
1405
iter->size -= extra;
drivers/gpu/drm/i915/display/skl_watermark.c
1406
iter->data_rate -= data_rate;
drivers/gpu/drm/i915/display/skl_watermark.c
1416
iter->start = skl_ddb_entry_init(ddb, iter->start,
drivers/gpu/drm/i915/display/skl_watermark.c
1417
iter->start + size);
drivers/gpu/drm/i915/display/skl_watermark.c
1431
struct skl_plane_ddb_iter iter;
drivers/gpu/drm/i915/display/skl_watermark.c
1448
iter.start = alloc->start;
drivers/gpu/drm/i915/display/skl_watermark.c
1449
iter.size = skl_ddb_entry_size(alloc);
drivers/gpu/drm/i915/display/skl_watermark.c
1450
if (iter.size == 0)
drivers/gpu/drm/i915/display/skl_watermark.c
1455
iter.size -= cursor_size;
drivers/gpu/drm/i915/display/skl_watermark.c
1459
iter.data_rate = skl_total_relative_data_rate(crtc_state);
drivers/gpu/drm/i915/display/skl_watermark.c
1488
if (blocks <= iter.size) {
drivers/gpu/drm/i915/display/skl_watermark.c
1489
iter.size -= blocks;
drivers/gpu/drm/i915/display/skl_watermark.c
1498
blocks, iter.size);
drivers/gpu/drm/i915/display/skl_watermark.c
1503
if (iter.data_rate == 0)
drivers/gpu/drm/i915/display/skl_watermark.c
1504
iter.size = 0;
drivers/gpu/drm/i915/display/skl_watermark.c
1527
skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
drivers/gpu/drm/i915/display/skl_watermark.c
1529
skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
drivers/gpu/drm/i915/display/skl_watermark.c
1532
skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
drivers/gpu/drm/i915/display/skl_watermark.c
1541
drm_WARN_ON(display->drm, iter.size != 0 || iter.data_rate != 0);
drivers/gpu/drm/i915/gem/i915_gem_context.c
106
struct radix_tree_iter iter;
drivers/gpu/drm/i915/gem/i915_gem_context.c
111
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
drivers/gpu/drm/i915/gem/i915_gem_context.c
124
if (lut->handle != iter.index)
drivers/gpu/drm/i915/gem/i915_gem_context.c
134
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
drivers/gpu/drm/i915/gem/i915_gem_object.h
376
struct i915_gem_object_page_iter *iter,
drivers/gpu/drm/i915/gem/i915_gem_pages.c
181
struct radix_tree_iter iter;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
185
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
drivers/gpu/drm/i915/gem/i915_gem_pages.c
186
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
187
radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
drivers/gpu/drm/i915/gem/i915_gem_pages.c
188
radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
280
struct sgt_iter iter;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
323
for_each_sgt_page(page, iter, obj->mm.pages)
drivers/gpu/drm/i915/gem/i915_gem_pages.c
339
struct sgt_iter iter;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
353
for_each_sgt_daddr(addr, iter, obj->mm.pages)
drivers/gpu/drm/i915/gem/i915_gem_pages.c
382
struct sgt_iter iter;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
390
for_each_sgt_page(page, iter, obj->mm.pages)
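
i915's sgt_iter (i915_scatterlist.h) flattens a scatterlist into per-page or per-DMA-address steps; the put-pages paths above are essentially this loop (the real code also handles dirty and writeback state):

    /* i915-internal sketch; for_each_sgt_page comes from i915_scatterlist.h */
    static void foo_put_pages(struct sg_table *pages)
    {
        struct sgt_iter iter;
        struct page *page;

        for_each_sgt_page(page, iter, pages)
            put_page(page);
    }
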
drivers/gpu/drm/i915/gem/i915_gem_pages.c
646
struct i915_gem_object_page_iter *iter,
drivers/gpu/drm/i915/gem/i915_gem_pages.c
651
const bool dma = iter == &obj->mm.get_dma_page ||
drivers/gpu/drm/i915/gem/i915_gem_pages.c
652
iter == &obj->ttm.get_io_page;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
670
if (n < READ_ONCE(iter->sg_idx))
drivers/gpu/drm/i915/gem/i915_gem_pages.c
673
mutex_lock(&iter->lock);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
680
sg = iter->sg_pos;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
681
idx = iter->sg_idx;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
697
ret = radix_tree_insert(&iter->radix, idx, sg);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
703
ret = radix_tree_insert(&iter->radix, idx + i, entry);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
714
iter->sg_pos = sg;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
715
iter->sg_idx = idx;
drivers/gpu/drm/i915/gem/i915_gem_pages.c
717
mutex_unlock(&iter->lock);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
737
sg = radix_tree_lookup(&iter->radix, n);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
750
sg = radix_tree_lookup(&iter->radix, base);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
414
struct iov_iter iter;
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
447
iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)user_data, size);
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
449
written = file->f_op->write_iter(&kiocb, &iter);
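
The shmem pwrite path wraps the single user buffer in an iov_iter and hands it straight to the backing file's ->write_iter(), mirroring the lines above:

    #include <linux/fs.h>
    #include <linux/uio.h>

    static ssize_t foo_pwrite(struct file *file, const void __user *user_data,
                              size_t size, loff_t offset)
    {
        struct iov_iter iter;
        struct kiocb kiocb;

        iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)user_data, size);
        init_sync_kiocb(&kiocb, file);
        kiocb.ki_pos = offset;

        return file->f_op->write_iter(&kiocb, &iter);
    }
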
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
395
struct radix_tree_iter iter;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
402
radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
403
radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
drivers/gpu/drm/i915/gem/i915_gem_wait.c
125
struct dma_fence *iter;
drivers/gpu/drm/i915/gem/i915_gem_wait.c
128
dma_fence_chain_for_each(iter, fence) {
drivers/gpu/drm/i915/gem/i915_gem_wait.c
129
fence_set_priority(to_dma_fence_chain(iter)->fence,
drivers/gpu/drm/i915/gem/i915_gem_wait.c
133
dma_fence_put(iter);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
121
struct sgt_dma iter = sgt_dma(vma_res);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
128
GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
129
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
131
iter.dma += I915_GTT_PAGE_SIZE;
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
132
if (iter.dma == iter.max) {
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
133
iter.sg = __sg_next(iter.sg);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
134
if (!iter.sg || sg_dma_len(iter.sg) == 0)
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
137
iter.dma = sg_dma_address(iter.sg);
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
138
iter.max = iter.dma + sg_dma_len(iter.sg);
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
57
#define gen6_for_each_pde(pt, pd, start, length, iter) \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
58
for (iter = gen6_pde_index(start); \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
59
length > 0 && iter < I915_PDES && \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
60
(pt = i915_pt_entry(pd, iter), true); \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
63
start += temp; length -= temp; }), ++iter)
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
65
#define gen6_for_all_pdes(pt, pd, iter) \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
66
for (iter = 0; \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
67
iter < I915_PDES && \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
68
(pt = i915_pt_entry(pd, iter), true); \
drivers/gpu/drm/i915/gt/gen6_ppgtt.h
69
++iter)
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
457
struct sgt_dma *iter,
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
469
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
470
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
472
iter->dma += I915_GTT_PAGE_SIZE;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
473
if (iter->dma >= iter->max) {
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
474
iter->sg = __sg_next(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
475
if (!iter->sg || sg_dma_len(iter->sg) == 0) {
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
480
iter->dma = sg_dma_address(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
481
iter->max = iter->dma + sg_dma_len(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
505
struct sgt_dma *iter,
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
510
unsigned int rem = sg_dma_len(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
532
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
565
} else if (IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
582
encode | (iter->dma + i *
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
587
iter->dma += page_size;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
589
if (iter->dma >= iter->max) {
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
590
iter->sg = __sg_next(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
591
if (!iter->sg)
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
594
rem = sg_dma_len(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
598
iter->dma = sg_dma_address(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
599
iter->max = iter->dma + rem;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
601
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
608
} while (iter->sg && sg_dma_len(iter->sg));
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
613
struct sgt_dma *iter,
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
618
unsigned int rem = sg_dma_len(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
635
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
652
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
661
GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
662
vaddr[index++] = encode | iter->dma;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
665
iter->dma += page_size;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
667
if (iter->dma >= iter->max) {
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
668
iter->sg = __sg_next(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
669
if (!iter->sg)
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
672
rem = sg_dma_len(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
676
iter->dma = sg_dma_address(iter->sg);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
677
iter->max = iter->dma + rem;
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
680
!(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
685
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
701
!iter->sg && IS_ALIGNED(vma_res->start +
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
732
} while (iter->sg && sg_dma_len(iter->sg));
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
741
struct sgt_dma iter = sgt_dma(vma_res);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
745
xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
747
gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
755
idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1776
int iter;
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1796
for_each_ss_steering(iter, engine->gt, slice, subslice) {
drivers/gpu/drm/i915/gt/intel_engine_cs.c
1808
for_each_ss_steering(iter, engine->gt, slice, subslice)
drivers/gpu/drm/i915/gt/intel_ggtt.c
347
struct sgt_iter iter;
drivers/gpu/drm/i915/gt/intel_ggtt.c
361
iter = __sgt_iter(pages->sgl, true);
drivers/gpu/drm/i915/gt/intel_ggtt.c
396
for_each_sgt_daddr_next(addr, iter) {
drivers/gpu/drm/i915/gt/intel_ggtt.c
501
struct sgt_iter iter;
drivers/gpu/drm/i915/gt/intel_ggtt.c
516
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
drivers/gpu/drm/i915/gt/intel_ggtt.c
657
struct sgt_iter iter;
drivers/gpu/drm/i915/gt/intel_ggtt.c
667
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
296
int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
316
for_each_ss_steering(iter, gt, slice, subslice)
drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
333
for_each_ss_steering(iter, gt, slice, subslice) {
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1282
struct sgt_iter iter;
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1294
for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
1314
for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
drivers/gpu/drm/i915/gvt/gtt.c
703
struct radix_tree_iter iter;
drivers/gpu/drm/i915/gvt/gtt.c
708
radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
drivers/gpu/drm/i915/gvt/handlers.c
2870
static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
drivers/gpu/drm/i915/gvt/handlers.c
2873
struct intel_gvt *gvt = iter->data;
drivers/gpu/drm/i915/gvt/handlers.c
2909
static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
drivers/gpu/drm/i915/gvt/handlers.c
2912
struct intel_gvt *gvt = iter->data;
drivers/gpu/drm/i915/gvt/handlers.c
2936
static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
drivers/gpu/drm/i915/gvt/handlers.c
2940
return handle_mmio(iter, offset, size);
drivers/gpu/drm/i915/gvt/handlers.c
2942
return handle_mmio_block(iter, offset, size);
drivers/gpu/drm/i915/gvt/handlers.c
2947
struct intel_gvt_mmio_table_iter iter = {
drivers/gpu/drm/i915/gvt/handlers.c
2953
return intel_gvt_iterate_mmio_table(&iter);
drivers/gpu/drm/i915/i915_deps.c
225
struct dma_resv_iter iter;
drivers/gpu/drm/i915/i915_deps.c
229
dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {
drivers/gpu/drm/i915/i915_gpu_error.c
1150
struct sgt_iter iter;
drivers/gpu/drm/i915/i915_gpu_error.c
1181
for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
drivers/gpu/drm/i915/i915_gpu_error.c
1211
for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
drivers/gpu/drm/i915/i915_gpu_error.c
1231
for_each_sgt_page(page, iter, vma_res->bi.pages) {
drivers/gpu/drm/i915/i915_gpu_error.c
444
int iter;
drivers/gpu/drm/i915/i915_gpu_error.c
458
for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
drivers/gpu/drm/i915/i915_gpu_error.c
463
for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
drivers/gpu/drm/i915/i915_gpu_error.c
472
for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
drivers/gpu/drm/i915/i915_gpu_error.c
84
__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
drivers/gpu/drm/i915/i915_gpu_error.c
85
e->iter += e->bytes;
drivers/gpu/drm/i915/i915_gpu_error.c
983
__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
drivers/gpu/drm/i915/i915_gpu_error.h
239
loff_t iter;
drivers/gpu/drm/i915/i915_request.c
1361
struct dma_fence *iter;
drivers/gpu/drm/i915/i915_request.c
1367
dma_fence_chain_for_each(iter, fence) {
drivers/gpu/drm/i915/i915_request.c
1368
struct dma_fence_chain *chain = to_dma_fence_chain(iter);
drivers/gpu/drm/i915/i915_request.c
1371
err = __i915_request_await_external(rq, iter);
drivers/gpu/drm/i915/i915_request.c
1380
dma_fence_put(iter);
drivers/gpu/drm/i915/i915_vma.c
1145
struct scatterlist *iter;
drivers/gpu/drm/i915/i915_vma.c
1148
iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
drivers/gpu/drm/i915/i915_vma.c
1149
GEM_BUG_ON(!iter);
drivers/gpu/drm/i915/i915_vma.c
1154
len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
drivers/gpu/drm/i915/i915_vma.c
1158
sg_dma_address(iter) + (offset << PAGE_SHIFT);
drivers/gpu/drm/i915/i915_vma.c
1167
iter = __sg_next(iter);
drivers/gpu/drm/i915/intel_gvt.c
105
save_mmio(iter, offset, size);
drivers/gpu/drm/i915/intel_gvt.c
113
struct intel_gvt_mmio_table_iter iter;
drivers/gpu/drm/i915/intel_gvt.c
134
iter.i915 = dev_priv;
drivers/gpu/drm/i915/intel_gvt.c
135
iter.data = vgpu->initial_mmio;
drivers/gpu/drm/i915/intel_gvt.c
136
iter.handle_mmio_cb = handle_mmio;
drivers/gpu/drm/i915/intel_gvt.c
138
ret = intel_gvt_iterate_mmio_table(&iter);
drivers/gpu/drm/i915/intel_gvt.c
86
static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
drivers/gpu/drm/i915/intel_gvt.c
89
struct drm_i915_private *dev_priv = iter->i915;
drivers/gpu/drm/i915/intel_gvt.c
93
mmio = iter->data + i;
drivers/gpu/drm/i915/intel_gvt.c
99
static int handle_mmio(struct intel_gvt_mmio_table_iter *iter,
drivers/gpu/drm/i915/intel_gvt.h
36
int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter,
drivers/gpu/drm/i915/intel_gvt.h
44
int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter);
drivers/gpu/drm/i915/intel_gvt.h
72
static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1114
static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1116
struct drm_i915_private *dev_priv = iter->i915;
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1277
int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1279
struct drm_i915_private *i915 = iter->i915;
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1282
ret = iterate_generic_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1287
ret = iterate_bdw_only_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1290
ret = iterate_bdw_plus_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1293
ret = iterate_pre_skl_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1300
ret = iterate_bdw_plus_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1303
ret = iterate_skl_plus_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1307
ret = iterate_bdw_plus_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1310
ret = iterate_skl_plus_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
1313
ret = iterate_bxt_mmio(iter);
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
45
ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), s); \
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
57
if (HAS_ENGINE(to_gt(iter->i915), VCS1)) \
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
64
static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
66
struct drm_i915_private *dev_priv = iter->i915;
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
718
static int iterate_bdw_only_mmio(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
743
static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
745
struct drm_i915_private *dev_priv = iter->i915;
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
896
static int iterate_pre_skl_mmio(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
915
static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
917
struct drm_i915_private *dev_priv = iter->i915;
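
Taken together, the handlers.c, intel_gvt.c and intel_gvt_mmio_table.c entries describe one callback-driven walk: the caller fills a struct intel_gvt_mmio_table_iter with an i915 pointer, an opaque data cookie and a handle_mmio_cb, and intel_gvt_iterate_mmio_table() fires the callback once per table entry. A hedged sketch of a caller; the field names come from the listing, while count_mmio() and the exact u32 offset/size callback signature are assumptions drawn from the handle_mmio() lines above:

/* Sketch of the intel_gvt_mmio_table_iter usage indexed above;
 * assumes the driver-local "intel_gvt.h" header. */
static int count_mmio(struct intel_gvt_mmio_table_iter *iter,
                      u32 offset, u32 size)
{
        u32 *count = iter->data;        /* caller-owned cookie */

        (*count)++;
        return 0;                       /* non-zero aborts the walk */
}

static int count_mmio_entries(struct drm_i915_private *i915, u32 *count)
{
        struct intel_gvt_mmio_table_iter iter = {
                .i915 = i915,
                .data = count,
                .handle_mmio_cb = count_mmio,
        };

        *count = 0;
        return intel_gvt_iterate_mmio_table(&iter);
}
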
drivers/gpu/drm/imagination/pvr_sync.c
208
struct dma_fence_unwrap iter;
drivers/gpu/drm/imagination/pvr_sync.c
213
dma_fence_unwrap_for_each(uf, &iter, f) {
drivers/gpu/drm/imagination/pvr_sync.c
222
dma_fence_unwrap_for_each(uf, &iter, f) {
drivers/gpu/drm/mgag200/mgag200_mode.c
520
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/mgag200/mgag200_mode.c
524
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/mgag200/mgag200_mode.c
525
drm_atomic_for_each_plane_damage(&iter, &damage) {
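
This mgag200 hunk is the template for the many damage-iterator entries later in this section (sitronix, ssd130x, udl, the tiny drivers, the KUnit tests): initialize the iterator from the old and new plane state, then receive merged clip rectangles one at a time. A sketch, with flush_rect() standing in for a driver's own blit routine:

/* Sketch of the drm_atomic_helper_damage_iter pattern indexed above;
 * flush_rect() is hypothetical. */
#include <drm/drm_damage_helper.h>

static void flush_rect(struct drm_framebuffer *fb,
                       const struct drm_rect *clip);

static void flush_damage(struct drm_plane_state *old_state,
                         struct drm_plane_state *new_state)
{
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect damage;

        drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
        drm_atomic_for_each_plane_damage(&iter, &damage) {
                /* damage is one merged clip in framebuffer coordinates */
                flush_rect(new_state->fb, &damage);
        }
}
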
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
13
struct drm_print_iterator iter;
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
19
iter.data = buffer;
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
20
iter.offset = 0;
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
21
iter.start = offset;
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
22
iter.remain = count;
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
24
p = drm_coredump_printer(&iter);
drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
28
return count - iter.remain;
drivers/gpu/drm/msm/msm_gpu.c
182
struct drm_print_iterator iter;
drivers/gpu/drm/msm/msm_gpu.c
190
iter.data = buffer;
drivers/gpu/drm/msm/msm_gpu.c
191
iter.offset = 0;
drivers/gpu/drm/msm/msm_gpu.c
192
iter.start = offset;
drivers/gpu/drm/msm/msm_gpu.c
193
iter.remain = count;
drivers/gpu/drm/msm/msm_gpu.c
195
p = drm_coredump_printer(&iter);
drivers/gpu/drm/msm/msm_gpu.c
210
return count - iter.remain;
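
Both msm clusters above, like xe_devcoredump.c further down, build the same devcoredump read callback: populate a struct drm_print_iterator, wrap it with drm_coredump_printer(), print into it, and report the bytes produced as count - iter.remain. A hedged sketch, with dump_state() as a hypothetical driver print routine:

/* Hedged sketch of the drm_coredump_printer() pattern indexed above. */
#include <drm/drm_print.h>

static void dump_state(struct drm_printer *p, void *data);

static ssize_t crash_read(char *buffer, loff_t offset, size_t count,
                          void *data)
{
        struct drm_print_iterator iter;
        struct drm_printer p;

        iter.data = buffer;     /* destination window ... */
        iter.offset = 0;
        iter.start = offset;    /* ... beginning at this file offset */
        iter.remain = count;

        p = drm_coredump_printer(&iter);
        dump_state(&p, data);

        return count - iter.remain;     /* bytes actually emitted */
}
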
drivers/gpu/drm/mxsfb/mxsfb_drv.c
118
struct drm_connector_list_iter iter;
drivers/gpu/drm/mxsfb/mxsfb_drv.c
148
drm_connector_list_iter_begin(drm, &iter);
drivers/gpu/drm/mxsfb/mxsfb_drv.c
149
mxsfb->connector = drm_connector_list_iter_next(&iter);
drivers/gpu/drm/mxsfb/mxsfb_drv.c
150
drm_connector_list_iter_end(&iter);
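
mxsfb only wants the first connector, while ltdc and sun4i later in this section walk the whole list, but every variant keeps the begin/end bracket, since the iterator manages internal locking and references. A sketch of the single-shot form:

/* Sketch of the drm_connector_list_iter bracket indexed above. */
#include <drm/drm_connector.h>

static struct drm_connector *first_connector(struct drm_device *drm)
{
        struct drm_connector_list_iter iter;
        struct drm_connector *connector;

        drm_connector_list_iter_begin(drm, &iter);
        connector = drm_connector_list_iter_next(&iter);
        drm_connector_list_iter_end(&iter);     /* always end, even on NULL */

        /*
         * Using the pointer past _end() only works when, as here, the
         * driver knows the connector outlives the call; longer-lived
         * users should take their own reference first.
         */
        return connector;
}
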
drivers/gpu/drm/nouveau/nouveau_connector.h
176
#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
drivers/gpu/drm/nouveau/nouveau_connector.h
177
drm_for_each_connector_iter(connector, iter) \
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1713
const struct gf100_gr_pack *iter;
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1725
pack_for_each_init(init, iter, pack) {
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
797
acpi_handle iter = NULL, handle_mux = NULL;
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
805
status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
806
if (ACPI_FAILURE(status) || !iter)
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
809
status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
813
handle_mux = iter;
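
The gsp.c entries walk ACPI child devices by feeding the previous handle back into acpi_get_next_object(): NULL starts the enumeration and a NULL result ends it. A hedged sketch, where wanted_adr is an illustrative match criterion rather than the driver's actual test:

/* Hedged sketch of the ACPI sibling walk indexed above. */
#include <linux/acpi.h>

static acpi_handle find_child_by_adr(acpi_handle parent, u64 wanted_adr)
{
        acpi_handle iter = NULL, found = NULL;
        unsigned long long value;
        acpi_status status;

        while (!found) {
                status = acpi_get_next_object(ACPI_TYPE_DEVICE, parent,
                                              iter, &iter);
                if (ACPI_FAILURE(status) || !iter)
                        break;  /* no more children */

                status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
                if (ACPI_SUCCESS(status) && value == wanted_adr)
                        found = iter;
        }
        return found;
}
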
drivers/gpu/drm/panfrost/panfrost_dump.c
100
panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
drivers/gpu/drm/panfrost/panfrost_dump.c
106
struct panfrost_dump_iterator iter;
drivers/gpu/drm/panfrost/panfrost_dump.c
153
file_size += sizeof(*iter.hdr) * n_obj;
drivers/gpu/drm/panfrost/panfrost_dump.c
163
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
drivers/gpu/drm/panfrost/panfrost_dump.c
165
if (!iter.start) {
drivers/gpu/drm/panfrost/panfrost_dump.c
171
iter.hdr = iter.start;
drivers/gpu/drm/panfrost/panfrost_dump.c
172
iter.data = &iter.hdr[n_obj];
drivers/gpu/drm/panfrost/panfrost_dump.c
174
memset(iter.hdr, 0, iter.data - iter.start);
drivers/gpu/drm/panfrost/panfrost_dump.c
180
iter.hdr->reghdr.jc = job->jc;
drivers/gpu/drm/panfrost/panfrost_dump.c
181
iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
drivers/gpu/drm/panfrost/panfrost_dump.c
182
iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
drivers/gpu/drm/panfrost/panfrost_dump.c
183
iter.hdr->reghdr.gpu_id = pfdev->features.id;
drivers/gpu/drm/panfrost/panfrost_dump.c
184
iter.hdr->reghdr.nbos = job->bo_count;
drivers/gpu/drm/panfrost/panfrost_dump.c
186
panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
drivers/gpu/drm/panfrost/panfrost_dump.c
190
bomap_start = bomap = iter.data;
drivers/gpu/drm/panfrost/panfrost_dump.c
192
panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BOMAP,
drivers/gpu/drm/panfrost/panfrost_dump.c
208
iter.hdr->bomap.valid = 0;
drivers/gpu/drm/panfrost/panfrost_dump.c
215
iter.hdr->bomap.valid = 0;
drivers/gpu/drm/panfrost/panfrost_dump.c
221
iter.hdr->bomap.data[0] = bomap - bomap_start;
drivers/gpu/drm/panfrost/panfrost_dump.c
226
iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
drivers/gpu/drm/panfrost/panfrost_dump.c
229
memcpy(iter.data, vaddr, bo->base.base.size);
drivers/gpu/drm/panfrost/panfrost_dump.c
233
iter.hdr->bomap.valid = 1;
drivers/gpu/drm/panfrost/panfrost_dump.c
235
dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
drivers/gpu/drm/panfrost/panfrost_dump.c
238
panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data);
drivers/gpu/drm/panfrost/panfrost_dump.c
240
dev_coredumpv(pfdev->base.dev, iter.start, iter.data - iter.start, GFP_KERNEL);
drivers/gpu/drm/panfrost/panfrost_dump.c
61
static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
drivers/gpu/drm/panfrost/panfrost_dump.c
64
struct panfrost_dump_object_header *hdr = iter->hdr;
drivers/gpu/drm/panfrost/panfrost_dump.c
68
hdr->file_offset = iter->data - iter->start;
drivers/gpu/drm/panfrost/panfrost_dump.c
69
hdr->file_size = data_end - iter->data;
drivers/gpu/drm/panfrost/panfrost_dump.c
71
iter->hdr++;
drivers/gpu/drm/panfrost/panfrost_dump.c
72
iter->data += hdr->file_size;
drivers/gpu/drm/panfrost/panfrost_dump.c
76
panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
drivers/gpu/drm/panfrost/panfrost_dump.c
80
struct panfrost_dump_registers *dumpreg = iter->data;
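
The panfrost_dump.c entries sketch a two-cursor dump layout: all object headers sit at the front of one vmalloc'd buffer, payloads follow, and panfrost_core_dump_header() stamps file_offset/file_size into the current header before advancing both cursors. A standalone sketch of the same layout, with illustrative names rather than the panfrost ones:

/* Standalone sketch of the header/payload dump layout indexed above. */
#include <string.h>

struct obj_header { unsigned long file_offset, file_size; };

struct dump_iter {
        void *start;            /* base of the whole dump buffer */
        struct obj_header *hdr; /* next header slot */
        void *data;             /* next payload byte */
};

static void emit_header(struct dump_iter *iter, void *data_end)
{
        struct obj_header *hdr = iter->hdr;

        hdr->file_offset = (char *)iter->data - (char *)iter->start;
        hdr->file_size = (char *)data_end - (char *)iter->data;

        iter->hdr++;                                    /* headers grow forward */
        iter->data = (char *)iter->data + hdr->file_size; /* payload too */
}

Keeping the headers contiguous up front lets a reader index every object in the dump before touching a single payload byte.
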
drivers/gpu/drm/panfrost/panfrost_gem.c
113
struct panfrost_gem_mapping *iter, *mapping = NULL;
drivers/gpu/drm/panfrost/panfrost_gem.c
116
list_for_each_entry(iter, &bo->mappings.list, node) {
drivers/gpu/drm/panfrost/panfrost_gem.c
117
if (iter->mmu == priv->mmu) {
drivers/gpu/drm/panfrost/panfrost_gem.c
118
kref_get(&iter->refcount);
drivers/gpu/drm/panfrost/panfrost_gem.c
119
mapping = iter;
drivers/gpu/drm/panfrost/panfrost_gem.c
227
struct panfrost_gem_mapping *mapping = NULL, *iter;
drivers/gpu/drm/panfrost/panfrost_gem.c
230
list_for_each_entry(iter, &bo->mappings.list, node) {
drivers/gpu/drm/panfrost/panfrost_gem.c
231
if (iter->mmu == priv->mmu) {
drivers/gpu/drm/panfrost/panfrost_gem.c
232
mapping = iter;
drivers/gpu/drm/panfrost/panfrost_gem.c
233
list_del(&iter->node);
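
Both panfrost_gem.c hunks use the kernel's preferred find idiom: scan with a dedicated iter cursor, copy the hit into a second pointer, and never touch the cursor after the loop, where it would otherwise be either a bogus head-embedded value or stale. A standalone sketch:

/* Standalone sketch of the find-with-iter list idiom indexed above. */
#include <stddef.h>

struct node { struct node *next; int key; };

static struct node *find_key(struct node *head, int key)
{
        struct node *match = NULL, *iter;

        for (iter = head; iter; iter = iter->next) {
                if (iter->key == key) {
                        match = iter;   /* remember the hit ... */
                        break;          /* ... and stop scanning */
                }
        }
        return match;   /* NULL means not found, never a stale cursor */
}
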
drivers/gpu/drm/panthor/panthor_fw.c
410
struct panthor_fw_binary_iter *iter,
drivers/gpu/drm/panthor/panthor_fw.c
413
size_t new_offset = iter->offset + size;
drivers/gpu/drm/panthor/panthor_fw.c
415
if (new_offset > iter->size || new_offset < iter->offset) {
drivers/gpu/drm/panthor/panthor_fw.c
420
memcpy(out, iter->data + iter->offset, size);
drivers/gpu/drm/panthor/panthor_fw.c
421
iter->offset = new_offset;
drivers/gpu/drm/panthor/panthor_fw.c
426
struct panthor_fw_binary_iter *iter,
drivers/gpu/drm/panthor/panthor_fw.c
430
size_t new_offset = iter->offset + size;
drivers/gpu/drm/panthor/panthor_fw.c
432
if (new_offset > iter->size || new_offset < iter->offset) {
drivers/gpu/drm/panthor/panthor_fw.c
438
sub_iter->data = iter->data + iter->offset;
drivers/gpu/drm/panthor/panthor_fw.c
440
iter->offset = new_offset;
drivers/gpu/drm/panthor/panthor_fw.c
537
struct panthor_fw_binary_iter *iter,
drivers/gpu/drm/panthor/panthor_fw.c
547
ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
drivers/gpu/drm/panthor/panthor_fw.c
594
name_len = iter->size - iter->offset;
drivers/gpu/drm/panthor/panthor_fw.c
620
memcpy(name, iter->data + iter->offset, name_len);
drivers/gpu/drm/panthor/panthor_fw.c
683
struct panthor_fw_binary_iter *iter,
drivers/gpu/drm/panthor/panthor_fw.c
691
ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
drivers/gpu/drm/panthor/panthor_fw.c
740
struct panthor_fw_binary_iter *iter)
drivers/gpu/drm/panthor/panthor_fw.c
746
ret = panthor_fw_binary_iter_read(ptdev, iter, &ehdr, sizeof(ehdr));
drivers/gpu/drm/panthor/panthor_fw.c
750
if ((iter->offset % sizeof(u32)) ||
drivers/gpu/drm/panthor/panthor_fw.c
753
(u32)(iter->offset - sizeof(u32)), CSF_FW_BINARY_ENTRY_SIZE(ehdr));
drivers/gpu/drm/panthor/panthor_fw.c
757
if (panthor_fw_binary_sub_iter_init(ptdev, iter, &eiter,
drivers/gpu/drm/panthor/panthor_fw.c
789
struct panthor_fw_binary_iter iter = {};
drivers/gpu/drm/panthor/panthor_fw.c
806
iter.data = fw->data;
drivers/gpu/drm/panthor/panthor_fw.c
807
iter.size = fw->size;
drivers/gpu/drm/panthor/panthor_fw.c
808
ret = panthor_fw_binary_iter_read(ptdev, &iter, &hdr, sizeof(hdr));
drivers/gpu/drm/panthor/panthor_fw.c
825
if (hdr.size > iter.size) {
drivers/gpu/drm/panthor/panthor_fw.c
830
iter.size = hdr.size;
drivers/gpu/drm/panthor/panthor_fw.c
832
while (iter.offset < hdr.size) {
drivers/gpu/drm/panthor/panthor_fw.c
833
ret = panthor_fw_load_entry(ptdev, fw, &iter);
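
panthor's firmware parser is a bounded reader: every read computes new_offset = offset + size and rejects it both when it passes the end and when the addition wrapped, so a huge size field in an untrusted header cannot overflow its way past the check. Standalone sketch:

/* Standalone sketch of the bounded-reader check indexed above. */
#include <string.h>
#include <stddef.h>

struct bin_iter { const void *data; size_t size, offset; };

static int bin_read(struct bin_iter *iter, void *out, size_t size)
{
        size_t new_offset = iter->offset + size;

        /* out of bounds, or the addition wrapped around */
        if (new_offset > iter->size || new_offset < iter->offset)
                return -1;

        memcpy(out, (const char *)iter->data + iter->offset, size);
        iter->offset = new_offset;
        return 0;
}
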
drivers/gpu/drm/sitronix/st7571.c
349
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/sitronix/st7571.c
365
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/sitronix/st7571.c
366
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/sitronix/st7920.c
399
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/sitronix/st7920.c
410
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/sitronix/st7920.c
411
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/solomon/ssd130x.c
1207
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/solomon/ssd130x.c
1219
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/solomon/ssd130x.c
1220
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/solomon/ssd130x.c
1248
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/solomon/ssd130x.c
1260
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/solomon/ssd130x.c
1261
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/solomon/ssd130x.c
1288
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/solomon/ssd130x.c
1300
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/solomon/ssd130x.c
1301
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/stm/ltdc.c
901
struct drm_connector_list_iter iter;
drivers/gpu/drm/stm/ltdc.c
929
drm_connector_list_iter_begin(ddev, &iter);
drivers/gpu/drm/stm/ltdc.c
930
drm_for_each_connector_iter(connector, &iter)
drivers/gpu/drm/stm/ltdc.c
933
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/sun4i/sun4i_tcon.c
46
struct drm_connector_list_iter iter;
drivers/gpu/drm/sun4i/sun4i_tcon.c
48
drm_connector_list_iter_begin(encoder->dev, &iter);
drivers/gpu/drm/sun4i/sun4i_tcon.c
49
drm_for_each_connector_iter(connector, &iter)
drivers/gpu/drm/sun4i/sun4i_tcon.c
51
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/sun4i/sun4i_tcon.c
54
drm_connector_list_iter_end(&iter);
drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
343
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
354
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
355
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/tests/drm_damage_helper_test.c
122
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
129
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
130
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
140
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
149
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
150
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
161
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
169
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
170
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
180
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
189
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
190
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
200
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
208
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
209
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
218
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
226
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
227
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
236
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
244
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
245
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
254
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
266
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
267
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
277
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
288
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
289
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
299
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
311
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
312
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
322
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
334
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
335
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
344
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
358
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
359
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
369
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
384
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
385
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
396
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
411
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
412
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
421
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
434
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
435
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
446
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
461
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
462
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
473
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
486
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
487
drm_atomic_for_each_plane_damage(&iter, &clip) {
drivers/gpu/drm/tests/drm_damage_helper_test.c
501
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
516
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
517
drm_atomic_for_each_plane_damage(&iter, &clip) {
drivers/gpu/drm/tests/drm_damage_helper_test.c
531
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
544
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
545
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
555
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
570
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
571
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tests/drm_damage_helper_test.c
582
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tests/drm_damage_helper_test.c
599
drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
drivers/gpu/drm/tests/drm_damage_helper_test.c
600
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/tilcdc/tilcdc_external.c
63
struct drm_encoder *encoder = NULL, *iter;
drivers/gpu/drm/tilcdc/tilcdc_external.c
65
list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
drivers/gpu/drm/tilcdc/tilcdc_external.c
66
if (iter->possible_crtcs & (1 << priv->crtc->index)) {
drivers/gpu/drm/tilcdc/tilcdc_external.c
67
encoder = iter;
drivers/gpu/drm/tiny/appletbdrm.c
326
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tiny/appletbdrm.c
344
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, new_plane_state);
drivers/gpu/drm/tiny/appletbdrm.c
345
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/tiny/appletbdrm.c
380
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tiny/appletbdrm.c
409
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drivers/gpu/drm/tiny/appletbdrm.c
410
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/tiny/bochs.c
453
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tiny/bochs.c
459
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/tiny/bochs.c
460
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/tiny/cirrus-qemu.c
338
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/tiny/cirrus-qemu.c
353
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/tiny/cirrus-qemu.c
354
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/ttm/ttm_resource.c
720
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
drivers/gpu/drm/ttm/ttm_resource.c
725
container_of(iter, typeof(*iter_io), base);
drivers/gpu/drm/ttm/ttm_resource.c
751
static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
drivers/gpu/drm/ttm/ttm_resource.c
802
static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
drivers/gpu/drm/ttm/ttm_resource.c
807
container_of(iter, typeof(*iter_io), base);
drivers/gpu/drm/ttm/ttm_tt.c
487
static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
drivers/gpu/drm/ttm/ttm_tt.c
492
container_of(iter, typeof(*iter_tt), base);
drivers/gpu/drm/ttm/ttm_tt.c
498
static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
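
The ttm entries show the embedded-base callback idiom: map_local/unmap_local receive only the generic struct ttm_kmap_iter and recover their containing iterator with container_of() on the base member. A standalone sketch with illustrative types (the real macro also type-checks the member):

/* Standalone sketch of the container_of() iterator-ops idiom indexed above. */
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kmap_iter;

struct kmap_iter_ops {
        void (*map_local)(struct kmap_iter *iter, unsigned long page);
};

struct kmap_iter { const struct kmap_iter_ops *ops; };

struct kmap_iter_tt {
        struct kmap_iter base;  /* must stay the embedded member */
        void *tt;               /* backing object */
};

static void tt_map_local(struct kmap_iter *iter, unsigned long page)
{
        struct kmap_iter_tt *iter_tt =
                container_of(iter, struct kmap_iter_tt, base);

        /* iter_tt->tt now points at the containing iterator's state */
        (void)iter_tt;
        (void)page;
}
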
drivers/gpu/drm/udl/udl_modeset.c
289
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/udl/udl_modeset.c
303
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drivers/gpu/drm/udl/udl_modeset.c
304
drm_atomic_for_each_plane_damage(&iter, &damage) {
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
327
struct sg_dma_page_iter iter;
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
38
struct vmw_piter *iter,
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
85
*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
87
*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
91
vmw_piter_next(iter);
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1737
struct drm_atomic_helper_damage_iter iter;
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1753
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1754
drm_atomic_for_each_plane_damage(&iter, &clip)
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1821
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1822
drm_atomic_for_each_plane_damage(&iter, &clip) {
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
126
struct vmw_piter iter;
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
132
vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
133
WARN_ON(!vmw_piter_next(&iter));
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
143
mob->pt_root_page = vmw_piter_dma_addr(&iter);
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
149
vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
119
__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
77
return __sg_page_iter_dma_next(&viter->iter) && ret;
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
88
return sg_page_iter_dma_address(&viter->iter);
drivers/gpu/drm/xe/xe_devcoredump.c
100
p = drm_coredump_printer(&iter);
drivers/gpu/drm/xe/xe_devcoredump.c
136
return count - iter.remain;
drivers/gpu/drm/xe/xe_devcoredump.c
89
struct drm_print_iterator iter;
drivers/gpu/drm/xe/xe_devcoredump.c
96
iter.data = buffer;
drivers/gpu/drm/xe/xe_devcoredump.c
97
iter.start = start;
drivers/gpu/drm/xe/xe_devcoredump.c
98
iter.remain = count;
drivers/gpu/drm/xe/xe_query.c
369
int iter = 0;
drivers/gpu/drm/xe/xe_query.c
387
gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA;
drivers/gpu/drm/xe/xe_query.c
389
gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN;
drivers/gpu/drm/xe/xe_query.c
390
gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id;
drivers/gpu/drm/xe/xe_query.c
391
gt_list->gt_list[iter].gt_id = gt->info.id;
drivers/gpu/drm/xe/xe_query.c
392
gt_list->gt_list[iter].reference_clock = gt->info.reference_clock;
drivers/gpu/drm/xe/xe_query.c
408
gt_list->gt_list[iter].near_mem_regions = 0x1;
drivers/gpu/drm/xe/xe_query.c
410
gt_list->gt_list[iter].near_mem_regions =
drivers/gpu/drm/xe/xe_query.c
412
gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
drivers/gpu/drm/xe/xe_query.c
413
gt_list->gt_list[iter].near_mem_regions;
drivers/gpu/drm/xe/xe_query.c
415
gt_list->gt_list[iter].ip_ver_major =
drivers/gpu/drm/xe/xe_query.c
417
gt_list->gt_list[iter].ip_ver_minor =
drivers/gpu/drm/xe/xe_query.c
419
gt_list->gt_list[iter].ip_ver_rev =
drivers/gpu/drm/xe/xe_query.c
422
iter++;
drivers/gpu/drm/xe/xe_sched_job.c
208
struct dma_fence *iter;
drivers/gpu/drm/xe/xe_sched_job.c
210
dma_fence_chain_for_each(iter, job->fence)
drivers/gpu/drm/xe/xe_sched_job.c
211
xe_fence_set_error(dma_fence_chain_contained(iter),
drivers/gpu/drm/xlnx/zynqmp_dp.c
1099
unsigned int i, iter;
drivers/gpu/drm/xlnx/zynqmp_dp.c
1102
iter = zynqmp_dp_aux_timeout_ms * 1000 / 400;
drivers/gpu/drm/xlnx/zynqmp_dp.c
1103
iter = iter ? iter : 1;
drivers/gpu/drm/xlnx/zynqmp_dp.c
1105
for (i = 0; i < iter; i++) {
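
zynqmp_dp sizes its AUX poll loop from the timeout: microseconds of budget divided by a fixed 400 us step, clamped to at least one pass so a tiny timeout still probes once. A standalone sketch of that arithmetic:

/* Standalone sketch of the timeout-to-poll-count arithmetic indexed above. */
static unsigned int poll_iterations(unsigned int timeout_ms,
                                    unsigned int step_us)
{
        unsigned int iter = timeout_ms * 1000 / step_us;

        return iter ? iter : 1; /* never compute a zero-pass loop */
}
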
drivers/hid/hid-wiimote-core.c
1252
const __u8 *iter, *mods;
drivers/hid/hid-wiimote-core.c
1262
for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
drivers/hid/hid-wiimote-core.c
1263
ops = wiimod_table[*iter];
drivers/hid/hid-wiimote-core.c
1273
const __u8 *iter, *mods;
drivers/hid/hid-wiimote-core.c
1283
for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
drivers/hid/hid-wiimote-core.c
1284
ops = wiimod_table[*iter];
drivers/hid/hid-wiimote-core.c
1313
const __u8 *iter, *mods;
drivers/hid/hid-wiimote-core.c
1375
for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
drivers/hid/hid-wiimote-core.c
1376
ops = wiimod_table[*iter];
drivers/hid/hid-wiimote-core.c
1395
const __u8 *iter, *mods;
drivers/hid/hid-wiimote-core.c
1405
for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
drivers/hid/hid-wiimote-core.c
1406
ops = wiimod_table[*iter];
drivers/hid/hid-wiimote-core.c
628
const __u8 *mods, *iter;
drivers/hid/hid-wiimote-core.c
634
for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
drivers/hid/hid-wiimote-core.c
635
if (wiimod_table[*iter]->flags & WIIMOD_FLAG_INPUT) {
drivers/hid/hid-wiimote-core.c
655
for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
drivers/hid/hid-wiimote-core.c
656
ops = wiimod_table[*iter];
drivers/hid/hid-wiimote-core.c
677
for ( ; iter-- != mods; ) {
drivers/hid/hid-wiimote-core.c
678
ops = wiimod_table[*iter];
drivers/hid/hid-wiimote-core.c
691
const __u8 *mods, *iter;
drivers/hid/hid-wiimote-core.c
702
for (iter = mods; *iter != WIIMOD_NULL; ++iter)
drivers/hid/hid-wiimote-core.c
710
for ( ; iter-- != mods; ) {
drivers/hid/hid-wiimote-core.c
711
ops = wiimod_table[*iter];
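
The hid-wiimote entries pair a forward walk over a sentinel-terminated module table with a reverse unwind, for (; iter-- != mods; ), that revisits exactly the entries already initialized, newest first. Standalone sketch (SENTINEL and the ops table are illustrative):

/* Standalone sketch of the sentinel-walk-with-unwind idiom indexed above. */
enum { SENTINEL = 0xff };

struct mod_ops { int (*probe)(void); void (*remove)(void); };

static int load_mods(const unsigned char *mods,
                     const struct mod_ops *table[])
{
        const unsigned char *iter;
        int ret;

        for (iter = mods; *iter != SENTINEL; ++iter) {
                ret = table[*iter]->probe();
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        /* revisit everything already probed, newest first; the failed
         * entry itself is skipped by the post-decrement */
        for (; iter-- != mods; )
                table[*iter]->remove();
        return ret;
}
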
drivers/hv/channel_mgmt.c
974
struct vmbus_channel *channel = NULL, *iter;
drivers/hv/channel_mgmt.c
983
list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
drivers/hv/channel_mgmt.c
984
inst1 = &iter->offermsg.offer.if_instance;
drivers/hv/channel_mgmt.c
988
channel = iter;
drivers/hv/hv_common.c
189
struct kmsg_dump_iter iter;
drivers/hv/hv_common.c
202
kmsg_dump_rewind(&iter);
drivers/hv/hv_common.c
204
(void)kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
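
The hv_common.c entries show the panic-notifier capture: rewind a kmsg_dump_iter, then let kmsg_dump_get_buffer() fill a fixed page with the newest log records. A hedged sketch:

/* Hedged sketch of the kmsg_dump capture indexed above. */
#include <linux/kmsg_dump.h>

static size_t capture_log(char *page, size_t size)
{
        struct kmsg_dump_iter iter;
        size_t written = 0;

        kmsg_dump_rewind(&iter);
        /* false: no syslog prefixes; fills with the newest records */
        (void)kmsg_dump_get_buffer(&iter, false, page, size, &written);

        return written;
}
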
drivers/hv/vmbus_drv.c
2462
struct resource *iter, *shadow;
drivers/hv/vmbus_drv.c
2490
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
drivers/hv/vmbus_drv.c
2491
if ((iter->start >= max) || (iter->end <= min))
drivers/hv/vmbus_drv.c
2494
range_min = iter->start;
drivers/hv/vmbus_drv.c
2495
range_max = iter->end;
drivers/hv/vmbus_drv.c
2506
shadow = __request_region(iter, start, size, NULL,
drivers/hv/vmbus_drv.c
2518
__release_region(iter, start, size);
drivers/hv/vmbus_drv.c
2538
struct resource *iter;
drivers/hv/vmbus_drv.c
2552
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
drivers/hv/vmbus_drv.c
2553
if ((iter->start >= start + size) || (iter->end <= start))
drivers/hv/vmbus_drv.c
2556
__release_region(iter, start, size);
drivers/hwspinlock/hwspinlock_core.c
374
struct radix_tree_iter iter;
drivers/hwspinlock/hwspinlock_core.c
392
radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
drivers/hwspinlock/hwspinlock_core.c
397
slot = radix_tree_iter_retry(&iter);
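
The hwspinlock entries add the retry step every radix-tree walker needs under RCU: after dereferencing a slot, a deref-retry entry sends the walk back through radix_tree_iter_retry(). Hedged sketch:

/* Hedged sketch of the radix-tree walk with retry indexed above. */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void *find_first(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        void __rcu **slot;
        void *item = NULL;

        rcu_read_lock();        /* slots may move under us otherwise */
        radix_tree_for_each_slot(slot, root, &iter, 0) {
                item = radix_tree_deref_slot(slot);
                if (!item)
                        continue;
                if (radix_tree_deref_retry(item)) {
                        /* raced with a tree restructuring: redo this index */
                        slot = radix_tree_iter_retry(&iter);
                        item = NULL;
                        continue;
                }
                break;
        }
        rcu_read_unlock();

        return item;
}
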
drivers/hwtracing/intel_th/msu.c
1255
struct msc_window *win, *iter;
drivers/hwtracing/intel_th/msu.c
1257
list_for_each_entry_safe(win, iter, &msc->win_list, entry)
drivers/hwtracing/intel_th/msu.c
1475
struct msc_iter *iter;
drivers/hwtracing/intel_th/msu.c
1480
iter = msc_iter_install(msc);
drivers/hwtracing/intel_th/msu.c
1481
if (IS_ERR(iter))
drivers/hwtracing/intel_th/msu.c
1482
return PTR_ERR(iter);
drivers/hwtracing/intel_th/msu.c
1484
file->private_data = iter;
drivers/hwtracing/intel_th/msu.c
1491
struct msc_iter *iter = file->private_data;
drivers/hwtracing/intel_th/msu.c
1492
struct msc *msc = iter->msc;
drivers/hwtracing/intel_th/msu.c
1494
msc_iter_remove(iter, msc);
drivers/hwtracing/intel_th/msu.c
1538
struct msc_iter *iter = file->private_data;
drivers/hwtracing/intel_th/msu.c
1539
struct msc *msc = iter->msc;
drivers/hwtracing/intel_th/msu.c
1571
ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
drivers/hwtracing/intel_th/msu.c
1573
*ppos = iter->offset;
drivers/hwtracing/intel_th/msu.c
1590
struct msc_iter *iter = vma->vm_file->private_data;
drivers/hwtracing/intel_th/msu.c
1591
struct msc *msc = iter->msc;
drivers/hwtracing/intel_th/msu.c
1598
struct msc_iter *iter = vma->vm_file->private_data;
drivers/hwtracing/intel_th/msu.c
1599
struct msc *msc = iter->msc;
drivers/hwtracing/intel_th/msu.c
1611
struct msc_iter *iter = vmf->vma->vm_file->private_data;
drivers/hwtracing/intel_th/msu.c
1612
struct msc *msc = iter->msc;
drivers/hwtracing/intel_th/msu.c
1632
struct msc_iter *iter = vma->vm_file->private_data;
drivers/hwtracing/intel_th/msu.c
1633
struct msc *msc = iter->msc;
drivers/hwtracing/intel_th/msu.c
444
static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
drivers/hwtracing/intel_th/msu.c
446
return sg_virt(iter->block);
drivers/hwtracing/intel_th/msu.c
451
struct msc_iter *iter;
drivers/hwtracing/intel_th/msu.c
453
iter = kzalloc_obj(*iter);
drivers/hwtracing/intel_th/msu.c
454
if (!iter)
drivers/hwtracing/intel_th/msu.c
466
kfree(iter);
drivers/hwtracing/intel_th/msu.c
467
iter = ERR_PTR(-EBUSY);
drivers/hwtracing/intel_th/msu.c
471
iter->msc = msc;
drivers/hwtracing/intel_th/msu.c
473
list_add_tail(&iter->entry, &msc->iter_list);
drivers/hwtracing/intel_th/msu.c
477
return iter;
drivers/hwtracing/intel_th/msu.c
480
static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
drivers/hwtracing/intel_th/msu.c
483
list_del(&iter->entry);
drivers/hwtracing/intel_th/msu.c
486
kfree(iter);
drivers/hwtracing/intel_th/msu.c
489
static void msc_iter_block_start(struct msc_iter *iter)
drivers/hwtracing/intel_th/msu.c
491
if (iter->start_block)
drivers/hwtracing/intel_th/msu.c
494
iter->start_block = msc_win_oldest_sg(iter->win);
drivers/hwtracing/intel_th/msu.c
495
iter->block = iter->start_block;
drivers/hwtracing/intel_th/msu.c
496
iter->wrap_count = 0;
drivers/hwtracing/intel_th/msu.c
502
if (msc_block_wrapped(msc_iter_bdesc(iter)))
drivers/hwtracing/intel_th/msu.c
503
iter->wrap_count = 2;
drivers/hwtracing/intel_th/msu.c
507
static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
drivers/hwtracing/intel_th/msu.c
510
if (iter->start_win)
drivers/hwtracing/intel_th/msu.c
513
iter->start_win = msc_oldest_window(msc);
drivers/hwtracing/intel_th/msu.c
514
if (!iter->start_win)
drivers/hwtracing/intel_th/msu.c
517
iter->win = iter->start_win;
drivers/hwtracing/intel_th/msu.c
518
iter->start_block = NULL;
drivers/hwtracing/intel_th/msu.c
520
msc_iter_block_start(iter);
drivers/hwtracing/intel_th/msu.c
525
static int msc_iter_win_advance(struct msc_iter *iter)
drivers/hwtracing/intel_th/msu.c
527
iter->win = msc_next_window(iter->win);
drivers/hwtracing/intel_th/msu.c
528
iter->start_block = NULL;
drivers/hwtracing/intel_th/msu.c
530
if (iter->win == iter->start_win) {
drivers/hwtracing/intel_th/msu.c
531
iter->eof++;
drivers/hwtracing/intel_th/msu.c
535
msc_iter_block_start(iter);
drivers/hwtracing/intel_th/msu.c
540
static int msc_iter_block_advance(struct msc_iter *iter)
drivers/hwtracing/intel_th/msu.c
542
iter->block_off = 0;
drivers/hwtracing/intel_th/msu.c
545
if (iter->wrap_count && iter->block == iter->start_block) {
drivers/hwtracing/intel_th/msu.c
546
iter->wrap_count--;
drivers/hwtracing/intel_th/msu.c
547
if (!iter->wrap_count)
drivers/hwtracing/intel_th/msu.c
549
return msc_iter_win_advance(iter);
drivers/hwtracing/intel_th/msu.c
553
if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
drivers/hwtracing/intel_th/msu.c
555
return msc_iter_win_advance(iter);
drivers/hwtracing/intel_th/msu.c
558
if (sg_is_last(iter->block))
drivers/hwtracing/intel_th/msu.c
559
iter->block = msc_win_base_sg(iter->win);
drivers/hwtracing/intel_th/msu.c
561
iter->block = sg_next(iter->block);
drivers/hwtracing/intel_th/msu.c
564
if (!iter->wrap_count && iter->block == iter->start_block)
drivers/hwtracing/intel_th/msu.c
565
return msc_iter_win_advance(iter);
drivers/hwtracing/intel_th/msu.c
587
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
drivers/hwtracing/intel_th/msu.c
590
struct msc *msc = iter->msc;
drivers/hwtracing/intel_th/msu.c
594
if (iter->eof)
drivers/hwtracing/intel_th/msu.c
598
if (msc_iter_win_start(iter, msc))
drivers/hwtracing/intel_th/msu.c
602
unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
drivers/hwtracing/intel_th/msu.c
603
void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
drivers/hwtracing/intel_th/msu.c
621
if (iter->block == iter->start_block && iter->wrap_count == 2) {
drivers/hwtracing/intel_th/msu.c
629
tocopy -= iter->block_off;
drivers/hwtracing/intel_th/msu.c
630
src += iter->block_off;
drivers/hwtracing/intel_th/msu.c
644
iter->block_off += copied;
drivers/hwtracing/intel_th/msu.c
645
iter->offset += copied;
drivers/hwtracing/intel_th/msu.c
651
if (msc_iter_block_advance(iter))
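
The msu.c entries, read together, describe a three-level iterator: a ring of windows, blocks inside each window, and a byte offset inside each block; advancing a block can spill into the next window, and coming back around to the start window raises eof. A compressed standalone sketch of just that control flow, with illustrative stand-ins for the msc_iter structures:

/* Standalone sketch of the nested window/block advance indexed above. */
struct ring { int nr; };

struct nested_iter {
        struct ring *win;
        int start_win, cur_win; /* window ring position */
        int cur_block;          /* block within the window */
        int eof;
};

static int win_advance(struct nested_iter *it)
{
        it->cur_win = (it->cur_win + 1) % it->win->nr;
        it->cur_block = 0;
        if (it->cur_win == it->start_win) {     /* wrapped all the way */
                it->eof = 1;
                return 1;
        }
        return 0;
}

static int block_advance(struct nested_iter *it, int blocks_per_win)
{
        if (++it->cur_block >= blocks_per_win)
                return win_advance(it); /* spill into the next window */
        return 0;
}
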
drivers/hwtracing/stm/core.c
435
struct stm_pdrv_entry *pe, *iter;
drivers/hwtracing/stm/core.c
439
list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
drivers/hwtracing/stm/core.c
931
struct stm_source_device *src, *iter;
drivers/hwtracing/stm/core.c
938
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
drivers/i2c/busses/i2c-npcm7xx.c
1959
int iter = 27;
drivers/i2c/busses/i2c-npcm7xx.c
1999
} while (!done && iter--);
drivers/iio/common/ssp_sensors/ssp_spi.c
338
struct ssp_msg *msg = NULL, *iter, *n;
drivers/iio/common/ssp_sensors/ssp_spi.c
364
list_for_each_entry_safe(iter, n, &data->pending_list, list) {
drivers/iio/common/ssp_sensors/ssp_spi.c
365
if (iter->options == msg_options) {
drivers/iio/common/ssp_sensors/ssp_spi.c
366
list_del(&iter->list);
drivers/iio/common/ssp_sensors/ssp_spi.c
367
msg = iter;
drivers/iio/industrialio-backend.c
582
struct iio_backend *back = ERR_PTR(-ENODEV), *iter;
drivers/iio/industrialio-backend.c
592
list_for_each_entry(iter, &iio_back_list, entry) {
drivers/iio/industrialio-backend.c
593
if (dev == iter->frontend_dev) {
drivers/iio/industrialio-backend.c
600
back = iter;
drivers/iio/industrialio-sw-device.c
32
struct iio_sw_device_type *d = NULL, *iter;
drivers/iio/industrialio-sw-device.c
34
list_for_each_entry(iter, &iio_device_types_list, list)
drivers/iio/industrialio-sw-device.c
35
if (!strcmp(iter->name, name)) {
drivers/iio/industrialio-sw-device.c
36
d = iter;
drivers/iio/industrialio-sw-device.c
45
struct iio_sw_device_type *iter;
drivers/iio/industrialio-sw-device.c
49
iter = __iio_find_sw_device_type(d->name, strlen(d->name));
drivers/iio/industrialio-sw-device.c
50
if (iter)
drivers/iio/industrialio-sw-device.c
70
struct iio_sw_device_type *iter;
drivers/iio/industrialio-sw-device.c
73
iter = __iio_find_sw_device_type(dt->name, strlen(dt->name));
drivers/iio/industrialio-sw-device.c
74
if (iter)
drivers/iio/industrialio-sw-trigger.c
32
struct iio_sw_trigger_type *t = NULL, *iter;
drivers/iio/industrialio-sw-trigger.c
34
list_for_each_entry(iter, &iio_trigger_types_list, list)
drivers/iio/industrialio-sw-trigger.c
35
if (!strcmp(iter->name, name)) {
drivers/iio/industrialio-sw-trigger.c
36
t = iter;
drivers/iio/industrialio-sw-trigger.c
45
struct iio_sw_trigger_type *iter;
drivers/iio/industrialio-sw-trigger.c
49
iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
drivers/iio/industrialio-sw-trigger.c
50
if (iter)
drivers/iio/industrialio-sw-trigger.c
74
struct iio_sw_trigger_type *iter;
drivers/iio/industrialio-sw-trigger.c
77
iter = __iio_find_sw_trigger_type(t->name, strlen(t->name));
drivers/iio/industrialio-sw-trigger.c
78
if (iter)
drivers/iio/industrialio-trigger.c
135
struct iio_trigger *iter;
drivers/iio/industrialio-trigger.c
137
list_for_each_entry(iter, &iio_trigger_list, list)
drivers/iio/industrialio-trigger.c
138
if (!strcmp(iter->name, name))
drivers/iio/industrialio-trigger.c
139
return iter;
drivers/iio/industrialio-trigger.c
146
struct iio_trigger *iter;
drivers/iio/industrialio-trigger.c
149
list_for_each_entry(iter, &iio_trigger_list, list)
drivers/iio/industrialio-trigger.c
150
if (sysfs_streq(iter->name, name))
drivers/iio/industrialio-trigger.c
151
return iio_trigger_get(iter);
drivers/iio/trigger/iio-trig-sysfs.c
179
struct iio_sysfs_trig *t = NULL, *iter;
drivers/iio/trigger/iio-trig-sysfs.c
182
list_for_each_entry(iter, &iio_sysfs_trig_list, l)
drivers/iio/trigger/iio-trig-sysfs.c
183
if (id == iter->id) {
drivers/iio/trigger/iio-trig-sysfs.c
184
t = iter;
drivers/infiniband/core/rw.c
206
struct bvec_iter *iter, u64 remote_addr, u32 rkey,
drivers/infiniband/core/rw.c
231
for (sg = ctx->reg[0].sgt.sgl; iter->bi_size; sg = sg_next(sg)) {
drivers/infiniband/core/rw.c
232
struct bio_vec bv = mp_bvec_iter_bvec(bvecs, *iter);
drivers/infiniband/core/rw.c
239
bvec_iter_advance(bvecs, iter, bv.bv_len);
drivers/infiniband/core/rw.c
371
struct bvec_iter *iter, u64 remote_addr, u32 rkey,
drivers/infiniband/core/rw.c
376
struct bio_vec bv = mp_bvec_iter_bvec(bvecs, *iter);
drivers/infiniband/core/rw.c
404
const struct bio_vec *bvecs, u32 nr_bvec, struct bvec_iter *iter,
drivers/infiniband/core/rw.c
444
struct bio_vec bv = mp_bvec_iter_bvec(bvecs, *iter);
drivers/infiniband/core/rw.c
459
bvec_iter_advance_single(bvecs, iter, bv.bv_len);
drivers/infiniband/core/rw.c
489
struct bvec_iter *iter, u64 remote_addr, u32 rkey,
drivers/infiniband/core/rw.c
494
size_t total_len = iter->bi_size;
drivers/infiniband/core/rw.c
504
first_bv = mp_bvec_iter_bvec(bvec, *iter);
drivers/infiniband/core/rw.c
510
while (iter->bi_size) {
drivers/infiniband/core/rw.c
511
struct bio_vec bv = mp_bvec_iter_bvec(bvec, *iter);
drivers/infiniband/core/rw.c
519
bvec_iter_advance(bvec, iter, bv.bv_len);
drivers/infiniband/core/rw.c
667
struct bvec_iter iter, u64 remote_addr, u32 rkey,
drivers/infiniband/core/rw.c
673
if (nr_bvec == 0 || iter.bi_size == 0)
drivers/infiniband/core/rw.c
682
nr_bvec, &iter, remote_addr,
drivers/infiniband/core/rw.c
686
nr_bvec, &iter, remote_addr,
drivers/infiniband/core/rw.c
690
return rdma_rw_init_single_wr_bvec(ctx, qp, bvecs, &iter,
drivers/infiniband/core/rw.c
698
ret = rdma_rw_init_iova_wrs_bvec(ctx, qp, bvecs, &iter, remote_addr,
drivers/infiniband/core/rw.c
714
return rdma_rw_init_map_wrs_bvec(ctx, qp, bvecs, nr_bvec, &iter,
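
The rw.c entries drive a struct bvec_iter by hand: mp_bvec_iter_bvec() materializes the current segment and bvec_iter_advance() consumes it. Passing the iterator by value, as in this hedged sketch, leaves the caller's cursor untouched:

/* Hedged sketch of the bvec_iter walk indexed above. */
#include <linux/bvec.h>

static size_t total_segments(const struct bio_vec *bvecs,
                             struct bvec_iter iter)
{
        size_t segs = 0;

        while (iter.bi_size) {
                struct bio_vec bv = mp_bvec_iter_bvec(bvecs, iter);

                segs++;
                if (!bvec_iter_advance(bvecs, &iter, bv.bv_len))
                        break;  /* defensive: tried to advance past end */
        }
        return segs;
}
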
drivers/infiniband/core/sa_query.c
1132
struct ib_sa_query *query = NULL, *iter;
drivers/infiniband/core/sa_query.c
1142
list_for_each_entry(iter, &ib_nl_request_list, list) {
drivers/infiniband/core/sa_query.c
1147
if (nlh->nlmsg_seq == iter->seq) {
drivers/infiniband/core/sa_query.c
1148
if (!ib_sa_query_cancelled(iter)) {
drivers/infiniband/core/sa_query.c
1149
list_del(&iter->list);
drivers/infiniband/core/sa_query.c
1150
query = iter;
drivers/infiniband/core/uverbs_cmd.c
132
struct uverbs_req_iter *iter,
drivers/infiniband/core/uverbs_cmd.c
142
iter->cur = attrs->ucore.inbuf + req_len;
drivers/infiniband/core/uverbs_cmd.c
143
iter->end = attrs->ucore.inbuf + attrs->ucore.inlen;
drivers/infiniband/core/uverbs_cmd.c
147
static int uverbs_request_next(struct uverbs_req_iter *iter, void *val,
drivers/infiniband/core/uverbs_cmd.c
150
if (iter->cur + len > iter->end)
drivers/infiniband/core/uverbs_cmd.c
153
if (copy_from_user(val, iter->cur, len))
drivers/infiniband/core/uverbs_cmd.c
156
iter->cur += len;
drivers/infiniband/core/uverbs_cmd.c
160
static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
drivers/infiniband/core/uverbs_cmd.c
163
const void __user *res = iter->cur;
drivers/infiniband/core/uverbs_cmd.c
165
if (len > iter->end - iter->cur)
drivers/infiniband/core/uverbs_cmd.c
167
iter->cur += len;
drivers/infiniband/core/uverbs_cmd.c
171
static int uverbs_request_finish(struct uverbs_req_iter *iter)
drivers/infiniband/core/uverbs_cmd.c
173
if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur))
drivers/infiniband/core/uverbs_cmd.c
2034
struct uverbs_req_iter iter;
drivers/infiniband/core/uverbs_cmd.c
2036
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
drivers/infiniband/core/uverbs_cmd.c
2039
wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
drivers/infiniband/core/uverbs_cmd.c
2043
sgls = uverbs_request_next_ptr(&iter,
drivers/infiniband/core/uverbs_cmd.c
2048
ret = uverbs_request_finish(&iter);
drivers/infiniband/core/uverbs_cmd.c
2220
ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
drivers/infiniband/core/uverbs_cmd.c
2234
wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
drivers/infiniband/core/uverbs_cmd.c
2237
sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
drivers/infiniband/core/uverbs_cmd.c
2241
ret = uverbs_request_finish(iter);
drivers/infiniband/core/uverbs_cmd.c
2325
struct uverbs_req_iter iter;
drivers/infiniband/core/uverbs_cmd.c
2327
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
drivers/infiniband/core/uverbs_cmd.c
2331
wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
drivers/infiniband/core/uverbs_cmd.c
2376
struct uverbs_req_iter iter;
drivers/infiniband/core/uverbs_cmd.c
2378
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
drivers/infiniband/core/uverbs_cmd.c
2382
wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size,
drivers/infiniband/core/uverbs_cmd.c
3091
struct uverbs_req_iter iter;
drivers/infiniband/core/uverbs_cmd.c
3094
err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
drivers/infiniband/core/uverbs_cmd.c
3110
err = uverbs_request_next(&iter, wqs_handles,
drivers/infiniband/core/uverbs_cmd.c
3115
err = uverbs_request_finish(&iter);
drivers/infiniband/core/uverbs_cmd.c
3219
struct uverbs_req_iter iter;
drivers/infiniband/core/uverbs_cmd.c
3225
err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
drivers/infiniband/core/uverbs_cmd.c
3261
err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs,
drivers/infiniband/core/uverbs_cmd.c
3269
err = uverbs_request_finish(&iter);
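
The uverbs_cmd.c entries amount to a small request protocol: uverbs_request_next() copies and advances under a bounds check, uverbs_request_next_ptr() hands back a window, and uverbs_request_finish() insists the unconsumed tail is zero so an oversized request cannot smuggle garbage past a kernel that does not understand it. A standalone sketch of the core checks; the kernel versions copy from userspace and use a cleared-buffer helper:

/* Standalone sketch of the bounded request-iterator protocol indexed above. */
#include <string.h>
#include <stddef.h>

struct req_iter { const char *cur, *end; };

static int req_next(struct req_iter *iter, void *val, size_t len)
{
        if (len > (size_t)(iter->end - iter->cur))
                return -1;      /* request shorter than it claims */

        memcpy(val, iter->cur, len);    /* kernel would copy_from_user() */
        iter->cur += len;
        return 0;
}

static int req_finish(struct req_iter *iter)
{
        /* any trailing bytes must be zero, or the request is malformed */
        while (iter->cur < iter->end)
                if (*iter->cur++)
                        return -1;
        return 0;
}
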
drivers/infiniband/core/uverbs_uapi.c
356
struct radix_tree_iter iter;
drivers/infiniband/core/uverbs_uapi.c
363
radix_tree_for_each_slot (slot, &uapi->radix, &iter,
drivers/infiniband/core/uverbs_uapi.c
367
u32 attr_key = iter.index & UVERBS_API_ATTR_KEY_MASK;
drivers/infiniband/core/uverbs_uapi.c
371
if (uapi_key_attr_to_ioctl_method(iter.index) !=
drivers/infiniband/core/uverbs_uapi.c
420
struct radix_tree_iter iter;
drivers/infiniband/core/uverbs_uapi.c
425
radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
drivers/infiniband/core/uverbs_uapi.c
429
if (uapi_key_is_ioctl_method(iter.index)) {
drivers/infiniband/core/uverbs_uapi.c
431
iter.index);
drivers/infiniband/core/uverbs_uapi.c
436
if (uapi_key_is_write_method(iter.index))
drivers/infiniband/core/uverbs_uapi.c
438
iter.index & UVERBS_API_ATTR_KEY_MASK);
drivers/infiniband/core/uverbs_uapi.c
439
if (uapi_key_is_write_ex_method(iter.index))
drivers/infiniband/core/uverbs_uapi.c
442
iter.index & UVERBS_API_ATTR_KEY_MASK);
drivers/infiniband/core/uverbs_uapi.c
458
radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
drivers/infiniband/core/uverbs_uapi.c
459
if (uapi_key_is_write_method(iter.index))
drivers/infiniband/core/uverbs_uapi.c
460
uapi->write_methods[iter.index &
drivers/infiniband/core/uverbs_uapi.c
463
if (uapi_key_is_write_ex_method(iter.index))
drivers/infiniband/core/uverbs_uapi.c
464
uapi->write_ex_methods[iter.index &
drivers/infiniband/core/uverbs_uapi.c
474
struct radix_tree_iter iter;
drivers/infiniband/core/uverbs_uapi.c
477
radix_tree_for_each_slot (slot, &uapi->radix, &iter, start) {
drivers/infiniband/core/uverbs_uapi.c
478
if (iter.index > last)
drivers/infiniband/core/uverbs_uapi.c
481
radix_tree_iter_delete(&uapi->radix, &iter, slot);
drivers/infiniband/core/uverbs_uapi.c
528
struct radix_tree_iter iter;
drivers/infiniband/core/uverbs_uapi.c
534
radix_tree_for_each_slot (slot, &uapi->radix, &iter, starting_key) {
drivers/infiniband/core/uverbs_uapi.c
535
uapi_key_okay(iter.index);
drivers/infiniband/core/uverbs_uapi.c
537
if (uapi_key_is_object(iter.index)) {
drivers/infiniband/core/uverbs_uapi.c
544
starting_key = iter.index;
drivers/infiniband/core/uverbs_uapi.c
545
uapi_remove_object(uapi, iter.index);
drivers/infiniband/core/uverbs_uapi.c
551
if (uapi_key_is_ioctl_method(iter.index)) {
drivers/infiniband/core/uverbs_uapi.c
556
starting_key = iter.index;
drivers/infiniband/core/uverbs_uapi.c
557
uapi_remove_method(uapi, iter.index);
drivers/infiniband/core/uverbs_uapi.c
563
if (uapi_key_is_write_method(iter.index) ||
drivers/infiniband/core/uverbs_uapi.c
564
uapi_key_is_write_ex_method(iter.index)) {
drivers/infiniband/core/uverbs_uapi.c
570
radix_tree_iter_delete(&uapi->radix, &iter, slot);
drivers/infiniband/core/uverbs_uapi.c
575
if (uapi_key_is_attr(iter.index)) {
drivers/infiniband/core/uverbs_uapi.c
600
starting_key = iter.index;
drivers/infiniband/core/uverbs_uapi.c
603
iter.index & (UVERBS_API_OBJ_KEY_MASK |
drivers/infiniband/core/uverbs_uapi.c
689
struct radix_tree_iter iter;
drivers/infiniband/core/uverbs_uapi.c
694
radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
drivers/infiniband/core/uverbs_uapi.c
695
if (uapi_key_is_ioctl_method(iter.index)) {
drivers/infiniband/core/uverbs_uapi.c
714
struct radix_tree_iter iter;
drivers/infiniband/core/uverbs_uapi.c
717
radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) {
drivers/infiniband/core/uverbs_uapi.c
718
if (uapi_key_is_object(iter.index)) {
drivers/infiniband/core/uverbs_uapi.c
728
} else if (uapi_key_is_attr(iter.index)) {
drivers/infiniband/hw/hfi1/debugfs.c
193
struct rvt_qp_iter *iter;
drivers/infiniband/hw/hfi1/debugfs.c
196
iter = rvt_qp_iter_init(s->private, 0, NULL);
drivers/infiniband/hw/hfi1/debugfs.c
201
if (!iter)
drivers/infiniband/hw/hfi1/debugfs.c
205
if (rvt_qp_iter_next(iter)) {
drivers/infiniband/hw/hfi1/debugfs.c
206
kfree(iter);
drivers/infiniband/hw/hfi1/debugfs.c
211
return iter;
drivers/infiniband/hw/hfi1/debugfs.c
218
struct rvt_qp_iter *iter = iter_ptr;
drivers/infiniband/hw/hfi1/debugfs.c
222
if (rvt_qp_iter_next(iter)) {
drivers/infiniband/hw/hfi1/debugfs.c
223
kfree(iter);
drivers/infiniband/hw/hfi1/debugfs.c
227
return iter;
drivers/infiniband/hw/hfi1/debugfs.c
238
struct rvt_qp_iter *iter = iter_ptr;
drivers/infiniband/hw/hfi1/debugfs.c
240
if (!iter)
drivers/infiniband/hw/hfi1/debugfs.c
243
qp_iter_print(s, iter);
drivers/infiniband/hw/hfi1/qp.c
598
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
drivers/infiniband/hw/hfi1/qp.c
601
struct rvt_qp *qp = iter->qp;
drivers/infiniband/hw/hfi1/qp.c
616
iter->n,
drivers/infiniband/hw/hfi1/qp.h
83
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
drivers/infiniband/hw/mlx5/fs.c
648
struct mlx5_ib_flow_handler *iter, *tmp;
drivers/infiniband/hw/mlx5/fs.c
653
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
drivers/infiniband/hw/mlx5/fs.c
654
mlx5_del_flow_rules(iter->rule);
drivers/infiniband/hw/mlx5/fs.c
655
put_flow_table(dev, iter->prio, true);
drivers/infiniband/hw/mlx5/fs.c
656
list_del(&iter->list);
drivers/infiniband/hw/mlx5/fs.c
657
kfree(iter);
drivers/infiniband/hw/mlx5/macsec.c
43
struct mlx5_macsec_device *iter, *macsec_device = NULL;
drivers/infiniband/hw/mlx5/macsec.c
45
list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
drivers/infiniband/hw/mlx5/macsec.c
46
if (iter->macdev == macdev) {
drivers/infiniband/hw/mlx5/macsec.c
47
macsec_device = iter;
drivers/infiniband/hw/mthca/mthca_cmd.c
660
struct mthca_icm_iter iter;
drivers/infiniband/hw/mthca/mthca_cmd.c
674
for (mthca_icm_first(icm, &iter);
drivers/infiniband/hw/mthca/mthca_cmd.c
675
!mthca_icm_last(&iter);
drivers/infiniband/hw/mthca/mthca_cmd.c
676
mthca_icm_next(&iter)) {
drivers/infiniband/hw/mthca/mthca_cmd.c
682
lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
drivers/infiniband/hw/mthca/mthca_cmd.c
686
(unsigned long long) mthca_icm_addr(&iter),
drivers/infiniband/hw/mthca/mthca_cmd.c
687
mthca_icm_size(&iter));
drivers/infiniband/hw/mthca/mthca_cmd.c
691
for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
drivers/infiniband/hw/mthca/mthca_cmd.c
698
cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
drivers/infiniband/hw/mthca/mthca_memfree.h
100
struct mthca_icm_iter *iter)
drivers/infiniband/hw/mthca/mthca_memfree.h
102
iter->icm = icm;
drivers/infiniband/hw/mthca/mthca_memfree.h
103
iter->chunk = list_empty(&icm->chunk_list) ?
drivers/infiniband/hw/mthca/mthca_memfree.h
106
iter->page_idx = 0;
drivers/infiniband/hw/mthca/mthca_memfree.h
109
static inline int mthca_icm_last(struct mthca_icm_iter *iter)
drivers/infiniband/hw/mthca/mthca_memfree.h
111
return !iter->chunk;
drivers/infiniband/hw/mthca/mthca_memfree.h
114
static inline void mthca_icm_next(struct mthca_icm_iter *iter)
drivers/infiniband/hw/mthca/mthca_memfree.h
116
if (++iter->page_idx >= iter->chunk->nsg) {
drivers/infiniband/hw/mthca/mthca_memfree.h
117
if (iter->chunk->list.next == &iter->icm->chunk_list) {
drivers/infiniband/hw/mthca/mthca_memfree.h
118
iter->chunk = NULL;
drivers/infiniband/hw/mthca/mthca_memfree.h
122
iter->chunk = list_entry(iter->chunk->list.next,
drivers/infiniband/hw/mthca/mthca_memfree.h
124
iter->page_idx = 0;
drivers/infiniband/hw/mthca/mthca_memfree.h
128
static inline dma_addr_t mthca_icm_addr(struct mthca_icm_iter *iter)
drivers/infiniband/hw/mthca/mthca_memfree.h
130
return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
drivers/infiniband/hw/mthca/mthca_memfree.h
133
static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter)
drivers/infiniband/hw/mthca/mthca_memfree.h
135
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
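
The mthca entries define a textbook external iterator over a chunk list: first/last/next plus accessors, with a page index that rolls inside each chunk and a NULL chunk meaning the walk is done. Standalone sketch:

/* Standalone sketch of the chunk/page iterator indexed above. */
#include <stddef.h>

struct chunk { struct chunk *next; int nsg; };

struct chunk_iter {
        struct chunk *chunk;    /* NULL == past the end */
        int page_idx;
};

static void chunk_iter_first(struct chunk *head, struct chunk_iter *iter)
{
        iter->chunk = head;
        iter->page_idx = 0;
}

static int chunk_iter_last(const struct chunk_iter *iter)
{
        return !iter->chunk;
}

static void chunk_iter_next(struct chunk_iter *iter)
{
        if (++iter->page_idx >= iter->chunk->nsg) {
                iter->chunk = iter->chunk->next;        /* may become NULL */
                iter->page_idx = 0;
        }
}
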
drivers/infiniband/hw/qedr/verbs.c
1066
int iter;
drivers/infiniband/hw/qedr/verbs.c
1106
iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
drivers/infiniband/hw/qedr/verbs.c
1107
while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
drivers/infiniband/hw/qedr/verbs.c
1109
iter--;
drivers/infiniband/hw/qedr/verbs.c
1112
iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
drivers/infiniband/hw/qedr/verbs.c
1113
while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
drivers/infiniband/hw/qedr/verbs.c
1115
iter--;
drivers/infiniband/sw/rdmavt/qp.c
2679
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
drivers/infiniband/sw/rdmavt/qp.c
2682
int n = iter->n;
drivers/infiniband/sw/rdmavt/qp.c
2684
struct rvt_qp *pqp = iter->qp;
drivers/infiniband/sw/rdmavt/qp.c
2686
struct rvt_dev_info *rdi = iter->rdi;
drivers/infiniband/sw/rdmavt/qp.c
2702
for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
drivers/infiniband/sw/rdmavt/qp.c
2706
if (n < iter->specials) {
drivers/infiniband/sw/rdmavt/qp.c
2716
(n - iter->specials)]);
drivers/infiniband/sw/rdmavt/qp.c
2721
iter->qp = qp;
drivers/infiniband/sw/rdmavt/qp.c
2722
iter->n = n;
drivers/infiniband/ulp/ipoib/ipoib.h
564
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
drivers/infiniband/ulp/ipoib/ipoib.h
565
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
drivers/infiniband/ulp/ipoib/ipoib.h
573
int ipoib_path_iter_next(struct ipoib_path_iter *iter);
drivers/infiniband/ulp/ipoib/ipoib.h
574
void ipoib_path_iter_read(struct ipoib_path_iter *iter,
drivers/infiniband/ulp/ipoib/ipoib_fs.c
105
if (!iter)
drivers/infiniband/ulp/ipoib/ipoib_fs.c
108
ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
drivers/infiniband/ulp/ipoib/ipoib_fs.c
138
struct ipoib_path_iter *iter;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
141
iter = ipoib_path_iter_init(file->private);
drivers/infiniband/ulp/ipoib/ipoib_fs.c
142
if (!iter)
drivers/infiniband/ulp/ipoib/ipoib_fs.c
146
if (ipoib_path_iter_next(iter)) {
drivers/infiniband/ulp/ipoib/ipoib_fs.c
147
kfree(iter);
drivers/infiniband/ulp/ipoib/ipoib_fs.c
152
return iter;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
158
struct ipoib_path_iter *iter = iter_ptr;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
162
if (ipoib_path_iter_next(iter)) {
drivers/infiniband/ulp/ipoib/ipoib_fs.c
163
kfree(iter);
drivers/infiniband/ulp/ipoib/ipoib_fs.c
167
return iter;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
177
struct ipoib_path_iter *iter = iter_ptr;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
182
if (!iter)
drivers/infiniband/ulp/ipoib/ipoib_fs.c
185
ipoib_path_iter_read(iter, &path);
drivers/infiniband/ulp/ipoib/ipoib_fs.c
60
struct ipoib_mcast_iter *iter;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
63
iter = ipoib_mcast_iter_init(file->private);
drivers/infiniband/ulp/ipoib/ipoib_fs.c
64
if (!iter)
drivers/infiniband/ulp/ipoib/ipoib_fs.c
68
if (ipoib_mcast_iter_next(iter)) {
drivers/infiniband/ulp/ipoib/ipoib_fs.c
69
kfree(iter);
drivers/infiniband/ulp/ipoib/ipoib_fs.c
74
return iter;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
80
struct ipoib_mcast_iter *iter = iter_ptr;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
84
if (ipoib_mcast_iter_next(iter)) {
drivers/infiniband/ulp/ipoib/ipoib_fs.c
85
kfree(iter);
drivers/infiniband/ulp/ipoib/ipoib_fs.c
89
return iter;
drivers/infiniband/ulp/ipoib/ipoib_fs.c
99
struct ipoib_mcast_iter *iter = iter_ptr;
drivers/infiniband/ulp/ipoib/ipoib_main.c
674
struct ipoib_path_iter *iter;
drivers/infiniband/ulp/ipoib/ipoib_main.c
676
iter = kmalloc_obj(*iter);
drivers/infiniband/ulp/ipoib/ipoib_main.c
677
if (!iter)
drivers/infiniband/ulp/ipoib/ipoib_main.c
680
iter->dev = dev;
drivers/infiniband/ulp/ipoib/ipoib_main.c
681
memset(iter->path.pathrec.dgid.raw, 0, 16);
drivers/infiniband/ulp/ipoib/ipoib_main.c
683
if (ipoib_path_iter_next(iter)) {
drivers/infiniband/ulp/ipoib/ipoib_main.c
684
kfree(iter);
drivers/infiniband/ulp/ipoib/ipoib_main.c
688
return iter;
drivers/infiniband/ulp/ipoib/ipoib_main.c
691
int ipoib_path_iter_next(struct ipoib_path_iter *iter)
drivers/infiniband/ulp/ipoib/ipoib_main.c
693
struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
drivers/infiniband/ulp/ipoib/ipoib_main.c
705
if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
drivers/infiniband/ulp/ipoib/ipoib_main.c
707
iter->path = *path;
drivers/infiniband/ulp/ipoib/ipoib_main.c
720
void ipoib_path_iter_read(struct ipoib_path_iter *iter,
drivers/infiniband/ulp/ipoib/ipoib_main.c
723
*path = iter->path;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1000
struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1012
if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1014
iter->mgid = mcast->mcmember.mgid;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1015
iter->created = mcast->created;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1016
iter->queuelen = skb_queue_len(&mcast->pkt_queue);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1017
iter->complete = !!mcast->ah;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1018
iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1033
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1040
*mgid = iter->mgid;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1041
*created = iter->created;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1042
*queuelen = iter->queuelen;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1043
*complete = iter->complete;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1044
*send_only = iter->send_only;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
981
struct ipoib_mcast_iter *iter;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
983
iter = kmalloc_obj(*iter);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
984
if (!iter)
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
987
iter->dev = dev;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
988
memset(iter->mgid.raw, 0, 16);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
990
if (ipoib_mcast_iter_next(iter)) {
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
991
kfree(iter);
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
995
return iter;
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
998
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
drivers/infiniband/ulp/rtrs/rtrs-clt.c
956
struct iov_iter iter;
drivers/infiniband/ulp/rtrs/rtrs-clt.c
975
iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
drivers/infiniband/ulp/rtrs/rtrs-clt.c
976
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
drivers/iommu/generic_pt/kunit_iommu_pt.h
334
unsigned int iter;
drivers/iommu/generic_pt/kunit_iommu_pt.h
345
for (iter = 0; iter != 1000; iter++) {
drivers/iommu/generic_pt/kunit_iommu_pt.h
385
if (iter % 100)
drivers/iommu/intel/dmar.c
586
struct acpi_dmar_header *iter, *next;
drivers/iommu/intel/dmar.c
589
for (iter = start; iter < end; iter = next) {
drivers/iommu/intel/dmar.c
590
next = (void *)iter + iter->length;
drivers/iommu/intel/dmar.c
591
if (iter->length == 0) {
drivers/iommu/intel/dmar.c
602
dmar_table_print_dmar_entry(iter);
drivers/iommu/intel/dmar.c
604
if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
drivers/iommu/intel/dmar.c
607
iter->type);
drivers/iommu/intel/dmar.c
608
} else if (cb->cb[iter->type]) {
drivers/iommu/intel/dmar.c
611
ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
drivers/iommu/intel/dmar.c
616
iter->type);
drivers/iommu/iommu.c
863
struct iommu_resv_region *iter, *tmp, *nr, *top;
drivers/iommu/iommu.c
872
list_for_each_entry(iter, regions, list) {
drivers/iommu/iommu.c
873
if (nr->start < iter->start ||
drivers/iommu/iommu.c
874
(nr->start == iter->start && nr->type <= iter->type))
drivers/iommu/iommu.c
877
list_add_tail(&nr->list, &iter->list);
drivers/iommu/iommu.c
880
list_for_each_entry_safe(iter, tmp, regions, list) {
drivers/iommu/iommu.c
881
phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
drivers/iommu/iommu.c
884
if (iter->type != new->type) {
drivers/iommu/iommu.c
885
list_move_tail(&iter->list, &stack);
drivers/iommu/iommu.c
891
if (top->type == iter->type)
drivers/iommu/iommu.c
894
list_move_tail(&iter->list, &stack);
drivers/iommu/iommu.c
900
if (iter->start > top_end + 1) {
drivers/iommu/iommu.c
901
list_move_tail(&iter->list, &stack);
drivers/iommu/iommu.c
904
list_del(&iter->list);
drivers/iommu/iommu.c
905
kfree(iter);
drivers/iommu/iommufd/device.c
1336
struct iopt_area_contig_iter iter;
drivers/iommu/iommufd/device.c
1357
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
drivers/iommu/iommufd/device.c
1359
area, iopt_area_iova_to_index(area, iter.cur_iova),
drivers/iommu/iommufd/device.c
1364
WARN_ON(!iopt_area_contig_done(&iter));
drivers/iommu/iommufd/device.c
1370
static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
drivers/iommu/iommufd/device.c
1372
if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
drivers/iommu/iommufd/device.c
1375
if (!iopt_area_contig_done(iter) &&
drivers/iommu/iommufd/device.c
1376
(iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
drivers/iommu/iommufd/device.c
1413
struct iopt_area_contig_iter iter;
drivers/iommu/iommufd/device.c
1438
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
drivers/iommu/iommufd/device.c
1442
iopt_area_iova_to_index(area, iter.cur_iova);
drivers/iommu/iommufd/device.c
1445
!iopt_area_contig_is_aligned(&iter)) {
drivers/iommu/iommufd/device.c
1461
if (!iopt_area_contig_done(&iter)) {
drivers/iommu/iommufd/device.c
1471
if (iova < iter.cur_iova) {
drivers/iommu/iommufd/device.c
1472
last_iova = iter.cur_iova - 1;
drivers/iommu/iommufd/device.c
1473
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
drivers/iommu/iommufd/device.c
1476
iopt_area_iova_to_index(area, iter.cur_iova),
drivers/iommu/iommufd/device.c
1503
struct iopt_area_contig_iter iter;
drivers/iommu/iommufd/device.c
1522
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
drivers/iommu/iommufd/device.c
1524
unsigned long bytes = (last - iter.cur_iova) + 1;
drivers/iommu/iommufd/device.c
1537
area->pages, iopt_area_start_byte(area, iter.cur_iova),
drivers/iommu/iommufd/device.c
1543
if (!iopt_area_contig_done(&iter))
drivers/iommu/iommufd/double_span.h
32
struct interval_tree_double_span_iter *iter);
drivers/iommu/iommufd/double_span.h
34
struct interval_tree_double_span_iter *iter,
drivers/iommu/iommufd/double_span.h
38
struct interval_tree_double_span_iter *iter);
drivers/iommu/iommufd/io_pagetable.c
33
struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
drivers/iommu/iommufd/io_pagetable.c
40
iter->cur_iova = iova;
drivers/iommu/iommufd/io_pagetable.c
41
iter->last_iova = last_iova;
drivers/iommu/iommufd/io_pagetable.c
42
iter->area = iopt_area_iter_first(iopt, iova, iova);
drivers/iommu/iommufd/io_pagetable.c
43
if (!iter->area)
drivers/iommu/iommufd/io_pagetable.c
45
if (!iter->area->pages) {
drivers/iommu/iommufd/io_pagetable.c
46
iter->area = NULL;
drivers/iommu/iommufd/io_pagetable.c
49
return iter->area;
drivers/iommu/iommufd/io_pagetable.c
52
struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter)
drivers/iommu/iommufd/io_pagetable.c
541
struct iopt_area_contig_iter iter;
drivers/iommu/iommufd/io_pagetable.c
550
iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) {
drivers/iommu/iommufd/io_pagetable.c
553
ret = ops->read_and_clear_dirty(domain, iter.cur_iova,
drivers/iommu/iommufd/io_pagetable.c
554
last - iter.cur_iova + 1, flags,
drivers/iommu/iommufd/io_pagetable.c
56
if (!iter->area)
drivers/iommu/iommufd/io_pagetable.c
560
if (!iopt_area_contig_done(&iter))
drivers/iommu/iommufd/io_pagetable.c
574
struct iova_bitmap *iter;
drivers/iommu/iommufd/io_pagetable.c
58
last_iova = iopt_area_last_iova(iter->area);
drivers/iommu/iommufd/io_pagetable.c
580
iter = iova_bitmap_alloc(bitmap->iova, bitmap->length,
drivers/iommu/iommufd/io_pagetable.c
583
if (IS_ERR(iter))
drivers/iommu/iommufd/io_pagetable.c
586
iommu_dirty_bitmap_init(&dirty, iter, &gather);
drivers/iommu/iommufd/io_pagetable.c
59
if (iter->last_iova <= last_iova)
drivers/iommu/iommufd/io_pagetable.c
592
iova_bitmap_for_each(iter, &arg, __iommu_read_and_clear_dirty);
drivers/iommu/iommufd/io_pagetable.c
597
iova_bitmap_free(iter);
drivers/iommu/iommufd/io_pagetable.c
62
iter->cur_iova = last_iova + 1;
drivers/iommu/iommufd/io_pagetable.c
63
iter->area = iopt_area_iter_next(iter->area, iter->cur_iova,
drivers/iommu/iommufd/io_pagetable.c
64
iter->last_iova);
drivers/iommu/iommufd/io_pagetable.c
65
if (!iter->area)
drivers/iommu/iommufd/io_pagetable.c
67
if (iter->cur_iova != iopt_area_iova(iter->area) ||
drivers/iommu/iommufd/io_pagetable.c
68
!iter->area->pages) {
drivers/iommu/iommufd/io_pagetable.c
69
iter->area = NULL;
drivers/iommu/iommufd/io_pagetable.c
703
struct iopt_area_contig_iter iter;
drivers/iommu/iommufd/io_pagetable.c
714
iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
drivers/iommu/iommufd/io_pagetable.c
72
return iter->area;
drivers/iommu/iommufd/io_pagetable.c
723
elm->start_byte = iopt_area_start_byte(area, iter.cur_iova);
drivers/iommu/iommufd/io_pagetable.c
725
elm->length = (last - iter.cur_iova) + 1;
drivers/iommu/iommufd/io_pagetable.c
729
if (!iopt_area_contig_done(&iter)) {
drivers/iommu/iommufd/io_pagetable.h
164
struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
drivers/iommu/iommufd/io_pagetable.h
168
struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter);
drivers/iommu/iommufd/io_pagetable.h
170
static inline bool iopt_area_contig_done(struct iopt_area_contig_iter *iter)
drivers/iommu/iommufd/io_pagetable.h
172
return iter->area && iter->last_iova <= iopt_area_last_iova(iter->area);
drivers/iommu/iommufd/io_pagetable.h
180
#define iopt_for_each_contig_area(iter, area, iopt, iova, last_iova) \
drivers/iommu/iommufd/io_pagetable.h
181
for (area = iopt_area_contig_init(iter, iopt, iova, last_iova); area; \
drivers/iommu/iommufd/io_pagetable.h
182
area = iopt_area_contig_next(iter))
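The io_pagetable.c/io_pagetable.h entries above spell out a complete init/next/done contiguous-range iterator plus a for-each wrapper macro: iopt_area_contig_init() finds the area containing the first IOVA, iopt_area_contig_next() advances only while areas stay contiguous, and iopt_area_contig_done() reports whether the requested span was fully covered. The following is a minimal standalone sketch of that same loop shape in plain userspace C; every name here is a hypothetical stand-in, not the iommufd API.

/* Sketch of the init/next/done + for-each-macro iterator pattern listed
 * above. Hypothetical names; plain C, no kernel dependencies. */
#include <stdio.h>
#include <stddef.h>

struct range { unsigned long start, last; };

struct range_iter {
	const struct range *cur;	/* current piece, NULL if the walk broke off */
	const struct range *end;	/* one past the last table entry */
	unsigned long cur_iova;		/* first address not yet covered */
	unsigned long last_iova;	/* last address the caller asked for */
};

static const struct range *range_iter_init(struct range_iter *iter,
					   const struct range *tbl, size_t n,
					   unsigned long iova, unsigned long last)
{
	size_t i;

	iter->cur = NULL;
	iter->end = tbl + n;
	iter->cur_iova = iova;
	iter->last_iova = last;
	for (i = 0; i < n; i++)		/* find the piece containing iova */
		if (tbl[i].start <= iova && iova <= tbl[i].last) {
			iter->cur = &tbl[i];
			break;
		}
	return iter->cur;
}

static const struct range *range_iter_next(struct range_iter *iter)
{
	if (iter->last_iova <= iter->cur->last)
		return NULL;		/* requested span fully covered; keep cur */

	iter->cur_iova = iter->cur->last + 1;
	iter->cur++;
	if (iter->cur == iter->end || iter->cur->start != iter->cur_iova) {
		iter->cur = NULL;	/* gap or end of table: walk is incomplete */
		return NULL;
	}
	return iter->cur;
}

/* Mirrors the iopt_area_contig_done() check in the listing: true only if the
 * walk reached a piece that still covers last_iova. */
static int range_iter_done(const struct range_iter *iter)
{
	return iter->cur && iter->last_iova <= iter->cur->last;
}

#define for_each_contig_range(iter, r, tbl, n, iova, last)		\
	for ((r) = range_iter_init((iter), (tbl), (n), (iova), (last));	\
	     (r); (r) = range_iter_next(iter))

int main(void)
{
	const struct range tbl[] = { { 0, 4095 }, { 4096, 8191 } };
	struct range_iter it;
	const struct range *r;

	for_each_contig_range(&it, r, tbl, 2, 0UL, 8191UL)
		printf("piece [%lu, %lu]\n", r->start, r->last);
	printf("fully covered: %s\n", range_iter_done(&it) ? "yes" : "no");
	return 0;
}

The key design point carried over from the listing is that "done" is a post-loop check: the loop ends either because the span was covered (cur left pointing at the final piece) or because a gap was hit (cur cleared), and only the caller's done() check can tell the two apart.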
drivers/iommu/iommufd/pages.c
107
struct interval_tree_double_span_iter *iter)
drivers/iommu/iommufd/pages.c
112
for (i = 0; i != ARRAY_SIZE(iter->spans); i++) {
drivers/iommu/iommufd/pages.c
113
if (interval_tree_span_iter_done(&iter->spans[i])) {
drivers/iommu/iommufd/pages.c
114
iter->is_used = -1;
drivers/iommu/iommufd/pages.c
118
if (iter->spans[i].is_hole) {
drivers/iommu/iommufd/pages.c
119
last_hole = min(last_hole, iter->spans[i].last_hole);
drivers/iommu/iommufd/pages.c
123
iter->is_used = i + 1;
drivers/iommu/iommufd/pages.c
124
iter->start_used = iter->spans[i].start_used;
drivers/iommu/iommufd/pages.c
125
iter->last_used = min(iter->spans[i].last_used, last_hole);
drivers/iommu/iommufd/pages.c
129
iter->is_used = 0;
drivers/iommu/iommufd/pages.c
130
iter->start_hole = iter->spans[0].start_hole;
drivers/iommu/iommufd/pages.c
131
iter->last_hole =
drivers/iommu/iommufd/pages.c
132
min(iter->spans[0].last_hole, iter->spans[1].last_hole);
drivers/iommu/iommufd/pages.c
136
struct interval_tree_double_span_iter *iter,
drivers/iommu/iommufd/pages.c
142
iter->itrees[0] = itree1;
drivers/iommu/iommufd/pages.c
143
iter->itrees[1] = itree2;
drivers/iommu/iommufd/pages.c
144
for (i = 0; i != ARRAY_SIZE(iter->spans); i++)
drivers/iommu/iommufd/pages.c
145
interval_tree_span_iter_first(&iter->spans[i], iter->itrees[i],
drivers/iommu/iommufd/pages.c
147
interval_tree_double_span_iter_update(iter);
drivers/iommu/iommufd/pages.c
151
struct interval_tree_double_span_iter *iter)
drivers/iommu/iommufd/pages.c
155
if (iter->is_used == -1 ||
drivers/iommu/iommufd/pages.c
156
iter->last_hole == iter->spans[0].last_index) {
drivers/iommu/iommufd/pages.c
157
iter->is_used = -1;
drivers/iommu/iommufd/pages.c
161
for (i = 0; i != ARRAY_SIZE(iter->spans); i++)
drivers/iommu/iommufd/pages.c
163
&iter->spans[i], iter->itrees[i], iter->last_hole + 1);
drivers/iommu/iommufd/pages.c
164
interval_tree_double_span_iter_update(iter);
drivers/iommu/msm_iommu.c
632
struct msm_iommu_dev *iommu = NULL, *iter;
drivers/iommu/msm_iommu.c
637
list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
drivers/iommu/msm_iommu.c
638
if (iter->dev->of_node == spec->np) {
drivers/iommu/msm_iommu.c
639
iommu = iter;
drivers/irqchip/irq-sifive-plic.c
100
for (unsigned int iter = 1; iter < ACCESS_PRIVATE(priv, total_irqs); iter++)
drivers/irqchip/irq-sifive-plic.c
99
#define for_each_device_irq(iter, priv) \
drivers/md/bcache/alloc.c
419
size_t iter;
drivers/md/bcache/alloc.c
423
for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
drivers/md/bcache/alloc.c
424
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
drivers/md/bcache/alloc.c
427
fifo_for_each(i, &ca->free[j], iter)
drivers/md/bcache/alloc.c
429
fifo_for_each(i, &ca->free_inc, iter)
drivers/md/bcache/bset.c
1089
static inline bool btree_iter_end(struct btree_iter *iter)
drivers/md/bcache/bset.c
1091
return !iter->used;
drivers/md/bcache/bset.c
1094
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
drivers/md/bcache/bset.c
1098
BUG_ON(!heap_add(iter,
drivers/md/bcache/bset.c
1104
struct btree_iter_stack *iter,
drivers/md/bcache/bset.c
111
static void bch_btree_iter_next_check(struct btree_iter *iter)
drivers/md/bcache/bset.c
1110
iter->iter.size = ARRAY_SIZE(iter->stack_data);
drivers/md/bcache/bset.c
1111
iter->iter.used = 0;
drivers/md/bcache/bset.c
1114
iter->iter.b = b;
drivers/md/bcache/bset.c
1119
bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
drivers/md/bcache/bset.c
1126
struct btree_iter_stack *iter,
drivers/md/bcache/bset.c
1129
return __bch_btree_iter_stack_init(b, iter, search, b->set);
drivers/md/bcache/bset.c
113
struct bkey *k = iter->data->k, *next = bkey_next(k);
drivers/md/bcache/bset.c
1132
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
drivers/md/bcache/bset.c
1138
if (!btree_iter_end(iter)) {
drivers/md/bcache/bset.c
1139
bch_btree_iter_next_check(iter);
drivers/md/bcache/bset.c
1141
ret = iter->data->k;
drivers/md/bcache/bset.c
1142
iter->data->k = bkey_next(iter->data->k);
drivers/md/bcache/bset.c
1144
if (iter->data->k > iter->data->end) {
drivers/md/bcache/bset.c
1146
iter->data->k = iter->data->end;
drivers/md/bcache/bset.c
1149
if (iter->data->k == iter->data->end)
drivers/md/bcache/bset.c
115
if (next < iter->data->end &&
drivers/md/bcache/bset.c
1150
heap_pop(iter, b, cmp);
drivers/md/bcache/bset.c
1152
heap_sift(iter, 0, cmp);
drivers/md/bcache/bset.c
1158
struct bkey *bch_btree_iter_next(struct btree_iter *iter)
drivers/md/bcache/bset.c
116
bkey_cmp(k, iter->b->ops->is_extents ?
drivers/md/bcache/bset.c
1160
return __bch_btree_iter_next(iter, btree_iter_cmp);
drivers/md/bcache/bset.c
1164
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
drivers/md/bcache/bset.c
1170
ret = bch_btree_iter_next(iter);
drivers/md/bcache/bset.c
118
bch_dump_bucket(iter->b);
drivers/md/bcache/bset.c
1195
struct btree_iter *iter,
drivers/md/bcache/bset.c
1206
for (i = iter->used / 2 - 1; i >= 0; --i)
drivers/md/bcache/bset.c
1207
heap_sift(iter, i, b->ops->sort_cmp);
drivers/md/bcache/bset.c
1209
while (!btree_iter_end(iter)) {
drivers/md/bcache/bset.c
1211
k = b->ops->sort_fixup(iter, &tmp.k);
drivers/md/bcache/bset.c
1216
k = __bch_btree_iter_next(iter, b->ops->sort_cmp);
drivers/md/bcache/bset.c
1235
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
drivers/md/bcache/bset.c
125
static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
drivers/md/bcache/bset.c
1256
btree_mergesort(b, out, iter, fixup, false);
drivers/md/bcache/bset.c
1296
struct btree_iter_stack iter;
drivers/md/bcache/bset.c
1299
__bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
drivers/md/bcache/bset.c
1310
__btree_sort(b, &iter.iter, start, order, false, state);
drivers/md/bcache/bset.c
1316
struct btree_iter *iter,
drivers/md/bcache/bset.c
1319
__btree_sort(b, iter, 0, b->page_order, true, state);
drivers/md/bcache/bset.c
1326
struct btree_iter_stack iter;
drivers/md/bcache/bset.c
1328
bch_btree_iter_stack_init(b, &iter, NULL);
drivers/md/bcache/bset.c
1330
btree_mergesort(b, new->set->data, &iter.iter, false, true);
drivers/md/bcache/bset.c
57
struct btree_iter_stack iter;
drivers/md/bcache/bset.c
61
for_each_key(b, k, &iter)
drivers/md/bcache/bset.c
70
struct btree_iter_stack iter;
drivers/md/bcache/bset.c
73
for_each_key(b, k, &iter) {
drivers/md/bcache/bset.c
882
struct btree_iter_stack iter;
drivers/md/bcache/bset.c
898
m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
drivers/md/bcache/bset.c
900
if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
drivers/md/bcache/bset.h
192
struct bkey *(*sort_fixup)(struct btree_iter *iter,
drivers/md/bcache/bset.h
196
struct btree_iter *iter,
drivers/md/bcache/bset.h
331
TRAILING_OVERLAP(struct btree_iter, iter, data,
drivers/md/bcache/bset.h
335
static_assert(offsetof(struct btree_iter_stack, iter.data) ==
drivers/md/bcache/bset.h
340
struct bkey *bch_btree_iter_next(struct btree_iter *iter);
drivers/md/bcache/bset.h
341
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
drivers/md/bcache/bset.h
345
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
drivers/md/bcache/bset.h
348
struct btree_iter_stack *iter,
drivers/md/bcache/bset.h
366
((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
drivers/md/bcache/bset.h
371
((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
drivers/md/bcache/bset.h
391
struct btree_iter *iter,
drivers/md/bcache/btree.c
1310
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
1315
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
drivers/md/bcache/btree.c
152
struct btree_iter *iter;
drivers/md/bcache/btree.c
1571
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
1574
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
drivers/md/bcache/btree.c
159
iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
drivers/md/bcache/btree.c
160
iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
drivers/md/bcache/btree.c
161
iter->used = 0;
drivers/md/bcache/btree.c
1612
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
1616
bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
drivers/md/bcache/btree.c
1622
k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
drivers/md/bcache/btree.c
164
iter->b = &b->keys;
drivers/md/bcache/btree.c
1917
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
1919
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
drivers/md/bcache/btree.c
1925
bch_btree_iter_stack_init(&b->keys, &iter, NULL);
drivers/md/bcache/btree.c
1928
k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
drivers/md/bcache/btree.c
1956
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
1965
bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
drivers/md/bcache/btree.c
1966
k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
drivers/md/bcache/btree.c
1984
k = bch_btree_iter_next_filter(&iter.iter,
drivers/md/bcache/btree.c
202
bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
drivers/md/bcache/btree.c
2057
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
2061
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
drivers/md/bcache/btree.c
214
bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
drivers/md/bcache/btree.c
226
mempool_free(iter, &b->c->fill_iter);
drivers/md/bcache/btree.c
2553
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
2555
bch_btree_iter_stack_init(&b->keys, &iter, from);
drivers/md/bcache/btree.c
2557
while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
drivers/md/bcache/btree.c
2586
struct btree_iter_stack iter;
drivers/md/bcache/btree.c
2588
bch_btree_iter_stack_init(&b->keys, &iter, from);
drivers/md/bcache/btree.c
2590
while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
drivers/md/bcache/btree.h
204
#define for_each_cached_btree(b, c, iter) \
drivers/md/bcache/btree.h
205
for (iter = 0; \
drivers/md/bcache/btree.h
206
iter < ARRAY_SIZE((c)->bucket_hash); \
drivers/md/bcache/btree.h
207
iter++) \
drivers/md/bcache/btree.h
208
hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
drivers/md/bcache/debug.c
113
struct bvec_iter iter, citer = { 0 };
drivers/md/bcache/debug.c
129
bio_for_each_segment(bv, bio, iter) {
drivers/md/bcache/extents.c
229
struct btree_iter *iter,
drivers/md/bcache/extents.c
266
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
drivers/md/bcache/extents.c
269
while (iter->used > 1) {
drivers/md/bcache/extents.c
270
struct btree_iter_set *top = iter->data, *i = top + 1;
drivers/md/bcache/extents.c
272
if (iter->used > 2 &&
drivers/md/bcache/extents.c
280
sort_key_next(iter, i);
drivers/md/bcache/extents.c
281
heap_sift(iter, i - top, bch_extent_sort_cmp);
drivers/md/bcache/extents.c
287
sort_key_next(iter, i);
drivers/md/bcache/extents.c
291
heap_sift(iter, i - top, bch_extent_sort_cmp);
drivers/md/bcache/extents.c
30
static void sort_key_next(struct btree_iter *iter,
drivers/md/bcache/extents.c
301
heap_sift(iter, 0, bch_extent_sort_cmp);
drivers/md/bcache/extents.c
325
struct btree_iter *iter,
drivers/md/bcache/extents.c
337
struct bkey *k = bch_btree_iter_next(iter);
drivers/md/bcache/extents.c
36
*i = iter->data[--iter->used];
drivers/md/bcache/request.c
43
struct bvec_iter iter;
drivers/md/bcache/request.c
46
bio_for_each_segment(bv, bio, iter) {
drivers/md/bcache/sysfs.c
662
struct btree_iter_stack iter;
drivers/md/bcache/sysfs.c
673
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
drivers/md/bcache/util.h
119
#define fifo_for_each(c, fifo, iter) \
drivers/md/bcache/util.h
120
for (iter = (fifo)->front; \
drivers/md/bcache/util.h
121
c = (fifo)->data[iter], iter != (fifo)->back; \
drivers/md/bcache/util.h
122
iter = (iter + 1) & (fifo)->mask)
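The bcache util.h entries above define fifo_for_each() as a walk over a power-of-two ring buffer: start at front, stop when the index reaches back, and wrap with a mask instead of a modulo. A minimal standalone sketch of that masked ring-walk idiom follows; the fifo type and names are hypothetical, not the bcache structures.

/* Sketch of the masked ring-buffer walk from the fifo_for_each() entries
 * above. Hypothetical demo types; plain C. */
#include <stdio.h>

struct demo_fifo {
	unsigned int front, back, mask;	/* mask = size - 1, size a power of two */
	int data[8];
};

#define demo_fifo_for_each(c, fifo, iter)				\
	for ((iter) = (fifo)->front;					\
	     (c) = (fifo)->data[(iter)], (iter) != (fifo)->back;	\
	     (iter) = ((iter) + 1) & (fifo)->mask)

int main(void)
{
	struct demo_fifo f = { .front = 6, .back = 2, .mask = 7,
			       .data = { 10, 11, 0, 0, 0, 0, 16, 17 } };
	unsigned int iter;
	int c;

	/* Visits indices 6, 7, 0, 1 and stops once iter wraps around to back. */
	demo_fifo_for_each(c, &f, iter)
		printf("data[%u] = %d\n", iter, c);
	return 0;
}

As in the original macro, the element load sits before the termination test via the comma operator, so the slot at back is probed but its value is never handed to the loop body.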
drivers/md/bcache/writeback.c
1001
k, &iter, bch_ptr_invalid) {
drivers/md/bcache/writeback.c
910
struct btree_iter_stack iter;
drivers/md/bcache/writeback.c
917
bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
drivers/md/bcache/writeback.c
918
k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
drivers/md/bcache/writeback.c
932
k = bch_btree_iter_next_filter(&iter.iter,
drivers/md/bcache/writeback.c
981
struct btree_iter_stack iter;
drivers/md/dm-ebs-target.c
126
struct bvec_iter iter;
drivers/md/dm-ebs-target.c
128
bio_for_each_bvec(bv, bio, iter) {
drivers/md/dm-ebs-target.c
129
rr = __ebs_rw_bvec(ec, op, &bv, &iter);
drivers/md/dm-ebs-target.c
66
struct bvec_iter *iter)
drivers/md/dm-ebs-target.c
72
unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));
drivers/md/dm-ebs-target.c
73
sector_t block = __sector_to_block(ec, iter->bi_sector);
drivers/md/dm-flakey.c
364
struct bvec_iter iter;
drivers/md/dm-flakey.c
371
__bio_for_each_segment(bvec, bio, iter, start) {
drivers/md/dm-flakey.c
372
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
drivers/md/dm-flakey.c
384
corrupt_bio_byte -= bio_iter_len(bio, iter);
drivers/md/dm-flakey.c
432
struct bvec_iter iter = bio->bi_iter;
drivers/md/dm-flakey.c
475
struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter);
drivers/md/dm-flakey.c
481
bvec_iter_advance(bio->bi_io_vec, &iter, this_step);
drivers/md/dm-integrity.c
1802
struct bvec_iter iter;
drivers/md/dm-integrity.c
1815
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
drivers/md/dm-integrity.c
1886
struct bvec_iter iter;
drivers/md/dm-integrity.c
1943
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
drivers/md/dm-integrity.c
1990
struct bvec_iter iter;
drivers/md/dm-integrity.c
1996
bip_for_each_vec(biv, bip, iter) {
drivers/md/dm-integrity.c
2036
struct bvec_iter iter;
drivers/md/dm-integrity.c
2039
bio_for_each_segment(bv, bio, iter) {
drivers/md/dm-io-rewind.c
111
struct bvec_iter *iter, unsigned int bytes)
drivers/md/dm-io-rewind.c
113
iter->bi_sector -= bytes >> 9;
drivers/md/dm-io-rewind.c
117
iter->bi_size += bytes;
drivers/md/dm-io-rewind.c
119
dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
drivers/md/dm-io-rewind.c
13
struct bvec_iter *iter,
drivers/md/dm-io-rewind.c
18
iter->bi_size += bytes;
drivers/md/dm-io-rewind.c
19
if (bytes <= iter->bi_bvec_done) {
drivers/md/dm-io-rewind.c
20
iter->bi_bvec_done -= bytes;
drivers/md/dm-io-rewind.c
24
bytes -= iter->bi_bvec_done;
drivers/md/dm-io-rewind.c
25
idx = iter->bi_idx - 1;
drivers/md/dm-io-rewind.c
34
iter->bi_size -= bytes;
drivers/md/dm-io-rewind.c
35
iter->bi_bvec_done = 0;
drivers/md/dm-io-rewind.c
36
iter->bi_idx = 0;
drivers/md/dm-io-rewind.c
40
iter->bi_idx = idx;
drivers/md/dm-io-rewind.c
41
iter->bi_bvec_done = bv[idx].bv_len - bytes;
drivers/md/dm-log-writes.c
649
struct bvec_iter iter;
drivers/md/dm-log-writes.c
733
bio_for_each_segment(bv, bio, iter) {
drivers/md/dm-pcache/segment.c
11
struct iov_iter iter;
drivers/md/dm-pcache/segment.c
15
iov_iter_bvec(&iter, ITER_DEST, &bio->bi_io_vec[bio->bi_iter.bi_idx],
drivers/md/dm-pcache/segment.c
17
iter.iov_offset = bio->bi_iter.bi_bvec_done;
drivers/md/dm-pcache/segment.c
19
iov_iter_advance(&iter, bio_off);
drivers/md/dm-pcache/segment.c
22
copied = _copy_mc_to_iter(src, data_len, &iter);
drivers/md/dm-pcache/segment.c
32
struct iov_iter iter;
drivers/md/dm-pcache/segment.c
36
iov_iter_bvec(&iter, ITER_SOURCE, &bio->bi_io_vec[bio->bi_iter.bi_idx],
drivers/md/dm-pcache/segment.c
38
iter.iov_offset = bio->bi_iter.bi_bvec_done;
drivers/md/dm-pcache/segment.c
40
iov_iter_advance(&iter, bio_off);
drivers/md/dm-pcache/segment.c
43
copied = _copy_from_iter_flushcache(dst, data_len, &iter);
drivers/md/dm-vdo/data-vio.c
298
struct bvec_iter iter;
drivers/md/dm-vdo/data-vio.c
300
bio_for_each_segment(biovec, bio, iter) {
drivers/md/dm-vdo/data-vio.c
520
struct bvec_iter iter;
drivers/md/dm-vdo/data-vio.c
522
bio_for_each_segment(biovec, bio, iter) {
drivers/md/dm-verity-target.c
514
struct bvec_iter *iter;
drivers/md/dm-verity-target.c
526
iter_copy = io->iter;
drivers/md/dm-verity-target.c
527
iter = &iter_copy;
drivers/md/dm-verity-target.c
529
iter = &io->iter;
drivers/md/dm-verity-target.c
532
b++, bio_advance_iter_single(bio, iter, block_size)) {
drivers/md/dm-verity-target.c
550
bv = bio_iter_iovec(bio, *iter);
drivers/md/dm-verity-target.c
817
io->iter = bio->bi_iter;
drivers/md/dm-verity.h
100
struct bvec_iter iter;
drivers/md/md.c
10704
struct md_rdev *rdev = NULL, *iter;
drivers/md/md.c
10708
rdev_for_each_rcu(iter, mddev) {
drivers/md/md.c
10709
if (iter->desc_nr == nr) {
drivers/md/md.c
10710
rdev = iter;
drivers/md/md.c
2741
struct md_rdev *rdev = NULL, *iter;
drivers/md/md.c
2746
rdev_for_each(iter, mddev)
drivers/md/md.c
2747
if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
drivers/md/md.c
2748
rdev = iter;
drivers/md/raid5.c
1367
struct bvec_iter iter;
drivers/md/raid5.c
1383
bio_for_each_segment(bvl, bio, iter) {
drivers/media/mc/mc-device.c
580
struct media_pad *iter;
drivers/media/mc/mc-device.c
596
media_entity_for_each_pad(entity, iter)
drivers/media/mc/mc-device.c
597
media_gobj_destroy(&iter->graph_obj);
drivers/media/mc/mc-device.c
609
struct media_pad *iter;
drivers/media/mc/mc-device.c
638
media_entity_for_each_pad(entity, iter)
drivers/media/mc/mc-device.c
639
media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj);
drivers/media/mc/mc-entity.c
1003
struct media_pipeline_pad_iter *iter,
drivers/media/mc/mc-entity.c
1007
iter->cursor = pipe->pads.next;
drivers/media/mc/mc-entity.c
1009
if (iter->cursor == &pipe->pads)
drivers/media/mc/mc-entity.c
1012
pad = list_entry(iter->cursor, struct media_pipeline_pad, list)->pad;
drivers/media/mc/mc-entity.c
1013
iter->cursor = iter->cursor->next;
drivers/media/mc/mc-entity.c
1020
struct media_pipeline_entity_iter *iter)
drivers/media/mc/mc-entity.c
1022
return media_entity_enum_init(&iter->ent_enum, pipe->mdev);
drivers/media/mc/mc-entity.c
1026
void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter)
drivers/media/mc/mc-entity.c
1028
media_entity_enum_cleanup(&iter->ent_enum);
drivers/media/mc/mc-entity.c
1034
struct media_pipeline_entity_iter *iter,
drivers/media/mc/mc-entity.c
1038
iter->cursor = pipe->pads.next;
drivers/media/mc/mc-entity.c
1040
while (iter->cursor != &pipe->pads) {
drivers/media/mc/mc-entity.c
1044
ppad = list_entry(iter->cursor, struct media_pipeline_pad, list);
drivers/media/mc/mc-entity.c
1046
iter->cursor = iter->cursor->next;
drivers/media/mc/mc-entity.c
1048
if (!media_entity_enum_test_and_set(&iter->ent_enum, entity))
drivers/media/mc/mc-entity.c
198
struct media_pad *iter;
drivers/media/mc/mc-entity.c
211
media_entity_for_each_pad(entity, iter) {
drivers/media/mc/mc-entity.c
212
iter->entity = entity;
drivers/media/mc/mc-entity.c
213
iter->index = i++;
drivers/media/mc/mc-entity.c
215
if (hweight32(iter->flags & (MEDIA_PAD_FL_SINK |
drivers/media/mc/mc-entity.c
223
&iter->graph_obj);
drivers/media/mc/mc-entity.c
227
media_entity_for_each_pad(entity, iter)
drivers/media/mc/mc-entity.c
228
media_gobj_destroy(&iter->graph_obj);
drivers/media/platform/broadcom/bcm2835-unicam.c
1675
struct media_pipeline_pad_iter iter;
drivers/media/platform/broadcom/bcm2835-unicam.c
1700
media_pipeline_for_each_pad(&unicam->pipe.pipe, &iter, pad) {
drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
401
struct iris_buffer *buf, *iter;
drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
447
list_for_each_entry(iter, &buffers->list, list) {
drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
448
if (!(iter->attr & BUF_ATTR_QUEUED))
drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
451
found = (iter->index == output_tag &&
drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
452
iter->data_offset == offset);
drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
455
buf = iter;
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
435
struct iris_buffer *buf, *iter;
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
438
list_for_each_entry(iter, &buffers->list, list) {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
439
if (iter->device_addr == buffer->base_address) {
drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
441
buf = iter;
drivers/media/platform/qcom/iris/iris_vidc.c
45
struct iris_inst *iter;
drivers/media/platform/qcom/iris/iris_vidc.c
50
list_for_each_entry(iter, &core->instances, list)
drivers/media/platform/qcom/iris/iris_vidc.c
62
struct iris_inst *iter, *temp;
drivers/media/platform/qcom/iris/iris_vidc.c
65
list_for_each_entry_safe(iter, temp, &core->instances, list) {
drivers/media/platform/qcom/iris/iris_vidc.c
66
if (iter->session_id == inst->session_id) {
drivers/media/platform/qcom/iris/iris_vidc.c
67
list_del_init(&iter->list);
drivers/media/platform/rockchip/rga/rga-buf.c
23
struct sg_dma_page_iter iter;
drivers/media/platform/rockchip/rga/rga-buf.c
28
for_each_sgtable_dma_page(sgt, &iter, 0) {
drivers/media/platform/rockchip/rga/rga-buf.c
31
addr = sg_page_iter_dma_address(&iter);
drivers/media/platform/ti/omap3isp/ispvideo.c
228
struct media_pipeline_entity_iter iter;
drivers/media/platform/ti/omap3isp/ispvideo.c
233
ret = media_pipeline_entity_iter_init(&pipe->pipe, &iter);
drivers/media/platform/ti/omap3isp/ispvideo.c
237
media_pipeline_for_each_entity(&pipe->pipe, &iter, entity) {
drivers/media/platform/ti/omap3isp/ispvideo.c
256
media_pipeline_entity_iter_cleanup(&iter);
drivers/media/platform/xilinx/xilinx-dma.c
177
struct media_pipeline_pad_iter iter;
drivers/media/platform/xilinx/xilinx-dma.c
183
media_pipeline_for_each_pad(&pipe->pipe, &iter, pad) {
drivers/media/usb/uvc/uvc_ctrl.c
2833
struct uvc_entity *entity, *iter;
drivers/media/usb/uvc/uvc_ctrl.c
2844
list_for_each_entry(iter, &chain->entities, chain) {
drivers/media/usb/uvc/uvc_ctrl.c
2845
if (UVC_ENTITY_TYPE(iter) == UVC_VC_EXTENSION_UNIT &&
drivers/media/usb/uvc/uvc_ctrl.c
2846
iter->id == xqry->unit) {
drivers/media/usb/uvc/uvc_ctrl.c
2847
entity = iter;
drivers/misc/bcm-vk/bcm_vk_msg.c
1012
struct bcm_vk_wkent *entry = NULL, *iter;
drivers/misc/bcm-vk/bcm_vk_msg.c
1031
list_for_each_entry(iter, &chan->pendq[q_num], node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
1032
if (iter->ctx->idx == ctx->idx) {
drivers/misc/bcm-vk/bcm_vk_msg.c
1034
(iter->to_h_blks * VK_MSGQ_BLK_SIZE)) {
drivers/misc/bcm-vk/bcm_vk_msg.c
1035
list_del(&iter->node);
drivers/misc/bcm-vk/bcm_vk_msg.c
1037
entry = iter;
drivers/misc/bcm-vk/bcm_vk_msg.c
1040
tmp_msg = iter->to_h_msg[0];
drivers/misc/bcm-vk/bcm_vk_msg.c
1041
tmp_usr_msg_id = iter->usr_msg_id;
drivers/misc/bcm-vk/bcm_vk_msg.c
1042
tmp_blks = iter->to_h_blks;
drivers/misc/bcm-vk/bcm_vk_msg.c
760
struct bcm_vk_wkent *entry = NULL, *iter;
drivers/misc/bcm-vk/bcm_vk_msg.c
763
list_for_each_entry(iter, &chan->pendq[q_num], node) {
drivers/misc/bcm-vk/bcm_vk_msg.c
764
if (get_msg_id(&iter->to_v_msg[0]) == msg_id) {
drivers/misc/bcm-vk/bcm_vk_msg.c
765
list_del(&iter->node);
drivers/misc/bcm-vk/bcm_vk_msg.c
766
entry = iter;
drivers/misc/fastrpc.c
1879
struct fastrpc_buf *buf = NULL, *iter, *b;
drivers/misc/fastrpc.c
1887
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
drivers/misc/fastrpc.c
1888
if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
drivers/misc/fastrpc.c
1889
buf = iter;
drivers/misc/fastrpc.c
2009
struct fastrpc_map *map = NULL, *iter, *m;
drivers/misc/fastrpc.c
2016
list_for_each_entry_safe(iter, m, &fl->maps, node) {
drivers/misc/fastrpc.c
2017
if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
drivers/misc/fastrpc.c
2018
map = iter;
drivers/misc/vmw_vmci/vmci_context.c
648
struct vmci_handle_list *notifier = NULL, *iter, *tmp;
drivers/misc/vmw_vmci/vmci_context.c
658
list_for_each_entry_safe(iter, tmp,
drivers/misc/vmw_vmci/vmci_context.c
660
if (vmci_handle_is_equal(iter->handle, handle)) {
drivers/misc/vmw_vmci/vmci_context.c
661
list_del_rcu(&iter->node);
drivers/misc/vmw_vmci/vmci_context.c
663
notifier = iter;
drivers/mmc/host/dw_mmc-exynos.c
497
const u8 iter = 8;
drivers/mmc/host/dw_mmc-exynos.c
501
for (i = 0; i < iter; i++) {
drivers/mmc/host/dw_mmc-exynos.c
509
for (i = 0; i < iter; i++) {
drivers/mmc/host/dw_mmc-exynos.c
522
for (i = 0; i < iter; i++) {
drivers/mmc/host/sdhci-tegra.c
1014
u8 iter = TRIES_256;
drivers/mmc/host/sdhci-tegra.c
1024
iter = TRIES_128;
drivers/mmc/host/sdhci-tegra.c
1029
iter = TRIES_128;
drivers/mmc/host/sdhci-tegra.c
1045
val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
drivers/mmc/host/sdhci-tegra.c
1051
host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
drivers/mtd/devices/mtd_intel_dg.c
390
u32 iter = 0;
drivers/mtd/devices/mtd_intel_dg.c
401
++iter < NVM_NON_POSTED_ERASE_DONE_ITER) {
drivers/mtd/mtd_blkdevs.c
49
struct req_iterator iter;
drivers/mtd/mtd_blkdevs.c
76
rq_for_each_segment(bvec, req, iter)
drivers/mtd/mtd_blkdevs.c
83
rq_for_each_segment(bvec, req, iter)
drivers/mtd/mtdcore.c
1912
int (*iter)(struct mtd_info *,
drivers/mtd/mtdcore.c
1921
ret = iter(mtd, section, oobregion);
drivers/mtd/mtdcore.c
1981
int (*iter)(struct mtd_info *,
drivers/mtd/mtdcore.c
1989
&oobregion, iter);
drivers/mtd/mtdcore.c
2002
ret = iter(mtd, ++section, &oobregion);
drivers/mtd/mtdcore.c
2024
int (*iter)(struct mtd_info *,
drivers/mtd/mtdcore.c
2032
&oobregion, iter);
drivers/mtd/mtdcore.c
2045
ret = iter(mtd, ++section, &oobregion);
drivers/mtd/mtdcore.c
2061
int (*iter)(struct mtd_info *,
drivers/mtd/mtdcore.c
2069
ret = iter(mtd, section++, &oobregion);
drivers/mtd/mtdcore.c
388
int (*iter)(struct mtd_info *, int section,
drivers/mtd/mtdcore.c
398
err = iter(mtd, section, &region);
drivers/mtd/mtdoops.c
305
struct kmsg_dump_iter iter;
drivers/mtd/mtdoops.c
311
kmsg_dump_rewind(&iter);
drivers/mtd/mtdoops.c
315
kmsg_dump_get_buffer(&iter, true,
drivers/mtd/nand/spi/core.c
1030
struct nand_io_iter iter;
drivers/mtd/nand/spi/core.c
1039
nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
drivers/mtd/nand/spi/core.c
1041
iter.req.mode = MTD_OPS_RAW;
drivers/mtd/nand/spi/core.c
1043
ret = spinand_select_target(spinand, iter.req.pos.target);
drivers/mtd/nand/spi/core.c
1047
ret = spinand_write_page(spinand, &iter.req);
drivers/mtd/nand/spi/core.c
1051
ops->retlen += iter.req.datalen;
drivers/mtd/nand/spi/core.c
1052
ops->oobretlen += iter.req.ooblen;
drivers/mtd/nand/spi/core.c
796
struct nand_io_iter iter;
drivers/mtd/nand/spi/core.c
807
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
drivers/mtd/nand/spi/core.c
809
iter.req.mode = MTD_OPS_RAW;
drivers/mtd/nand/spi/core.c
811
ret = spinand_select_target(spinand, iter.req.pos.target);
drivers/mtd/nand/spi/core.c
816
ret = spinand_read_page(spinand, &iter.req);
drivers/mtd/nand/spi/core.c
842
ops->retlen += iter.req.datalen;
drivers/mtd/nand/spi/core.c
843
ops->oobretlen += iter.req.ooblen;
drivers/mtd/nand/spi/core.c
866
struct nand_io_iter iter;
drivers/mtd/nand/spi/core.c
882
nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
drivers/mtd/nand/spi/core.c
883
ret = spinand_select_target(spinand, iter.req.pos.target);
drivers/mtd/nand/spi/core.c
887
ret = nand_ecc_prepare_io_req(nand, &iter.req);
drivers/mtd/nand/spi/core.c
891
ret = spinand_load_page_op(spinand, &iter.req);
drivers/mtd/nand/spi/core.c
900
ret = spinand_read_from_cache_op(spinand, &iter.req);
drivers/mtd/nand/spi/core.c
904
ops->retlen += iter.req.datalen;
drivers/mtd/nand/spi/core.c
912
ret = nand_ecc_finish_io_req(nand, &iter.req);
drivers/mtd/ubi/block.c
190
struct req_iterator iter;
drivers/mtd/ubi/block.c
222
rq_for_each_segment(bvec, req, iter)
drivers/mtd/ubi/debug.c
508
static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
drivers/mtd/ubi/debug.c
512
int *block_number = iter;
drivers/net/bonding/bond_3ad.c
1552
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
1616
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
1848
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
1857
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
1903
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
2302
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
2337
bond_for_each_slave(bond, slave_iter, iter) {
drivers/net/bonding/bond_3ad.c
2414
bond_for_each_slave(bond, slave_iter, iter) {
drivers/net/bonding/bond_3ad.c
2460
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
2474
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
2523
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
2559
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
2580
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
2852
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
2856
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
2922
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
2928
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
2947
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
2953
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_3ad.c
859
struct list_head *iter;
drivers/net/bonding/bond_3ad.c
862
bond_for_each_slave_rcu(bond, slave, iter)
drivers/net/bonding/bond_alb.c
1167
struct list_head *iter;
drivers/net/bonding/bond_alb.c
1192
bond_for_each_slave(bond, tmp_slave1, iter) {
drivers/net/bonding/bond_alb.c
1242
struct list_head *iter;
drivers/net/bonding/bond_alb.c
1250
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_alb.c
1271
bond_for_each_slave(bond, rollback_slave, iter) {
drivers/net/bonding/bond_alb.c
1537
struct list_head *iter;
drivers/net/bonding/bond_alb.c
1555
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_alb.c
1571
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_alb.c
170
struct list_head *iter;
drivers/net/bonding/bond_alb.c
177
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_alb.c
310
struct list_head *iter;
drivers/net/bonding/bond_alb.c
313
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
1097
struct list_head *iter;
drivers/net/bonding/bond_main.c
1100
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
1153
struct list_head *iter;
drivers/net/bonding/bond_main.c
1160
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
1425
struct list_head *iter;
drivers/net/bonding/bond_main.c
1432
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
1452
struct list_head *iter;
drivers/net/bonding/bond_main.c
1455
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_main.c
1463
struct list_head *iter;
drivers/net/bonding/bond_main.c
1467
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
1495
struct list_head *iter;
drivers/net/bonding/bond_main.c
1502
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
1867
struct list_head *iter;
drivers/net/bonding/bond_main.c
1877
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_main.c
2631
struct list_head *iter;
drivers/net/bonding/bond_main.c
2635
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
2653
struct list_head *iter;
drivers/net/bonding/bond_main.c
2667
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
2778
struct list_head *iter;
drivers/net/bonding/bond_main.c
2782
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
2886
struct list_head *iter;
drivers/net/bonding/bond_main.c
2910
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
3039
struct list_head *iter;
drivers/net/bonding/bond_main.c
3049
netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
drivers/net/bonding/bond_main.c
3472
struct list_head *iter;
drivers/net/bonding/bond_main.c
3489
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
3555
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
3589
struct list_head *iter;
drivers/net/bonding/bond_main.c
3593
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
3659
struct list_head *iter;
drivers/net/bonding/bond_main.c
3663
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
3753
struct list_head *iter;
drivers/net/bonding/bond_main.c
3778
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
380
struct list_head *iter;
drivers/net/bonding/bond_main.c
3820
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
383
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
393
bond_for_each_slave(bond, rollback_slave, iter) {
drivers/net/bonding/bond_main.c
413
struct list_head *iter;
drivers/net/bonding/bond_main.c
416
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_main.c
4336
struct list_head *iter;
drivers/net/bonding/bond_main.c
4347
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
4383
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_main.c
4422
struct list_head *iter;
drivers/net/bonding/bond_main.c
4424
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_main.c
4464
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
drivers/net/bonding/bond_main.c
4468
iter = &dev->adj_list.lower;
drivers/net/bonding/bond_main.c
4473
ldev = netdev_next_lower_dev_rcu(now, &iter);
drivers/net/bonding/bond_main.c
4480
iter_stack[cur++] = iter;
drivers/net/bonding/bond_main.c
4494
iter = niter;
drivers/net/bonding/bond_main.c
4506
struct list_head *iter;
drivers/net/bonding/bond_main.c
4519
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
4686
struct list_head *iter;
drivers/net/bonding/bond_main.c
4697
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
4765
struct list_head *iter;
drivers/net/bonding/bond_main.c
4770
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
4797
bond_for_each_slave(bond, rollback_slave, iter) {
drivers/net/bonding/bond_main.c
4823
struct list_head *iter;
drivers/net/bonding/bond_main.c
4842
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
4868
bond_for_each_slave(bond, rollback_slave, iter) {
drivers/net/bonding/bond_main.c
4895
struct list_head *iter;
drivers/net/bonding/bond_main.c
4900
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
4909
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
5156
struct list_head *iter;
drivers/net/bonding/bond_main.c
5184
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
5362
struct list_head *iter;
drivers/net/bonding/bond_main.c
5368
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
5677
struct list_head *iter;
drivers/net/bonding/bond_main.c
5699
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
5742
bond_for_each_slave(bond, rollback_slave, iter) {
drivers/net/bonding/bond_main.c
5844
struct list_head *iter;
drivers/net/bonding/bond_main.c
5856
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
5891
struct list_head *iter;
drivers/net/bonding/bond_main.c
5905
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_main.c
6052
struct list_head *iter;
drivers/net/bonding/bond_main.c
6058
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_main.c
760
struct list_head *iter;
drivers/net/bonding/bond_main.c
769
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
845
struct list_head *iter;
drivers/net/bonding/bond_main.c
856
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
868
struct list_head *iter;
drivers/net/bonding/bond_main.c
879
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_main.c
993
struct list_head *iter;
drivers/net/bonding/bond_main.c
995
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_options.c
1151
struct list_head *iter;
drivers/net/bonding/bond_options.c
1155
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_options.c
1199
struct list_head *iter;
drivers/net/bonding/bond_options.c
1222
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_options.c
1376
struct list_head *iter;
drivers/net/bonding/bond_options.c
1380
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_options.c
1447
struct list_head *iter;
drivers/net/bonding/bond_options.c
1455
bond_for_each_slave(bond, slave, iter)
drivers/net/bonding/bond_options.c
1507
struct list_head *iter;
drivers/net/bonding/bond_options.c
1524
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_options.c
1608
struct list_head *iter;
drivers/net/bonding/bond_options.c
1614
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_options.c
1702
struct list_head *iter;
drivers/net/bonding/bond_options.c
1731
bond_for_each_slave(bond, slave, iter) {
drivers/net/bonding/bond_procfs.c
15
struct list_head *iter;
drivers/net/bonding/bond_procfs.c
24
bond_for_each_slave_rcu(bond, slave, iter)
drivers/net/bonding/bond_procfs.c
34
struct list_head *iter;
drivers/net/bonding/bond_procfs.c
42
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_sysfs.c
169
struct list_head *iter;
drivers/net/bonding/bond_sysfs.c
175
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/bonding/bond_sysfs.c
622
struct list_head *iter;
drivers/net/bonding/bond_sysfs.c
628
bond_for_each_slave_rcu(bond, slave, iter) {
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
101
static void mcp251xfd_dump_ring(struct mcp251xfd_dump_iter *iter,
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
106
struct mcp251xfd_dump_object_reg *reg = iter->data;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
114
mcp251xfd_dump_header(iter, object_type, reg);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
118
struct mcp251xfd_dump_iter *iter)
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
147
mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TEF,
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
152
struct mcp251xfd_dump_iter *iter,
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
180
mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_RX,
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
185
struct mcp251xfd_dump_iter *iter)
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
191
mcp251xfd_dump_rx_ring_one(priv, iter, rx_ring);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
195
struct mcp251xfd_dump_iter *iter)
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
223
mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TX,
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
228
struct mcp251xfd_dump_iter *iter)
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
230
struct mcp251xfd_dump_object_header *hdr = iter->hdr;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
238
iter->hdr = NULL;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
243
struct mcp251xfd_dump_iter iter;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
263
file_size += sizeof(*iter.hdr) * obj_num;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
266
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
268
if (!iter.start) {
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
274
iter.hdr = iter.start;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
275
iter.data = &iter.hdr[obj_num];
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
277
mcp251xfd_dump_registers(priv, &iter);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
278
mcp251xfd_dump_tef_ring(priv, &iter);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
279
mcp251xfd_dump_rx_ring(priv, &iter);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
280
mcp251xfd_dump_tx_ring(priv, &iter);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
281
mcp251xfd_dump_end(priv, &iter);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
283
dev_coredumpv(&priv->spi->dev, iter.start,
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
284
iter.data - iter.start, GFP_KERNEL);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
44
static void mcp251xfd_dump_header(struct mcp251xfd_dump_iter *iter,
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
48
struct mcp251xfd_dump_object_header *hdr = iter->hdr;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
51
len = data_end - iter->data;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
57
hdr->offset = cpu_to_le32(iter->data - iter->start);
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
60
iter->hdr++;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
61
iter->data += len;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
65
struct mcp251xfd_dump_iter *iter)
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
68
struct mcp251xfd_dump_object_reg *reg = iter->data;
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
98
mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
drivers/net/dsa/bcm_sf2_cfp.c
1303
unsigned int i, j, iter;
drivers/net/dsa/bcm_sf2_cfp.c
1321
iter = (i - 1) * s + j;
drivers/net/dsa/bcm_sf2_cfp.c
1322
data[iter] = core_readl(priv, stat->offset);
drivers/net/ethernet/airoha/airoha_ppe.c
934
struct airoha_flow_table_entry *iter;
drivers/net/ethernet/airoha/airoha_ppe.c
939
hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
drivers/net/ethernet/airoha/airoha_ppe.c
944
hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
drivers/net/ethernet/airoha/airoha_ppe.c
951
iter->hash = 0xffff;
drivers/net/ethernet/airoha/airoha_ppe.c
952
airoha_ppe_foe_remove_flow(ppe, iter);
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
332
#define bnge_for_each_napi_tx(iter, bnapi, txr) \
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
333
for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
334
txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
335
(bnapi)->tx_ring[++iter] : NULL)
drivers/net/ethernet/broadcom/bnxt/bnxt.h
1216
#define bnxt_for_each_napi_tx(iter, bnapi, txr) \
drivers/net/ethernet/broadcom/bnxt/bnxt.h
1217
for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
drivers/net/ethernet/broadcom/bnxt/bnxt.h
1218
txr = (iter < BNXT_MAX_TXR_PER_NAPI - 1) ? \
drivers/net/ethernet/broadcom/bnxt/bnxt.h
1219
(bnapi)->tx_ring[++iter] : NULL)
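The bnge_netdev.h and bnxt.h entries above both define a "for_each_napi_tx" macro that walks a small fixed array of ring pointers, stopping at the first NULL slot or at a per-NAPI cap. A short standalone sketch of that capped, NULL-terminated pointer-array walk follows; the types and the cap value are hypothetical placeholders, not the bnxt driver structures.

/* Sketch of the capped, NULL-terminated pointer-array walk used by the
 * for_each_napi_tx entries above. Hypothetical demo types; plain C. */
#include <stdio.h>

#define DEMO_MAX_TXR_PER_NAPI 4

struct demo_tx_ring { int idx; };

struct demo_napi {
	struct demo_tx_ring *tx_ring[DEMO_MAX_TXR_PER_NAPI];
};

#define demo_for_each_napi_tx(iter, bnapi, txr)				\
	for ((iter) = 0, (txr) = (bnapi)->tx_ring[0]; (txr);		\
	     (txr) = ((iter) < DEMO_MAX_TXR_PER_NAPI - 1) ?		\
		     (bnapi)->tx_ring[++(iter)] : NULL)

int main(void)
{
	struct demo_tx_ring r0 = { 0 }, r1 = { 1 };
	struct demo_napi napi = { .tx_ring = { &r0, &r1, NULL, NULL } };
	struct demo_tx_ring *txr;
	int i;

	/* Stops early at the first NULL slot, or once the cap is reached. */
	demo_for_each_napi_tx(i, &napi, txr)
		printf("tx ring %d\n", txr->idx);
	return 0;
}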
drivers/net/ethernet/broadcom/bnxt/bnxt.h
1844
struct rhashtable_iter iter;
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1792
struct rhashtable_iter *iter = &tc_info->iter;
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1796
rhashtable_walk_start(iter);
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1800
flow_node = rhashtable_walk_next(iter);
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1818
rhashtable_walk_stop(iter);
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1832
rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
1850
rhashtable_walk_exit(&tc_info->iter);
drivers/net/ethernet/chelsio/cxgb/common.h
308
#define for_each_port(adapter, iter) \
drivers/net/ethernet/chelsio/cxgb/common.h
309
for (iter = 0; iter < (adapter)->params.nports; ++iter)
drivers/net/ethernet/chelsio/cxgb3/common.h
613
#define for_each_port(adapter, iter) \
drivers/net/ethernet/chelsio/cxgb3/common.h
614
for (iter = 0; iter < (adapter)->params.nports; ++iter)
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3087
u32 size, j, iter;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3130
iter = up_cim_reg->ireg_offset_range;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3135
iter = up_cim_reg->ireg_offset_range;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3140
iter = 1;
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
3146
for (j = 0; j < iter; j++, buff++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1648
#define for_each_port(adapter, iter) \
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
1649
for (iter = 0; iter < (adapter)->params.nports; ++iter)
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
3994
struct scatterlist *iter;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4007
for_each_sg(adapter->hma.sgt->sgl, iter,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4009
page = sg_page(iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4022
struct scatterlist *sgl, *iter;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4078
for_each_sg(sgl, iter, sgt->orig_nents, i) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4087
sg_set_page(iter, newpage, page_size << page_order, 0);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4104
for_each_sg(sgl, iter, sgt->nents, i) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4105
newpage = sg_page(iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4106
adapter->hma.phy_addr[i] = sg_dma_address(iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1026
struct rhashtable_iter iter;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1031
rhashtable_walk_enter(&adap->flower_tbl, &iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1033
rhashtable_walk_start(&iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1035
while ((flower_entry = rhashtable_walk_next(&iter)) &&
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1053
rhashtable_walk_stop(&iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
1056
rhashtable_walk_exit(&iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
795
struct rhashtable_iter iter;
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
808
rhashtable_walk_enter(&adap->flower_tbl, &iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
810
rhashtable_walk_start(&iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
812
fe = rhashtable_walk_next(&iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
827
fe = rhashtable_walk_next(&iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
830
rhashtable_walk_stop(&iter);
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
832
rhashtable_walk_exit(&iter);
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
350
#define for_each_ethrxq(sge, iter) \
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
351
for (iter = 0; iter < (sge)->ethqsets; iter++)
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
306
#define for_each_port(adapter, iter) \
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
307
for (iter = 0; iter < (adapter)->params.nports; iter++)
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2148
struct list_head *iter;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2153
netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2168
struct list_head *iter;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
2183
netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
60
struct list_head *iter;
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
93
netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
drivers/net/ethernet/intel/i40e/i40e_main.c
7833
struct i40e_channel *ch = NULL, *ch_tmp, *iter;
drivers/net/ethernet/intel/i40e/i40e_main.c
7839
list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
drivers/net/ethernet/intel/i40e/i40e_main.c
7840
if (!i40e_is_channel_macvlan(iter)) {
drivers/net/ethernet/intel/i40e/i40e_main.c
7841
iter->fwd = fwd;
drivers/net/ethernet/intel/i40e/i40e_main.c
7846
iter->num_queue_pairs,
drivers/net/ethernet/intel/i40e/i40e_main.c
7847
iter->base_queue);
drivers/net/ethernet/intel/i40e/i40e_main.c
7848
for (i = 0; i < iter->num_queue_pairs; i++) {
drivers/net/ethernet/intel/i40e/i40e_main.c
7852
pf_q = iter->base_queue + i;
drivers/net/ethernet/intel/i40e/i40e_main.c
7856
tx_ring->ch = iter;
drivers/net/ethernet/intel/i40e/i40e_main.c
7860
rx_ring->ch = iter;
drivers/net/ethernet/intel/i40e/i40e_main.c
7862
ch = iter;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
32
struct list_head *iter;
drivers/net/ethernet/intel/ice/ice_eswitch_br.c
34
netdev_for_each_lower_dev(lag_dev, lower, iter) {
drivers/net/ethernet/intel/ice/ice_sched.c
2894
struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
drivers/net/ethernet/intel/ice/ice_sched.c
2912
list_for_each_entry_safe(iter, vtmp,
drivers/net/ethernet/intel/ice/ice_sched.c
2915
if (iter->vsi_handle == vsi_handle) {
drivers/net/ethernet/intel/ice/ice_sched.c
2916
old_agg_vsi_info = iter;
drivers/net/ethernet/intel/idpf/idpf.h
610
#define idpf_for_each_vport(adapter, iter) \
drivers/net/ethernet/intel/idpf/idpf.h
611
for (struct idpf_vport **__##iter = &(adapter)->vports[0], \
drivers/net/ethernet/intel/idpf/idpf.h
612
*iter = (adapter)->max_vports ? *__##iter : NULL; \
drivers/net/ethernet/intel/idpf/idpf.h
613
iter; \
drivers/net/ethernet/intel/idpf/idpf.h
614
iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \
drivers/net/ethernet/intel/idpf/idpf.h
615
*__##iter : NULL)
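idpf_for_each_vport() is unusual: it declares both a hidden cursor (__##iter) and the visible element pointer in the for-init, so the call site needs no declaration and the loop ends at the first NULL slot. A hedged generic reimplementation of the same trick (for_each_obj is hypothetical):

    /* Iterate an array of 'n' object pointers, yielding each element
     * as 'it'; stops early at the first NULL slot, like the idpf macro.
     */
    #define for_each_obj(type, arr, n, it)                            \
        for (type **__##it = &(arr)[0],                               \
             *it = (n) ? *__##it : NULL;                              \
             it;                                                      \
             it = (++__##it) < &(arr)[(n)] ? *__##it : NULL)

Because the macro declares 'it' itself, a caller writes e.g. for_each_obj(struct foo, table, 8, f) without declaring f first.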
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
302
struct idpf_fsteer_fltr *f, *iter;
drivers/net/ethernet/intel/idpf/idpf_ethtool.c
330
list_for_each_entry_safe(f, iter,
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
128
int cgx, lmac, iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
162
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
163
if (iter >= MAX_LMAC_COUNT)
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
166
iter);
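In rvu_cgx.c, iter is a plain bit index: for_each_set_bit() yields each set position of the LMAC bitmap in turn (rvu_nix.c below uses the same walk). A minimal sketch with hypothetical names:

    #include <linux/bitops.h>

    /* Visit every enabled LMAC index in a bitmap of 'max' bits. */
    static void visit_lmacs(const unsigned long *bmap, unsigned int max)
    {
        unsigned int iter;

        for_each_set_bit(iter, bmap, max) {
            /* iter is the position of the next set bit */
        }
    }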
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3506
struct rvu_npc_mcam_rule *iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3522
list_for_each_entry(iter, &mcam->mcam_rules, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3523
pf = rvu_get_pf(rvu->pdev, iter->owner);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3526
if (iter->owner & RVU_PFVF_FUNC_MASK) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3527
vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3532
seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3535
rvu_dbg_get_intf_name(iter->intf));
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3536
seq_printf(s, "\tmcam entry: %d\n", iter->entry);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3538
rvu_dbg_npc_mcam_show_flows(s, iter);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3539
if (is_npc_intf_rx(iter->intf)) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3540
target = iter->rx_action.pf_func;
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3549
seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3550
seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3553
rvu_dbg_npc_mcam_show_action(s, iter);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3555
enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3558
if (!iter->has_cntr)
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3560
seq_printf(s, "\tcounter: %d\n", iter->cntr);
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
3562
hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
4728
int iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
4775
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
4776
lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
4780
__func__, cgx, iter);
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
4788
link = iter + slink;
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6289
struct nix_mcast_grp_elem *iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6292
list_for_each_entry(iter, &mcast_grp->mcast_grp_head, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6293
if (iter->mcast_grp_idx == mcast_grp_idx) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
6300
return iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1050
struct rvu_npc_mcam_rule *iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1053
list_for_each_entry(iter, &mcam->mcam_rules, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1054
if (iter->entry == entry) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1056
return iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1068
struct rvu_npc_mcam_rule *iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1071
list_for_each_entry(iter, &mcam->mcam_rules, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1072
if (iter->entry > rule->entry)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1074
head = &iter->list;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1557
struct rvu_npc_mcam_rule *iter, *tmp;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1565
list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1566
if (iter->owner == pcifunc) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1569
list_move_tail(&iter->list, &del_list);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1571
} else if (req->end && iter->entry >= req->start &&
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1572
iter->entry <= req->end) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1573
list_move_tail(&iter->list, &del_list);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1575
} else if (req->entry == iter->entry) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1579
NPC_AF_MATCH_STATX(iter->cntr));
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1580
list_move_tail(&iter->list, &del_list);
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1587
list_for_each_entry_safe(iter, tmp, &del_list, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1588
u16 entry = iter->entry;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
1592
if (npc_delete_flow(rvu, iter, pcifunc))
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1833
struct npc_exact_table_entry *tmp, *iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1837
list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1838
if (pcifunc != iter->pcifunc)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
1841
seq_id = iter->seq_id;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
788
struct npc_exact_table_entry *entry, *tmp, *iter;
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
851
list_for_each_entry_safe(iter, tmp, lhead, list) {
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
852
if (index < iter->index)
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
855
pprev = &iter->list;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1238
struct otx2_flow *iter;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1242
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1243
if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1244
eth_hdr = &iter->flow_spec.h_u.ether_spec;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1261
list_del(&iter->list);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1262
kfree(iter);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1338
struct otx2_flow *iter, *tmp;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1359
list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1360
list_del(&iter->list);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1361
kfree(iter);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1371
struct otx2_flow *iter, *tmp;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1382
list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1383
list_del(&iter->list);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1384
kfree(iter);
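The two loops above (otx2_flows.c:1359 and :1382) are the standard whole-list teardown: the _safe variant caches the successor so the current node can be unlinked and freed. Sketch:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct flow {                      /* stand-in for struct otx2_flow */
        struct list_head list;
    };

    static void free_all_flows(struct list_head *head)
    {
        struct flow *iter, *tmp;

        /* _safe caches the next node, so list_del()+kfree() is legal */
        list_for_each_entry_safe(iter, tmp, head, list) {
            list_del(&iter->list);
            kfree(iter);
        }
    }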
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1511
struct otx2_flow *iter;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1514
list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1515
if (iter->rule_type & DMAC_FILTER_RULE) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1516
eth_hdr = &iter->flow_spec.h_u.ether_spec;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
1518
iter->entry);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
459
struct otx2_flow *iter;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
461
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
462
if (iter->location == location)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
463
return iter;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
472
struct otx2_flow *iter;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
474
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
475
if (iter->location > flow->location)
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
477
head = &iter->list;
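The loop ending at otx2_flows.c:477 is the usual sorted-insert idiom, also visible in rvu_npc_fs.c above and en_fs_ethtool.c below: scan for the first element with a larger key, remember the head to insert after, then link the new node there. A sketch under that reading (locking elided):

    #include <linux/list.h>

    struct flow {
        struct list_head list;
        u16 location;
    };

    /* Insert 'flow' keeping the list sorted by ->location, ascending. */
    static void add_flow_sorted(struct list_head *flow_list, struct flow *flow)
    {
        struct list_head *head = flow_list;
        struct flow *iter;

        list_for_each_entry(iter, flow_list, list) {
            if (iter->location > flow->location)
                break;
            head = &iter->list;
        }
        /* adding after 'head' places us before the first larger element */
        list_add(&flow->list, head);
    }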
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
500
struct otx2_flow *iter;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
505
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
506
if (iter->location == location) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
507
nfc->fs = iter->flow_spec;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
508
nfc->rss_context = iter->rss_ctx_id;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
962
struct otx2_tc_flow *iter, *tmp;
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
967
list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
968
list_del(&iter->list);
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
969
kfree(iter);
drivers/net/ethernet/marvell/prestera/prestera_main.c
1176
struct list_head *iter;
drivers/net/ethernet/marvell/prestera/prestera_main.c
1179
netdev_for_each_lower_dev(lag_dev, dev, iter) {
drivers/net/ethernet/marvell/prestera/prestera_router.c
1079
struct rhashtable_iter iter;
drivers/net/ethernet/marvell/prestera/prestera_router.c
1081
rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1082
rhashtable_walk_start(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1084
n_cache = rhashtable_walk_next(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1092
rhashtable_walk_stop(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1094
rhashtable_walk_start(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1096
rhashtable_walk_stop(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1097
rhashtable_walk_exit(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1129
struct rhashtable_iter iter;
drivers/net/ethernet/marvell/prestera/prestera_router.c
1131
rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1132
rhashtable_walk_start(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1134
n_cache = rhashtable_walk_next(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1142
rhashtable_walk_stop(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1145
rhashtable_walk_start(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1147
rhashtable_walk_stop(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
1148
rhashtable_walk_exit(&iter);
drivers/net/ethernet/marvell/prestera/prestera_router.c
322
struct fib6_info *iter;
drivers/net/ethernet/marvell/prestera/prestera_router.c
334
list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
drivers/net/ethernet/marvell/prestera/prestera_router.c
337
return &iter->fib6_nh->nh_common;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3534
struct list_head *iter;
drivers/net/ethernet/mediatek/mtk_eth_soc.c
3540
netdev_for_each_lower_dev(dev, ldev, iter) {
drivers/net/ethernet/mellanox/mlx4/fw.c
1522
struct mlx4_icm_iter iter;
drivers/net/ethernet/mellanox/mlx4/fw.c
1535
for (mlx4_icm_first(icm, &iter);
drivers/net/ethernet/mellanox/mlx4/fw.c
1536
!mlx4_icm_last(&iter);
drivers/net/ethernet/mellanox/mlx4/fw.c
1537
mlx4_icm_next(&iter)) {
drivers/net/ethernet/mellanox/mlx4/fw.c
1543
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
drivers/net/ethernet/mellanox/mlx4/fw.c
1547
(unsigned long long) mlx4_icm_addr(&iter),
drivers/net/ethernet/mellanox/mlx4/fw.c
1548
mlx4_icm_size(&iter));
drivers/net/ethernet/mellanox/mlx4/fw.c
1553
for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
drivers/net/ethernet/mellanox/mlx4/fw.c
1560
cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
drivers/net/ethernet/mellanox/mlx4/icm.h
100
iter->chunk = list_empty(&icm->chunk_list) ?
drivers/net/ethernet/mellanox/mlx4/icm.h
103
iter->page_idx = 0;
drivers/net/ethernet/mellanox/mlx4/icm.h
106
static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
drivers/net/ethernet/mellanox/mlx4/icm.h
108
return !iter->chunk;
drivers/net/ethernet/mellanox/mlx4/icm.h
111
static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
drivers/net/ethernet/mellanox/mlx4/icm.h
113
if (++iter->page_idx >= iter->chunk->nsg) {
drivers/net/ethernet/mellanox/mlx4/icm.h
114
if (iter->chunk->list.next == &iter->icm->chunk_list) {
drivers/net/ethernet/mellanox/mlx4/icm.h
115
iter->chunk = NULL;
drivers/net/ethernet/mellanox/mlx4/icm.h
119
iter->chunk = list_entry(iter->chunk->list.next,
drivers/net/ethernet/mellanox/mlx4/icm.h
121
iter->page_idx = 0;
drivers/net/ethernet/mellanox/mlx4/icm.h
125
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
drivers/net/ethernet/mellanox/mlx4/icm.h
127
if (iter->chunk->coherent)
drivers/net/ethernet/mellanox/mlx4/icm.h
128
return iter->chunk->buf[iter->page_idx].dma_addr;
drivers/net/ethernet/mellanox/mlx4/icm.h
130
return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
drivers/net/ethernet/mellanox/mlx4/icm.h
133
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
drivers/net/ethernet/mellanox/mlx4/icm.h
135
if (iter->chunk->coherent)
drivers/net/ethernet/mellanox/mlx4/icm.h
136
return iter->chunk->buf[iter->page_idx].size;
drivers/net/ethernet/mellanox/mlx4/icm.h
138
return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
drivers/net/ethernet/mellanox/mlx4/icm.h
97
struct mlx4_icm_iter *iter)
drivers/net/ethernet/mellanox/mlx4/icm.h
99
iter->icm = icm;
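mlx4's ICM walker is a hand-rolled first/last/next iterator over a chunk list, and fw.c consumes it as a plain three-clause for loop. Reduced to its shape (this assumes mlx4's private icm.h for the types and helpers):

    static void walk_icm(struct mlx4_icm *icm)
    {
        struct mlx4_icm_iter iter;

        for (mlx4_icm_first(icm, &iter);
             !mlx4_icm_last(&iter);
             mlx4_icm_next(&iter)) {
            dma_addr_t addr = mlx4_icm_addr(&iter);
            unsigned long len = mlx4_icm_size(&iter);

            /* fw.c feeds (addr, len) to firmware in mailbox chunks */
            (void)addr;
            (void)len;
        }
    }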
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
196
struct list_head *iter;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
219
netdev_for_each_lower_dev(lag_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
168
struct list_head *iter;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
173
netdev_for_each_lower_dev(dev, lower, iter) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
44
struct list_head *iter;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
46
netdev_for_each_lower_dev(dev, lower, iter) {
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
86
struct list_head *iter;
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
92
netdev_for_each_lower_dev(dev, lower_dev, iter) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1436
struct mlx5e_macsec_device *iter;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1441
list_for_each_entry(iter, device_list, macsec_device_list_element) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1443
macsec_sa = iter->tx_sa[i];
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1460
struct mlx5e_macsec_device *iter;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1465
list_for_each_entry(iter, device_list, macsec_device_list_element) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
1466
sc_list = &iter->macsec_rx_sc_list_head;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
411
struct mlx5e_macsec_rx_sc *iter;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
413
list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
414
if (iter->sci == sci)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
415
return iter;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
480
struct mlx5e_macsec_device *iter;
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
484
list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
485
if (iter->netdev == ctx->secy->netdev)
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
486
return iter;
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
400
struct mlx5e_ethtool_rule *iter;
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
402
list_for_each_entry(iter, &ethtool->rules, list) {
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
403
if (iter->flow_spec.location > rule->flow_spec.location)
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
405
head = &iter->list;
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
526
struct mlx5e_ethtool_rule *iter;
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
528
list_for_each_entry(iter, &ethtool->rules, list) {
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
529
if (iter->flow_spec.location == location)
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
530
return iter;
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
853
struct mlx5e_ethtool_rule *iter;
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
856
list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
857
del_ethtool_rule(fs, iter);
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
161
struct rhashtable_iter iter;
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
171
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
172
rhashtable_walk_start(&iter);
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
173
while ((flow = rhashtable_walk_next(&iter)) != NULL) {
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
182
rhashtable_walk_stop(&iter);
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
183
rhashtable_walk_exit(&iter);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1000
ft = find_closest_ft_recursive(iter, &iter->children, reverse);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1079
struct mlx5_flow_table *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1082
fs_for_each_ft(iter, prio) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1083
err = root->cmds->modify_flow_table(root, iter, ft);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1087
iter->id, iter->type, err);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1188
struct fs_node *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1193
list_for_each_entry(iter, &fte->dup->children, list) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1194
tmp_rule = container_of(iter, struct mlx5_flow_rule, node);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1267
struct mlx5_flow_rule *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1282
list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1283
if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1284
iter->ft->ns == new_next_ft->ns)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1287
err = _mlx5_modify_rule_destination(iter, &dest);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1326
struct mlx5_flow_table *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1328
fs_for_each_ft(iter, prio) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1329
if (iter->level > ft->level)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1331
prev = &iter->node.list;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2066
struct match_list *iter, *match_tmp;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2068
list_for_each_entry_safe(iter, match_tmp, &head->list,
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2070
tree_put_node(&iter->g->node, ft_locked);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2071
list_del(&iter->list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2072
kfree(iter);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2114
struct match_list *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2117
list_for_each_entry(iter, match_head, list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2118
version += (u64)atomic_read(&iter->g->node.version);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2232
struct match_list *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2252
list_for_each_entry(iter, match_head, list) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2255
g = iter->g;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2293
list_for_each_entry(iter, match_head, list) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
2294
g = iter->g;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3142
struct fs_node *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3146
list_for_each_entry_safe(iter, temp, &node->children, list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
3147
clean_tree(iter);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
694
struct fs_node *iter;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
702
list_for_each_entry(iter, &fte->node.children, list)
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
703
iter->del_sw_func = del_sw_hw_rule;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
989
struct fs_node *iter = list_entry(start, struct fs_node, list);
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
995
list_for_each_advance_continue(iter, &root->children, reverse) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
996
if (iter->type == FS_TYPE_FLOW_TABLE) {
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
997
fs_get_obj(ft, iter);
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
102
struct mlx5_irq *iter;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
107
xa_for_each_range(&pool->irqs, index, iter, start, end) {
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
108
struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
109
int iter_refcount = mlx5_irq_read_locked(iter);
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
116
return iter;
drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
122
irq = iter;
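irq_affinity.c scans a slice of an XArray with xa_for_each_range() to pick the least-loaded IRQ. The walk itself, with the selection logic elided:

    #include <linux/xarray.h>

    /* Visit every present entry whose index lies in [start, end]. */
    static void scan_irq_pool(struct xarray *irqs,
                              unsigned long start, unsigned long end)
    {
        unsigned long index;
        void *entry;

        xa_for_each_range(irqs, index, entry, start, end) {
            /* entry is non-NULL here; index is its key */
        }
    }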
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
17
#define devcom_for_each_component(iter) \
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
18
list_for_each_entry(iter, &devcom_comp_list, comp_list)
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
53
struct mlx5_devcom_dev *iter;
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
55
list_for_each_entry(iter, &devcom_dev_list, list)
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
56
if (iter->dev == dev)
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
2284
struct mlx5_macsec_device *iter, *macsec_device = NULL;
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
2290
list_for_each_entry(iter, &macsec_fs->macsec_devices_list, macsec_devices_list_entry) {
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
2291
if (iter->macdev == macdev) {
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
2292
macsec_device = iter;
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
655
struct mlx5_macsec_device *iter, *macsec_device = NULL;
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
659
list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
660
if (iter->macdev == macdev) {
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
661
macsec_device = iter;
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
700
struct mlx5_macsec_device *iter, *macsec_device = NULL;
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
720
list_for_each_entry(iter, macsec_devices_list, macsec_devices_list_entry) {
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
721
if (iter->macdev == macdev) {
drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
722
macsec_device = iter;
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
224
struct fw_page *iter;
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
227
list_for_each_entry(iter, &dev->priv.free_list, list) {
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
228
if (iter->function != function)
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
230
fp = iter;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
875
int data_len, iter = 0, cur_sent;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
890
send_info.remote_addr = arg_id + iter;
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
896
iter++;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4092
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4097
netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4270
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4282
netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4301
netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4328
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4330
netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4601
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4603
netdev_for_each_lower_dev(br_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4615
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4617
netdev_for_each_lower_dev(br_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4673
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4676
netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4937
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
4940
netdev_for_each_lower_dev(lag_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
5026
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
5029
netdev_for_each_lower_dev(lag_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
432
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
434
netdev_for_each_lower_dev(br_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
848
struct rhashtable_iter iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
855
rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
856
rhashtable_walk_start(&iter);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
857
while ((obj = rhashtable_walk_next(&iter))) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
865
rhashtable_walk_stop(&iter);
drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
866
rhashtable_walk_exit(&iter);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
10152
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
10162
netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
10178
netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
10198
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
10201
netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7821
struct fib6_info *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7840
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7844
rt_arr[i + 1] = iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
7845
fib6_info_hold(iter);
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
9246
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
9249
netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
342
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
344
netdev_for_each_lower_dev(lag_dev, dev, iter)
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
199
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
202
netdev_for_each_lower_dev(br_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
217
netdev_for_each_lower_dev(br_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
231
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
233
netdev_for_each_lower_dev(br_dev, dev, iter) {
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2572
struct list_head *iter;
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
2574
netdev_for_each_lower_dev(br_dev, dev, iter) {
drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
290
struct list_head *iter;
drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
293
netdev_for_each_lower_dev(dev, lower, iter) {
drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
314
struct list_head *iter;
drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
317
netdev_for_each_lower_dev(dev, lower, iter) {
drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
365
struct list_head *iter;
drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
371
netdev_for_each_lower_dev(upper, dev, iter) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
1439
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
146
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1461
vcap_iter_init(&iter, vctrl->vcaps[vt].act_width, tgt,
drivers/net/ethernet/microchip/vcap/vcap_api.c
1463
vcap_decode_field(actstream, &iter, actionfield[idx].width,
drivers/net/ethernet/microchip/vcap/vcap_api.c
153
vcap_iter_set(&iter, sw_width, tg, 0);
drivers/net/ethernet/microchip/vcap/vcap_api.c
154
while (iter.tg->width) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
156
iter.offset = iter.tg->offset;
drivers/net/ethernet/microchip/vcap/vcap_api.c
157
vcap_iter_update(&iter);
drivers/net/ethernet/microchip/vcap/vcap_api.c
158
for (idx = 0; idx < iter.tg->width; idx++) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
163
vcap_set_bit(stream, &iter, 0x1);
drivers/net/ethernet/microchip/vcap/vcap_api.c
1642
struct vcap_admin *iter, *last = NULL;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1645
list_for_each_entry(iter, &vctrl->list, list) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
1646
if (iter->first_cid > max_cid &&
drivers/net/ethernet/microchip/vcap/vcap_api.c
1647
iter->ingress == ingress) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
1648
last = iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
1649
max_cid = iter->first_cid;
drivers/net/ethernet/microchip/vcap/vcap_api.c
165
vcap_set_bit(stream, &iter,
drivers/net/ethernet/microchip/vcap/vcap_api.c
166
(iter.tg->value >> idx) & 0x1);
drivers/net/ethernet/microchip/vcap/vcap_api.c
167
iter.offset++;
drivers/net/ethernet/microchip/vcap/vcap_api.c
168
vcap_iter_update(&iter);
drivers/net/ethernet/microchip/vcap/vcap_api.c
170
iter.tg++; /* next typegroup */
drivers/net/ethernet/microchip/vcap/vcap_api.c
2103
struct vcap_rule_internal *duprule, *iter, *elem = NULL;
drivers/net/ethernet/microchip/vcap/vcap_api.c
2115
list_for_each_entry(iter, &admin->rules, list) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
2116
if (ri->sort_key < iter->sort_key) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
2117
elem = iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
228
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
255
vcap_iter_init(&iter, vcap->sw_width, tgt, typefld->offset);
drivers/net/ethernet/microchip/vcap/vcap_api.c
256
vcap_decode_field(mskstream, &iter, typefld->width, (u8 *)&mask);
drivers/net/ethernet/microchip/vcap/vcap_api.c
264
vcap_iter_init(&iter, vcap->sw_width, tgt, typefld->offset);
drivers/net/ethernet/microchip/vcap/vcap_api.c
265
vcap_decode_field(keystream, &iter, typefld->width, (u8 *)&value);
drivers/net/ethernet/microchip/vcap/vcap_api.c
275
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
278
vcap_iter_set(&iter, sw_width, tgt, 0);
drivers/net/ethernet/microchip/vcap/vcap_api.c
280
while (iter.tg->width) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
282
u32 tg_value = iter.tg->value;
drivers/net/ethernet/microchip/vcap/vcap_api.c
285
tg_value = (1 << iter.tg->width) - 1;
drivers/net/ethernet/microchip/vcap/vcap_api.c
287
iter.offset = iter.tg->offset;
drivers/net/ethernet/microchip/vcap/vcap_api.c
288
vcap_iter_update(&iter);
drivers/net/ethernet/microchip/vcap/vcap_api.c
289
for (idx = 0; idx < iter.tg->width; idx++) {
drivers/net/ethernet/microchip/vcap/vcap_api.c
291
if (vcap_get_bit(stream, &iter))
drivers/net/ethernet/microchip/vcap/vcap_api.c
293
iter.offset++;
drivers/net/ethernet/microchip/vcap/vcap_api.c
294
vcap_iter_update(&iter);
drivers/net/ethernet/microchip/vcap/vcap_api.c
298
iter.tg++; /* next typegroup */
drivers/net/ethernet/microchip/vcap/vcap_api.c
453
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
493
vcap_iter_init(&iter, sw_width, tgt, rf->offset);
drivers/net/ethernet/microchip/vcap/vcap_api.c
494
vcap_encode_field(cache->keystream, &iter, rf->width, value);
drivers/net/ethernet/microchip/vcap/vcap_api.c
495
vcap_iter_init(&iter, sw_width, tgt, rf->offset);
drivers/net/ethernet/microchip/vcap/vcap_api.c
496
vcap_encode_field(cache->maskstream, &iter, rf->width, mask);
drivers/net/ethernet/microchip/vcap/vcap_api.c
747
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api.c
777
vcap_iter_init(&iter, act_width, tgt, rf->offset);
drivers/net/ethernet/microchip/vcap/vcap_api.c
778
vcap_encode_field(cache->actionstream, &iter, rf->width, value);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
330
struct vcap_stream_iter iter = {
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
339
vcap_set_bit(stream, &iter, 1);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
347
struct vcap_stream_iter iter = {
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
356
vcap_set_bit(stream, &iter, 0);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
365
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
378
vcap_iter_init(&iter, 52, typegroups, 86);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
380
KUNIT_EXPECT_EQ(test, 52, iter.sw_width);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
381
KUNIT_EXPECT_EQ(test, 86 + 2, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
382
KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
383
KUNIT_EXPECT_EQ(test, 4, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
385
vcap_iter_init(&iter, 49, typegroups2, 134);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
387
KUNIT_EXPECT_EQ(test, 49, iter.sw_width);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
388
KUNIT_EXPECT_EQ(test, 134 + 7, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
389
KUNIT_EXPECT_EQ(test, 5, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
390
KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
395
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
407
vcap_iter_init(&iter, 49, typegroups, 86);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
409
KUNIT_EXPECT_EQ(test, 49, iter.sw_width);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
410
KUNIT_EXPECT_EQ(test, 86 + 5, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
411
KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
412
KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
414
vcap_iter_next(&iter);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
416
KUNIT_EXPECT_EQ(test, 91 + 1, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
417
KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
418
KUNIT_EXPECT_EQ(test, 11, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
421
vcap_iter_next(&iter);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
423
KUNIT_EXPECT_EQ(test, 92 + 6 + 2, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
424
KUNIT_EXPECT_EQ(test, 4, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
425
KUNIT_EXPECT_EQ(test, 2, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
459
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
471
vcap_iter_init(&iter, 49, typegroups, 44);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
473
KUNIT_EXPECT_EQ(test, 48, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
474
KUNIT_EXPECT_EQ(test, 1, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
475
KUNIT_EXPECT_EQ(test, 16, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
477
vcap_encode_bit(stream, &iter, 1);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
486
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
504
vcap_iter_init(&iter, 49, typegroups, rf.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
506
KUNIT_EXPECT_EQ(test, 91, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
507
KUNIT_EXPECT_EQ(test, 3, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
508
KUNIT_EXPECT_EQ(test, 10, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
510
vcap_encode_field(stream, &iter, rf.width, value);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
537
struct vcap_stream_iter iter;
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
553
vcap_iter_init(&iter, sw_width, tgt, rf.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
555
KUNIT_EXPECT_EQ(test, 1, iter.regs_per_sw);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
556
KUNIT_EXPECT_EQ(test, 21, iter.sw_width);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
557
KUNIT_EXPECT_EQ(test, 25 + 3 + 2, iter.offset);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
558
KUNIT_EXPECT_EQ(test, 1, iter.reg_idx);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
559
KUNIT_EXPECT_EQ(test, 25 + 3 + 2 - sw_width, iter.reg_bitpos);
drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
561
vcap_encode_field(stream, &iter, rf.width, value);
drivers/net/ethernet/mscc/ocelot_net.c
1553
struct list_head *iter;
drivers/net/ethernet/mscc/ocelot_net.c
1556
netdev_for_each_lower_dev(dev, lower, iter) {
drivers/net/ethernet/mscc/ocelot_net.c
1590
struct list_head *iter;
drivers/net/ethernet/mscc/ocelot_net.c
1593
netdev_for_each_lower_dev(dev, lower, iter) {
drivers/net/ethernet/netronome/nfp/abm/cls.c
110
struct nfp_abm_u32_match *iter;
drivers/net/ethernet/netronome/nfp/abm/cls.c
112
list_for_each_entry(iter, &alink->dscp_map, list)
drivers/net/ethernet/netronome/nfp/abm/cls.c
113
if ((prio & iter->mask) == iter->val)
drivers/net/ethernet/netronome/nfp/abm/cls.c
114
return iter->band;
drivers/net/ethernet/netronome/nfp/abm/cls.c
158
struct nfp_abm_u32_match *iter;
drivers/net/ethernet/netronome/nfp/abm/cls.c
160
list_for_each_entry(iter, &alink->dscp_map, list)
drivers/net/ethernet/netronome/nfp/abm/cls.c
161
if (iter->handle == knode->handle) {
drivers/net/ethernet/netronome/nfp/abm/cls.c
162
list_del(&iter->list);
drivers/net/ethernet/netronome/nfp/abm/cls.c
163
kfree(iter);
drivers/net/ethernet/netronome/nfp/abm/cls.c
174
struct nfp_abm_u32_match *match = NULL, *iter;
drivers/net/ethernet/netronome/nfp/abm/cls.c
189
list_for_each_entry(iter, &alink->dscp_map, list) {
drivers/net/ethernet/netronome/nfp/abm/cls.c
192
if (iter->handle == knode->handle) {
drivers/net/ethernet/netronome/nfp/abm/cls.c
193
match = iter;
drivers/net/ethernet/netronome/nfp/abm/cls.c
197
cmask = iter->mask & mask;
drivers/net/ethernet/netronome/nfp/abm/cls.c
198
if ((iter->val & cmask) == (val & cmask) &&
drivers/net/ethernet/netronome/nfp/abm/cls.c
199
iter->band != knode->res->classid) {
drivers/net/ethernet/netronome/nfp/abm/qdisc.c
248
struct radix_tree_iter iter;
drivers/net/ethernet/netronome/nfp/abm/qdisc.c
260
radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
drivers/net/ethernet/netronome/nfp/abm/qdisc.c
269
radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
drivers/net/ethernet/netronome/nfp/abm/qdisc.c
288
struct radix_tree_iter iter;
drivers/net/ethernet/netronome/nfp/abm/qdisc.c
303
radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
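The nfp qdisc code uses the older radix-tree walk, where the cursor is a struct radix_tree_iter plus a slot pointer that must be dereferenced with care. Sketch (caller holds rcu_read_lock() or the tree's lock):

    #include <linux/radix-tree.h>

    static void visit_qdiscs(struct radix_tree_root *root)
    {
        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, root, &iter, 0) {
            void *item = radix_tree_deref_slot(slot);

            if (radix_tree_deref_retry(item))
                continue;          /* raced with a tree modification */
            /* iter.index is the key; item is the stored pointer */
        }
    }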
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1790
struct rhashtable_iter iter;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1792
rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1793
rhashtable_walk_start(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1794
while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1797
rhashtable_walk_stop(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1799
rhashtable_walk_start(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1801
rhashtable_walk_stop(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1802
rhashtable_walk_exit(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1944
struct rhashtable_iter iter;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1947
rhashtable_walk_enter(&priv->ct_zone_table, &iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1948
rhashtable_walk_start(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1949
while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1952
rhashtable_walk_stop(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1954
rhashtable_walk_start(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1956
rhashtable_walk_stop(&iter);
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1957
rhashtable_walk_exit(&iter);
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
675
struct rhashtable_iter iter;
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
678
rhashtable_walk_enter(&fl_priv->meter_table, &iter);
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
679
rhashtable_walk_start(&iter);
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
681
while ((meter_entry = rhashtable_walk_next(&iter)) != NULL) {
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
688
rhashtable_walk_stop(&iter);
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
689
rhashtable_walk_exit(&iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
376
struct rhashtable_iter iter;
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
380
rhashtable_walk_enter(&priv->neigh_table, &iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
381
rhashtable_walk_start(&iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
382
while ((nn_entry = rhashtable_walk_next(&iter)) != NULL) {
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
395
rhashtable_walk_stop(&iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
396
rhashtable_walk_exit(&iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
404
struct rhashtable_iter iter;
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
408
rhashtable_walk_enter(&priv->neigh_table, &iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
409
rhashtable_walk_start(&iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
410
while ((neigh = rhashtable_walk_next(&iter)) != NULL) {
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
434
rhashtable_walk_stop(&iter);
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
435
rhashtable_walk_exit(&iter);
drivers/net/ethernet/qlogic/qed/qed_vf.c
108
iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
drivers/net/ethernet/qlogic/qed/qed_vf.c
109
while (!*done && iter--) {
drivers/net/ethernet/qlogic/qed/qed_vf.c
114
iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
drivers/net/ethernet/qlogic/qed/qed_vf.c
115
while (!*done && iter--) {
drivers/net/ethernet/qlogic/qed/qed_vf.c
68
int iter, rc = 0;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1553
int i, iter, rc = 0;
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1572
for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
1609
if (iter == QEDE_SELFTEST_POLL_COUNT) {
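In qed/qede, iter is not a cursor at all but a bounded poll budget: sleep-poll until the flag flips, then test whether the budget ran out. A hedged sketch (POLL_COUNT and the flag type are assumptions; the sleep means this cannot run in atomic context):

    #include <linux/compiler.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define POLL_COUNT 500         /* hypothetical budget */

    static int poll_done(u8 *done)
    {
        int iter;

        for (iter = 0; iter < POLL_COUNT; iter++) {
            if (READ_ONCE(*done))
                break;
            usleep_range(100, 200);
        }

        return iter == POLL_COUNT ? -ETIMEDOUT : 0;
    }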
drivers/net/ethernet/sun/ldmvsw.c
171
struct vnet *iter;
drivers/net/ethernet/sun/ldmvsw.c
197
list_for_each_entry(iter, &vnet_list, list) {
drivers/net/ethernet/sun/ldmvsw.c
198
if (iter->local_mac == *local_mac) {
drivers/net/ethernet/sun/ldmvsw.c
199
vp = iter;
drivers/net/ethernet/sun/sunvnet.c
334
struct vnet *iter, *vp;
drivers/net/ethernet/sun/sunvnet.c
338
list_for_each_entry(iter, &vnet_list, list) {
drivers/net/ethernet/sun/sunvnet.c
339
if (iter->local_mac == *local_mac) {
drivers/net/ethernet/sun/sunvnet.c
340
vp = iter;
drivers/net/netdevsim/fib.c
580
struct nsim_fib6_rt_nh *iter, *tmp;
drivers/net/netdevsim/fib.c
582
list_for_each_entry_safe(iter, tmp, &fib6_rt->nh_list, list)
drivers/net/netdevsim/fib.c
583
nsim_fib6_rt_nh_del(fib6_rt, iter->rt);
drivers/net/netdevsim/fib.c
804
struct fib6_info *iter;
drivers/net/netdevsim/fib.c
823
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
drivers/net/netdevsim/fib.c
827
rt_arr[i + 1] = iter;
drivers/net/netdevsim/fib.c
828
fib6_info_hold(iter);
drivers/net/phy/mscc/mscc_serdes.c
321
u32 iter;
drivers/net/phy/mscc/mscc_serdes.c
443
for (iter = 0; iter < gp_iter; iter++) {
drivers/net/tap.c
706
struct iov_iter *iter)
drivers/net/tap.c
722
ret = tun_vnet_hdr_put(vnet_hdr_len, iter, &vnet_hdr);
drivers/net/tap.c
740
ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
drivers/net/tap.c
741
if (ret || !iov_iter_count(iter))
drivers/net/tap.c
744
ret = copy_to_iter(&veth, sizeof(veth), iter);
drivers/net/tap.c
745
if (ret != sizeof(veth) || !iov_iter_count(iter))
drivers/net/tap.c
749
ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
drivers/net/tun.c
2008
struct iov_iter *iter)
drivers/net/tun.c
2018
ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso);
drivers/net/tun.c
2023
ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
drivers/net/tun.c
2036
struct iov_iter *iter)
drivers/net/tun.c
2054
if (iov_iter_count(iter) < sizeof(pi))
drivers/net/tun.c
2058
if (iov_iter_count(iter) < total) {
drivers/net/tun.c
2063
if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
drivers/net/tun.c
2082
iter, gso);
drivers/net/tun.c
2096
ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
drivers/net/tun.c
2097
if (ret || !iov_iter_count(iter))
drivers/net/tun.c
2100
ret = copy_to_iter(&veth, sizeof(veth), iter);
drivers/net/tun.c
2101
if (ret != sizeof(veth) || !iov_iter_count(iter))
drivers/net/tun.c
2105
skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
drivers/net/tun_vnet.h
157
struct iov_iter *iter,
drivers/net/tun_vnet.h
162
if (unlikely(iov_iter_count(iter) < sz))
drivers/net/tun_vnet.h
165
if (unlikely(copy_to_iter(hdr, parsed_size, iter) != parsed_size))
drivers/net/tun_vnet.h
168
if (iov_iter_zero(sz - parsed_size, iter) != sz - parsed_size)
drivers/net/tun_vnet.h
174
static inline int tun_vnet_hdr_put(int sz, struct iov_iter *iter,
drivers/net/tun_vnet.h
177
return __tun_vnet_hdr_put(sz, 0, iter, hdr);
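tun_vnet.h shows the defensive iov_iter output pattern: check iov_iter_count() up front, verify copy_to_iter()'s return value, and pad the remainder with iov_iter_zero(). A sketch mirroring the __tun_vnet_hdr_put() shape (not the exact body; error codes follow the common convention there):

    #include <linux/errno.h>
    #include <linux/uio.h>

    /* Copy 'len' header bytes into the iov, zero-padding to 'total'. */
    static int put_hdr_padded(struct iov_iter *iter, const void *hdr,
                              size_t len, size_t total)
    {
        if (iov_iter_count(iter) < total)
            return -EINVAL;

        if (copy_to_iter(hdr, len, iter) != len)
            return -EFAULT;

        if (iov_iter_zero(total - len, iter) != total - len)
            return -EFAULT;

        return 0;
    }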
drivers/net/usb/qmi_wwan.c
142
struct list_head *iter;
drivers/net/usb/qmi_wwan.c
146
netdev_for_each_upper_dev_rcu(dev->net, ldev, iter) {
drivers/net/usb/qmi_wwan.c
1580
struct list_head *iter;
drivers/net/usb/qmi_wwan.c
1594
netdev_for_each_upper_dev_rcu(dev->net, ldev, iter)
drivers/net/usb/qmi_wwan.c
395
struct list_head *iter;
drivers/net/usb/qmi_wwan.c
400
netdev_for_each_upper_dev_rcu(dev, ldev, iter) {
drivers/net/vrf.c
1669
struct list_head *iter;
drivers/net/vrf.c
1671
netdev_for_each_lower_dev(dev, port_dev, iter)
drivers/net/wireless/ath/ath10k/wmi-tlv.c
112
ret = iter(ar, tlv_tag, tlv_len, ptr, data);
drivers/net/wireless/ath/ath10k/wmi-tlv.c
72
int (*iter)(struct ath10k *ar, u16 tag, u16 len,
drivers/net/wireless/ath/ath11k/dp_rx.c
1326
int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
drivers/net/wireless/ath/ath11k/dp_rx.c
1352
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
drivers/net/wireless/ath/ath11k/dp_rx.h
81
int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
drivers/net/wireless/ath/ath11k/wmi.c
180
int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
drivers/net/wireless/ath/ath11k/wmi.c
217
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
drivers/net/wireless/ath/ath12k/dp_htt.c
149
int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
drivers/net/wireless/ath/ath12k/dp_htt.c
175
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
drivers/net/wireless/ath/ath12k/dp_htt.h
1526
int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
drivers/net/wireless/ath/ath12k/wmi.c
233
int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
drivers/net/wireless/ath/ath12k/wmi.c
270
ret = iter(ab, tlv_tag, tlv_len, ptr, data);
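In the ath drivers, iter is a callback, not a cursor: the TLV parsers walk tag/len/value records and invoke iter(...) once per record, stopping on the first nonzero return. A hedged sketch of that parser shape (the header layout here is hypothetical; each ath generation defines its own framing):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    struct tlv_hdr {               /* hypothetical TLV framing */
        __le16 tag;
        __le16 len;
    };

    static int tlv_iter(const u8 *ptr, size_t len,
                        int (*iter)(u16 tag, u16 len, const void *ptr,
                                    void *data),
                        void *data)
    {
        while (len >= sizeof(struct tlv_hdr)) {
            const struct tlv_hdr *tlv = (const void *)ptr;
            u16 tlv_tag = le16_to_cpu(tlv->tag);
            u16 tlv_len = le16_to_cpu(tlv->len);
            int ret;

            ptr += sizeof(*tlv);
            len -= sizeof(*tlv);
            if (tlv_len > len)
                return -EINVAL;    /* truncated record */

            ret = iter(tlv_tag, tlv_len, ptr, data);
            if (ret)
                return ret;

            ptr += tlv_len;
            len -= tlv_len;
        }
        return 0;
    }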
drivers/net/wireless/ath/carl9170/debug.c
299
struct carl9170_sta_tid *iter;
drivers/net/wireless/ath/carl9170/debug.c
305
list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
drivers/net/wireless/ath/carl9170/debug.c
307
spin_lock_bh(&iter->lock);
drivers/net/wireless/ath/carl9170/debug.c
310
cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
drivers/net/wireless/ath/carl9170/debug.c
311
iter->max, iter->state, iter->counter);
drivers/net/wireless/ath/carl9170/debug.c
314
CARL9170_BAW_BITS, iter->bitmap);
drivers/net/wireless/ath/carl9170/debug.c
323
offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));
drivers/net/wireless/ath/carl9170/debug.c
326
offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %
drivers/net/wireless/ath/carl9170/debug.c
331
" currently queued:%d\n", skb_queue_len(&iter->queue));
drivers/net/wireless/ath/carl9170/debug.c
334
skb_queue_walk(&iter->queue, skb) {
drivers/net/wireless/ath/carl9170/debug.c
343
spin_unlock_bh(&iter->lock);
drivers/net/wireless/ath/carl9170/debug.c
435
struct carl9170_vif_info *iter;
drivers/net/wireless/ath/carl9170/debug.c
445
list_for_each_entry_rcu(iter, &ar->vif_list, list) {
drivers/net/wireless/ath/carl9170/debug.c
446
struct ieee80211_vif *vif = carl9170_get_vif(iter);
drivers/net/wireless/ath/carl9170/debug.c
449
"Master" : " Slave"), iter->id, vif->type, vif->addr,
drivers/net/wireless/ath/carl9170/debug.c
450
iter->enable_beacon ? "beaconing " : "");
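carl9170 walks its vif and TID lists with list_for_each_entry_rcu(), so readers run lockless under rcu_read_lock() while per-entry spinlocks guard the mutable state. Read-side sketch (the element type is a stand-in):

    #include <linux/rculist.h>

    struct vif_info {              /* hypothetical element type */
        struct list_head list;
        int id;
    };

    /* Caller holds rcu_read_lock(); entries may be removed concurrently. */
    static void dump_vifs(struct list_head *vif_list)
    {
        struct vif_info *iter;

        list_for_each_entry_rcu(iter, vif_list, list) {
            /* read-only access; no sleeping inside the RCU section */
        }
    }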
drivers/net/wireless/ath/carl9170/fw.c
23
const struct carl9170fw_desc_head *iter;
drivers/net/wireless/ath/carl9170/fw.c
25
carl9170fw_for_each_hdr(iter, ar->fw.desc) {
drivers/net/wireless/ath/carl9170/fw.c
26
if (carl9170fw_desc_cmp(iter, descid, len,
drivers/net/wireless/ath/carl9170/fw.c
28
return (void *)iter;
drivers/net/wireless/ath/carl9170/fw.c
32
if (carl9170fw_desc_cmp(iter, descid, len,
drivers/net/wireless/ath/carl9170/fw.c
34
return (void *)iter;
drivers/net/wireless/ath/carl9170/main.c
1050
struct carl9170_vif_info *iter;
drivers/net/wireless/ath/carl9170/main.c
1055
list_for_each_entry_rcu(iter, &ar->vif_list, list) {
drivers/net/wireless/ath/carl9170/main.c
1056
if (iter->active && iter->enable_beacon)
drivers/net/wireless/ath/carl9170/tx.c
1402
struct sk_buff *iter;
drivers/net/wireless/ath/carl9170/tx.c
1437
skb_queue_reverse_walk(&agg->queue, iter) {
drivers/net/wireless/ath/carl9170/tx.c
1438
qseq = carl9170_get_seq(iter);
drivers/net/wireless/ath/carl9170/tx.c
1441
__skb_queue_after(&agg->queue, iter, skb);
drivers/net/wireless/ath/carl9170/tx.c
605
struct carl9170_sta_tid *iter;
drivers/net/wireless/ath/carl9170/tx.c
612
list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
drivers/net/wireless/ath/carl9170/tx.c
613
if (iter->state < CARL9170_TID_STATE_IDLE)
drivers/net/wireless/ath/carl9170/tx.c
616
spin_lock_bh(&iter->lock);
drivers/net/wireless/ath/carl9170/tx.c
617
skb = skb_peek(&iter->queue);
drivers/net/wireless/ath/carl9170/tx.c
627
sta = iter->sta;
drivers/net/wireless/ath/carl9170/tx.c
631
ieee80211_stop_tx_ba_session(sta, iter->tid);
drivers/net/wireless/ath/carl9170/tx.c
633
spin_unlock_bh(&iter->lock);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1341
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1354
iter->internal_txf = 0;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1355
iter->fifo_size = 0;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1356
iter->fifo = -1;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1358
iter->lmac = 1;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1360
iter->lmac = 0;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1363
if (!iter->internal_txf) {
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1364
for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) {
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1365
iter->fifo_size =
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1366
cfg->lmac[iter->lmac].txfifo_size[iter->fifo];
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1367
if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1370
iter->fifo--;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1373
iter->internal_txf = 1;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1379
for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) {
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1380
iter->fifo_size =
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1381
cfg->internal_txfifo_size[iter->fifo - txf_num];
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1382
if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo)))
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1395
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1409
range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1411
range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1413
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1444
for (i = 0; i < iter->fifo_size; i += sizeof(*data))
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
1449
reg_dump, iter->fifo_size);
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2112
struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
2122
size += iter->fifo_size;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
582
struct scatterlist *new, *iter;
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
613
for_each_sg(new, iter, n_fill, i) {
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
621
sg_set_page(iter, new_page, PAGE_SIZE, 0);
drivers/net/wireless/intel/iwlwifi/mei/main.c
2063
unsigned int iter = IWLMEI_DEVICE_DOWN_WAIT_ITERATION;
drivers/net/wireless/intel/iwlwifi/mei/main.c
2071
while (!down && iter--) {
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1102
struct iwl_mvm_smooth_entry *resp = NULL, *iter;
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1119
list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1120
if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
1121
resp = iter;
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
676
ieee80211_iter_keys(mvm->hw, vif, iter, &target);
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
42
u32 block, u32 vec, u32 iter)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
48
for (i = 0; i < iter; i++)
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
548
int iter;
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
563
for (iter = 0; iter < 10; iter++) {
drivers/net/wireless/marvell/libertas/firmware.c
105
if (iter->model != priv->fw_model) {
drivers/net/wireless/marvell/libertas/firmware.c
106
iter++;
drivers/net/wireless/marvell/libertas/firmware.c
110
priv->fw_iter = iter;
drivers/net/wireless/marvell/libertas/firmware.c
111
do_load_firmware(priv, iter->helper, helper_firmware_cb);
drivers/net/wireless/marvell/libertas/firmware.c
177
const struct lbs_fw_table *iter;
drivers/net/wireless/marvell/libertas/firmware.c
184
iter = fw_table;
drivers/net/wireless/marvell/libertas/firmware.c
185
while (iter && iter->helper) {
drivers/net/wireless/marvell/libertas/firmware.c
186
if (iter->model != card_model)
drivers/net/wireless/marvell/libertas/firmware.c
190
ret = request_firmware(helper, iter->helper, dev);
drivers/net/wireless/marvell/libertas/firmware.c
198
if (iter->fwname == NULL)
drivers/net/wireless/marvell/libertas/firmware.c
203
ret = request_firmware(mainfw, iter->fwname, dev);
drivers/net/wireless/marvell/libertas/firmware.c
217
iter++;
drivers/net/wireless/marvell/libertas/firmware.c
86
const struct lbs_fw_table *iter;
drivers/net/wireless/marvell/libertas/firmware.c
89
iter = priv->fw_table;
drivers/net/wireless/marvell/libertas/firmware.c
91
iter = ++priv->fw_iter;
drivers/net/wireless/marvell/libertas/firmware.c
99
if (!iter->helper) {
drivers/net/wireless/mediatek/mt76/dma.c
651
struct sk_buff *iter;
drivers/net/wireless/mediatek/mt76/dma.c
682
skb_walk_frags(skb, iter) {
drivers/net/wireless/mediatek/mt76/dma.c
686
addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
drivers/net/wireless/mediatek/mt76/dma.c
692
tx_info.buf[n++].len = iter->len;
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1239
struct mt7915_twt_flow *iter;
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1245
list_for_each_entry_rcu(iter, &dev->twt_list, list)
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1248
iter->wcid, iter->id,
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1249
iter->sched ? 's' : 'u',
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1250
iter->protection ? 'p' : '-',
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1251
iter->trigger ? 't' : '-',
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1252
iter->flowtype ? '-' : 'a',
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1253
iter->exp, iter->mantissa,
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
1254
iter->duration, iter->tsf);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2196
struct mt7915_twt_flow *iter, *iter_next;
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2200
iter = list_first_entry_or_null(&dev->twt_list,
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2202
if (!iter || !iter->sched || iter->start_tsf > duration) {
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2208
list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2209
start_tsf = iter->start_tsf +
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2210
mt7915_mac_twt_duration_align(iter->duration);
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2211
if (list_is_last(&iter->list, &dev->twt_list))
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
2216
list_add(&flow->list, &iter->list);
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
801
struct mt7996_twt_flow *iter;
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
807
list_for_each_entry_rcu(iter, &dev->twt_list, list)
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
810
iter->wcid, iter->id,
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
811
iter->sched ? 's' : 'u',
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
812
iter->protection ? 'p' : '-',
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
813
iter->trigger ? 't' : '-',
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
814
iter->flowtype ? '-' : 'a',
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
815
iter->exp, iter->mantissa,
drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
816
iter->duration, iter->tsf);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3115
struct mt7996_twt_flow *iter, *iter_next;
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3119
iter = list_first_entry_or_null(&dev->twt_list,
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3121
if (!iter || !iter->sched || iter->start_tsf > duration) {
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3127
list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3128
start_tsf = iter->start_tsf +
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3129
mt7996_mac_twt_duration_align(iter->duration);
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3130
if (list_is_last(&iter->list, &dev->twt_list))
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
3135
list_add(&flow->list, &iter->list);
drivers/net/wireless/mediatek/mt76/mt7996/main.c
408
struct ieee80211_bss_conf *iter;
drivers/net/wireless/mediatek/mt76/mt7996/main.c
412
for_each_vif_active_link(vif, iter, link_id) {
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
254
struct sk_buff *iter;
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
287
skb_walk_frags(e->skb, iter) {
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
288
memcpy(sdio->xmit_buf + len, iter->data, iter->len);
drivers/net/wireless/mediatek/mt76/sdio_txrx.c
289
len += iter->len;
drivers/net/wireless/mediatek/mt76/tx.c
782
struct sk_buff *iter, *last = skb;
drivers/net/wireless/mediatek/mt76/tx.c
787
skb_walk_frags(skb, iter) {
drivers/net/wireless/mediatek/mt76/tx.c
788
last = iter;
drivers/net/wireless/mediatek/mt76/tx.c
789
if (!iter->next) {
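The three mt76 groups above (dma.c, sdio_txrx.c, tx.c) all rely on skb_walk_frags() to visit an skb's frag_list. A minimal sketch of the idiom, assuming a caller-owned skb; total_frag_len() is an illustrative name, not a function from those files:

#include <linux/skbuff.h>

/* Head length plus the length of each skb on the frag_list. */
static unsigned int total_frag_len(struct sk_buff *skb)
{
	struct sk_buff *iter;
	unsigned int len = skb_headlen(skb);

	skb_walk_frags(skb, iter)
		len += iter->len;

	return len;
}
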
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
511
struct sk_buff *skb, *iter;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
521
skb_queue_reverse_walk(&priv->b_tx_status.queue, iter) {
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
522
ieee80211hdr = (struct ieee80211_hdr *)iter->data;
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
536
skb = iter;
drivers/net/wireless/realtek/rtlwifi/base.c
1989
struct rtl_bssid_entry *entry = NULL, *iter;
drivers/net/wireless/realtek/rtlwifi/base.c
2002
list_for_each_entry(iter, &rtlpriv->scan_list.list, list) {
drivers/net/wireless/realtek/rtlwifi/base.c
2003
if (memcmp(iter->bssid, hdr->addr3, ETH_ALEN) == 0) {
drivers/net/wireless/realtek/rtlwifi/base.c
2004
list_del_init(&iter->list);
drivers/net/wireless/realtek/rtlwifi/base.c
2005
entry = iter;
drivers/net/wireless/realtek/rtw88/fw.c
1627
struct sk_buff *iter;
drivers/net/wireless/realtek/rtw88/fw.c
1646
iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
drivers/net/wireless/realtek/rtw88/fw.c
1647
if (!iter) {
drivers/net/wireless/realtek/rtw88/fw.c
1656
rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);
drivers/net/wireless/realtek/rtw88/fw.c
1658
rsvd_pkt->skb = iter;
drivers/net/wireless/realtek/rtw88/fw.c
1675
total_page += rtw_len_to_page(iter->len + tx_desc_sz,
drivers/net/wireless/realtek/rtw88/fw.c
1678
total_page += rtw_len_to_page(iter->len, page_size);
drivers/net/wireless/st/cw1200/queue.c
103
--queue->link_map_cache[iter->txpriv.link_id];
drivers/net/wireless/st/cw1200/queue.c
106
if (!--stats->link_map_cache[iter->txpriv.link_id])
drivers/net/wireless/st/cw1200/queue.c
110
cw1200_queue_register_post_gc(head, iter);
drivers/net/wireless/st/cw1200/queue.c
111
iter->skb = NULL;
drivers/net/wireless/st/cw1200/queue.c
112
list_move_tail(&iter->head, &queue->free_pool);
drivers/net/wireless/st/cw1200/queue.c
94
struct cw1200_queue_item *item = NULL, *iter, *tmp;
drivers/net/wireless/st/cw1200/queue.c
97
list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
drivers/net/wireless/st/cw1200/queue.c
98
if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
drivers/net/wireless/st/cw1200/queue.c
99
item = iter;
drivers/net/wireless/ti/wlcore/main.c
2856
struct wl12xx_vif *iter;
drivers/net/wireless/ti/wlcore/main.c
2870
wl12xx_for_each_wlvif(wl, iter) {
drivers/net/wireless/ti/wlcore/main.c
2871
if (iter != wlvif)
drivers/net/wireless/ti/wlcore/main.c
2877
WARN_ON(iter != wlvif);
drivers/net/wwan/wwan_core.c
416
struct class_dev_iter iter;
drivers/net/wwan/wwan_core.c
427
class_dev_iter_init(&iter, &wwan_class, NULL, &wwan_port_dev_type);
drivers/net/wwan/wwan_core.c
428
while ((dev = class_dev_iter_next(&iter))) {
drivers/net/wwan/wwan_core.c
437
class_dev_iter_exit(&iter);
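The wwan_core.c entries show the class_dev_iter API, which must be bracketed by init/exit calls. A hedged sketch, assuming a registered class owned by the caller; visit() is hypothetical:

#include <linux/device.h>

/* Visit every device of @class that matches @type (NULL = any). */
static void for_each_class_device(const struct class *class,
				  const struct device_type *type,
				  void (*visit)(struct device *dev))
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, class, NULL, type);
	while ((dev = class_dev_iter_next(&iter)))
		visit(dev);
	class_dev_iter_exit(&iter);
}
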
drivers/nvdimm/btt.c
1438
struct bvec_iter iter;
drivers/nvdimm/btt.c
1450
bio_for_each_segment(bvec, bio, iter) {
drivers/nvdimm/btt.c
1462
bio_op(bio), iter.bi_sector);
drivers/nvdimm/btt.c
1468
(unsigned long long) iter.bi_sector, len);
drivers/nvdimm/pmem.c
207
struct bvec_iter iter;
drivers/nvdimm/pmem.c
217
bio_for_each_segment(bvec, bio, iter) {
drivers/nvdimm/pmem.c
220
iter.bi_sector, bvec.bv_len);
drivers/nvdimm/pmem.c
223
iter.bi_sector, bvec.bv_len);
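btt.c and pmem.c iterate bios the same way: bvec is a by-value copy of each segment and iter.bi_sector tracks the current position. A sketch of that loop; do_io() is a hypothetical callback:

#include <linux/bio.h>

/* Hand each segment of @bio to @do_io along with its sector. */
static void walk_bio_segments(struct bio *bio,
			      void (*do_io)(struct page *page,
					    unsigned int off, unsigned int len,
					    sector_t sector))
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter)
		do_io(bvec.bv_page, bvec.bv_offset, bvec.bv_len,
		      iter.bi_sector);
}
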
drivers/nvme/host/ioctl.c
117
struct iov_iter *iter, unsigned int flags)
drivers/nvme/host/ioctl.c
139
if (iter)
drivers/nvme/host/ioctl.c
140
ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
drivers/nvme/host/ioctl.c
455
struct iov_iter iter;
drivers/nvme/host/ioctl.c
498
ddir, &iter, issue_flags);
drivers/nvme/host/ioctl.c
501
ddir, &iter, ioucmd, issue_flags);
drivers/nvme/host/ioctl.c
505
map_iter = &iter;
drivers/nvme/host/pci.c
1000
struct blk_dma_iter *iter)
drivers/nvme/host/pci.c
1009
if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
drivers/nvme/host/pci.c
1010
return iter->status;
drivers/nvme/host/pci.c
1018
prp1_dma = iter->addr;
drivers/nvme/host/pci.c
1020
(iter->addr & (NVME_CTRL_PAGE_SIZE - 1)));
drivers/nvme/host/pci.c
1022
iter->addr += prp_len;
drivers/nvme/host/pci.c
1023
iter->len -= prp_len;
drivers/nvme/host/pci.c
1028
if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
drivers/nvme/host/pci.c
1029
if (WARN_ON_ONCE(!iter->status))
drivers/nvme/host/pci.c
1039
prp2_dma = iter->addr;
drivers/nvme/host/pci.c
1051
iter->status = BLK_STS_RESOURCE;
drivers/nvme/host/pci.c
1058
prp_list[i++] = cpu_to_le64(iter->addr);
drivers/nvme/host/pci.c
1060
if (WARN_ON_ONCE(iter->len < prp_len))
drivers/nvme/host/pci.c
1064
iter->addr += prp_len;
drivers/nvme/host/pci.c
1065
iter->len -= prp_len;
drivers/nvme/host/pci.c
1070
if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
drivers/nvme/host/pci.c
1071
if (WARN_ON_ONCE(!iter->status))
drivers/nvme/host/pci.c
1089
iter->status = BLK_STS_RESOURCE;
drivers/nvme/host/pci.c
1107
if (unlikely(iter->status))
drivers/nvme/host/pci.c
1109
return iter->status;
drivers/nvme/host/pci.c
1119
struct blk_dma_iter *iter)
drivers/nvme/host/pci.c
1121
sge->addr = cpu_to_le64(iter->addr);
drivers/nvme/host/pci.c
1122
sge->length = cpu_to_le32(iter->len);
drivers/nvme/host/pci.c
1135
struct blk_dma_iter *iter)
drivers/nvme/host/pci.c
1148
nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter);
drivers/nvme/host/pci.c
1149
iod->total_len += iter->len;
drivers/nvme/host/pci.c
1164
iter->status = BLK_STS_IOERR;
drivers/nvme/host/pci.c
1167
nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
drivers/nvme/host/pci.c
1168
iod->total_len += iter->len;
drivers/nvme/host/pci.c
1169
} while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter));
drivers/nvme/host/pci.c
1172
if (unlikely(iter->status))
drivers/nvme/host/pci.c
1174
return iter->status;
drivers/nvme/host/pci.c
1222
struct blk_dma_iter iter;
drivers/nvme/host/pci.c
1235
if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
drivers/nvme/host/pci.c
1236
return iter.status;
drivers/nvme/host/pci.c
1238
switch (iter.p2pdma.map) {
drivers/nvme/host/pci.c
1254
return nvme_pci_setup_data_sgl(req, &iter);
drivers/nvme/host/pci.c
1255
return nvme_pci_setup_data_prp(req, &iter);
drivers/nvme/host/pci.c
1265
struct blk_dma_iter iter;
drivers/nvme/host/pci.c
1270
&iod->meta_dma_state, &iter))
drivers/nvme/host/pci.c
1271
return iter.status;
drivers/nvme/host/pci.c
1273
switch (iter.p2pdma.map) {
drivers/nvme/host/pci.c
1307
iod->cmd.common.metadata = cpu_to_le64(iter.addr);
drivers/nvme/host/pci.c
1308
iod->meta_total_len = iter.len;
drivers/nvme/host/pci.c
1309
iod->meta_dma = iter.addr;
drivers/nvme/host/pci.c
1324
iod->meta_total_len = iter.len;
drivers/nvme/host/pci.c
1325
nvme_pci_sgl_set_data(sg_list, &iter);
drivers/nvme/host/pci.c
1331
nvme_pci_sgl_set_data(&sg_list[++i], &iter);
drivers/nvme/host/pci.c
1332
iod->meta_total_len += iter.len;
drivers/nvme/host/pci.c
1333
} while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter));
drivers/nvme/host/pci.c
1336
if (unlikely(iter.status))
drivers/nvme/host/pci.c
1338
return iter.status;
drivers/nvme/host/pci.c
965
struct blk_dma_iter *iter)
drivers/nvme/host/pci.c
978
iter->status = BLK_STS_RESOURCE;
drivers/nvme/host/pci.c
983
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
drivers/nvme/host/pci.c
984
iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
drivers/nvme/host/pci.c
990
struct blk_dma_iter *iter)
drivers/nvme/host/pci.c
992
if (iter->len)
drivers/nvme/host/pci.c
994
if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
drivers/nvme/host/pci.c
996
return nvme_pci_prp_save_mapping(req, dma_dev, iter);
drivers/nvme/host/tcp.c
120
struct iov_iter iter;
drivers/nvme/host/tcp.c
313
return req->iter.bvec->bv_page;
drivers/nvme/host/tcp.c
318
return req->iter.bvec->bv_offset + req->iter.iov_offset;
drivers/nvme/host/tcp.c
323
return min_t(size_t, iov_iter_single_seg_count(&req->iter),
drivers/nvme/host/tcp.c
367
iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
drivers/nvme/host/tcp.c
368
req->iter.iov_offset = offset;
drivers/nvme/host/tcp.c
376
iov_iter_advance(&req->iter, len);
drivers/nvme/host/tcp.c
377
if (!iov_iter_count(&req->iter) &&
drivers/nvme/host/tcp.c
926
if (!iov_iter_count(&req->iter)) {
drivers/nvme/host/tcp.c
945
iov_iter_count(&req->iter));
drivers/nvme/host/tcp.c
949
&req->iter, recv_len, &queue->rcv_crc);
drivers/nvme/host/tcp.c
952
&req->iter, recv_len);
drivers/nvme/target/core.c
1563
struct radix_tree_iter iter;
drivers/nvme/target/core.c
1568
radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
drivers/nvme/target/io-cmd-file.c
100
return call_iter(iocb, &iter);
drivers/nvme/target/io-cmd-file.c
80
ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
drivers/nvme/target/io-cmd-file.c
81
struct iov_iter iter;
drivers/nvme/target/io-cmd-file.c
94
iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
drivers/nvmem/core.c
1373
struct nvmem_cell_entry *iter, *cell = NULL;
drivers/nvmem/core.c
1376
list_for_each_entry(iter, &nvmem->cells, node) {
drivers/nvmem/core.c
1377
if (np == iter->np) {
drivers/nvmem/core.c
1378
cell = iter;
drivers/nvmem/core.c
722
struct nvmem_cell_entry *iter, *cell = NULL;
drivers/nvmem/core.c
725
list_for_each_entry(iter, &nvmem->cells, node) {
drivers/nvmem/core.c
726
if (strcmp(cell_id, iter->name) == 0) {
drivers/nvmem/core.c
727
cell = iter;
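Both nvmem lookups above use the convention of walking with a scratch cursor and publishing the hit through a second pointer, so a failed search yields NULL rather than a cursor left pointing at the list head. A self-contained sketch of the pattern; struct foo and its list are illustrative:

#include <linux/list.h>
#include <linux/string.h>

struct foo {
	const char *name;
	struct list_head node;
};

/* Return the first entry whose name matches, or NULL. */
static struct foo *find_foo(struct list_head *foos, const char *name)
{
	struct foo *found = NULL, *iter;

	list_for_each_entry(iter, foos, node) {
		if (!strcmp(iter->name, name)) {
			found = iter;
			break;
		}
	}
	return found;
}
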
drivers/opp/core.c
1800
struct dev_pm_opp *opp = NULL, *iter;
drivers/opp/core.c
1812
list_for_each_entry(iter, &opp_table->opp_list, node) {
drivers/opp/core.c
1813
if (iter->rates[0] == freq) {
drivers/opp/core.c
1814
opp = iter;
drivers/opp/debugfs.c
218
struct opp_device *new_dev = NULL, *iter;
drivers/opp/debugfs.c
223
list_for_each_entry(iter, &opp_table->dev_list, node)
drivers/opp/debugfs.c
224
if (iter != opp_dev) {
drivers/opp/debugfs.c
225
new_dev = iter;
drivers/pci/controller/pci-hyperv.c
2660
struct hv_pci_dev *iter, *hpdev = NULL;
drivers/pci/controller/pci-hyperv.c
2663
list_for_each_entry(iter, &hbus->children, list_entry) {
drivers/pci/controller/pci-hyperv.c
2664
if (iter->desc.win_slot.slot == wslot) {
drivers/pci/controller/pci-hyperv.c
2665
hpdev = iter;
drivers/pinctrl/core.c
716
struct radix_tree_iter iter;
drivers/pinctrl/core.c
719
radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0)
drivers/pinctrl/core.c
720
radix_tree_delete(&pctldev->pin_group_tree, iter.index);
drivers/pinctrl/pinmux.c
1005
struct radix_tree_iter iter;
drivers/pinctrl/pinmux.c
1008
radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0)
drivers/pinctrl/pinmux.c
1009
radix_tree_delete(&pctldev->pin_function_tree, iter.index);
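pinctrl's group and function teardown both drain a radix tree by deleting the entry the iterator currently points at; the same loop appears later under drivers/power/sequencing. A minimal sketch, assuming the caller holds whatever lock guards the tree:

#include <linux/radix-tree.h>

/* Delete every entry in @tree. */
static void radix_tree_drain(struct radix_tree_root *tree)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, tree, &iter, 0)
		radix_tree_delete(tree, iter.index);
}
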
drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
584
struct bulk_waiter_node *waiter = NULL, *iter;
drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
594
list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
595
if (iter->pid == current->pid) {
drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
596
list_del(&iter->list);
drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c
597
waiter = iter;
drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c
291
struct bulk_waiter_node *waiter = NULL, *iter;
drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c
317
list_for_each_entry(iter, &instance->bulk_waiter_list,
drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c
319
if (iter->pid == current->pid) {
drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c
320
list_del(&iter->list);
drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c
321
waiter = iter;
drivers/platform/x86/intel/pmc/core.c
331
unsigned int index, iter, idx, ip = 0;
drivers/platform/x86/intel/pmc/core.c
337
iter = pmc->map->ppfear0_offset;
drivers/platform/x86/intel/pmc/core.c
340
index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
drivers/platform/x86/intel/pmc/core.c
341
pf_regs[index] = pmc_core_reg_read_byte(pmc, iter);
drivers/pmdomain/qcom/cpr.c
1292
struct corner *iter;
drivers/pmdomain/qcom/cpr.c
1314
for (iter = drv->corners; iter <= end; iter++) {
drivers/pmdomain/qcom/cpr.c
1315
if (iter->freq > rate)
drivers/pmdomain/qcom/cpr.c
1318
if (iter->freq == rate) {
drivers/pmdomain/qcom/cpr.c
1319
drv->corner = iter;
drivers/pmdomain/qcom/cpr.c
1322
if (iter->freq < rate)
drivers/pmdomain/qcom/cpr.c
1323
drv->corner = iter;
drivers/power/sequencing/core.c
328
struct radix_tree_iter iter;
drivers/power/sequencing/core.c
337
radix_tree_for_each_slot(slot, &visited_units, &iter, 0)
drivers/power/sequencing/core.c
338
radix_tree_delete(&visited_units, iter.index);
drivers/power/sequencing/core.c
450
struct radix_tree_iter iter;
drivers/power/sequencing/core.c
456
radix_tree_for_each_slot(slot, &processed_units, &iter, 0)
drivers/power/sequencing/core.c
457
radix_tree_delete(&processed_units, iter.index);
drivers/resctrl/mpam_devices.c
132
struct mpam_garbage *iter, *tmp;
drivers/resctrl/mpam_devices.c
140
llist_for_each_entry_safe(iter, tmp, to_free, llist) {
drivers/resctrl/mpam_devices.c
141
if (iter->pdev)
drivers/resctrl/mpam_devices.c
142
devm_kfree(&iter->pdev->dev, iter->to_free);
drivers/resctrl/mpam_devices.c
144
kfree(iter->to_free);
drivers/s390/block/dasd_diag.c
515
struct req_iterator iter;
drivers/s390/block/dasd_diag.c
536
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_diag.c
555
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
3261
struct req_iterator iter;
drivers/s390/block/dasd_eckd.c
3308
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
3969
struct req_iterator iter;
drivers/s390/block/dasd_eckd.c
3991
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
4064
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
4155
struct req_iterator iter;
drivers/s390/block/dasd_eckd.c
4229
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
4485
struct req_iterator iter;
drivers/s390/block/dasd_eckd.c
4522
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
4572
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
4605
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
4734
struct req_iterator iter;
drivers/s390/block/dasd_eckd.c
4838
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_eckd.c
4881
struct req_iterator iter;
drivers/s390/block/dasd_eckd.c
4899
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_fba.c
434
struct req_iterator iter;
drivers/s390/block/dasd_fba.c
456
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_fba.c
498
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dasd_fba.c
566
struct req_iterator iter;
drivers/s390/block/dasd_fba.c
580
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/dcssblk.c
884
struct bvec_iter iter;
drivers/s390/block/dcssblk.c
914
bio_for_each_segment(bvec, bio, iter) {
drivers/s390/block/scm_blk.c
184
struct req_iterator iter;
drivers/s390/block/scm_blk.c
199
rq_for_each_segment(bv, req, iter) {
drivers/s390/block/scm_blk.c
52
struct list_head *iter, *safe;
drivers/s390/block/scm_blk.c
56
list_for_each_safe(iter, safe, &inactive_requests) {
drivers/s390/block/scm_blk.c
57
scmrq = list_entry(iter, struct scm_request, list);
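The s390 dasd/scm entries iterate whole requests rather than single bios: rq_for_each_segment() chains through every bio in the request. A sketch; counting payload bytes is just an illustrative use:

#include <linux/blk-mq.h>

/* Sum the data bytes across all segments of @req. */
static unsigned int rq_payload_bytes(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bv;
	unsigned int bytes = 0;

	rq_for_each_segment(bv, req, iter)
		bytes += bv.bv_len;

	return bytes;
}
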
drivers/s390/char/zcore.c
100
struct iov_iter iter;
drivers/s390/char/zcore.c
105
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
drivers/s390/char/zcore.c
106
if (memcpy_hsa_iter(&iter, src, count) < count)
drivers/s390/char/zcore.c
64
size_t memcpy_hsa_iter(struct iov_iter *iter, unsigned long src, size_t count)
drivers/s390/char/zcore.c
80
copied = copy_to_iter(hsa_buf + offset, bytes, iter);
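zcore.c wraps a plain kernel buffer in a single-segment kvec iterator so one copy helper can serve any iov_iter destination. A sketch that mirrors those three entries; the function name is illustrative:

#include <linux/uio.h>

/* Copy @len bytes from @src into @dst through an iov_iter. */
static size_t copy_via_kvec(void *dst, const void *src, size_t len)
{
	struct kvec kvec = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, len);
	return copy_to_iter(src, len, &iter);
}
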
drivers/s390/cio/blacklist.c
292
struct ccwdev_iter *iter = s->private;
drivers/s390/cio/blacklist.c
296
memset(iter, 0, sizeof(*iter));
drivers/s390/cio/blacklist.c
297
iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
drivers/s390/cio/blacklist.c
298
iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
drivers/s390/cio/blacklist.c
299
return iter;
drivers/s390/cio/blacklist.c
310
struct ccwdev_iter *iter;
drivers/s390/cio/blacklist.c
316
iter = it;
drivers/s390/cio/blacklist.c
317
if (iter->devno == __MAX_SUBCHANNEL) {
drivers/s390/cio/blacklist.c
318
iter->devno = 0;
drivers/s390/cio/blacklist.c
319
iter->ssid++;
drivers/s390/cio/blacklist.c
320
if (iter->ssid > __MAX_SSID)
drivers/s390/cio/blacklist.c
323
iter->devno++;
drivers/s390/cio/blacklist.c
324
return iter;
drivers/s390/cio/blacklist.c
330
struct ccwdev_iter *iter;
drivers/s390/cio/blacklist.c
332
iter = it;
drivers/s390/cio/blacklist.c
333
if (!is_blacklisted(iter->ssid, iter->devno))
drivers/s390/cio/blacklist.c
336
if (!iter->in_range) {
drivers/s390/cio/blacklist.c
338
if ((iter->devno == __MAX_SUBCHANNEL) ||
drivers/s390/cio/blacklist.c
339
!is_blacklisted(iter->ssid, iter->devno + 1)) {
drivers/s390/cio/blacklist.c
341
seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
drivers/s390/cio/blacklist.c
344
iter->in_range = 1;
drivers/s390/cio/blacklist.c
345
seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
drivers/s390/cio/blacklist.c
348
if ((iter->devno == __MAX_SUBCHANNEL) ||
drivers/s390/cio/blacklist.c
349
!is_blacklisted(iter->ssid, iter->devno + 1)) {
drivers/s390/cio/blacklist.c
351
iter->in_range = 0;
drivers/s390/cio/blacklist.c
352
seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
drivers/s390/cio/vfio_ccw_cp.c
492
struct ccwchain *iter;
drivers/s390/cio/vfio_ccw_cp.c
495
list_for_each_entry(iter, &cp->ccwchain_list, next) {
drivers/s390/cio/vfio_ccw_cp.c
496
ccw_head = iter->ch_iova;
drivers/s390/cio/vfio_ccw_cp.c
497
if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
drivers/s390/cio/vfio_ccw_cp.c
500
ccw->cda = virt_to_dma32((void *)iter->ch_ccw + offset);
drivers/s390/net/qeth_l2_main.c
759
struct list_head *iter;
drivers/s390/net/qeth_l2_main.c
775
iter = &brdev->adj_list.lower;
drivers/s390/net/qeth_l2_main.c
776
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
drivers/s390/net/qeth_l2_main.c
796
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
drivers/s390/net/qeth_l2_main.c
856
struct list_head *iter;
drivers/s390/net/qeth_l2_main.c
871
iter = &brdev->adj_list.lower;
drivers/s390/net/qeth_l2_main.c
872
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
drivers/s390/net/qeth_l2_main.c
885
lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
drivers/scsi/aha1542.c
266
struct req_iterator iter;
drivers/scsi/aha1542.c
269
rq_for_each_segment(bv, rq, iter) {
drivers/scsi/aha1542.c
450
struct req_iterator iter;
drivers/scsi/aha1542.c
453
rq_for_each_segment(bv, rq, iter) {
drivers/scsi/dc395x.c
3030
struct DeviceCtlBlk *p = NULL, *iter;
drivers/scsi/dc395x.c
3032
list_for_each_entry(iter, &acb->dcb_list, list)
drivers/scsi/dc395x.c
3033
if (iter->target_id == dcb->target_id) {
drivers/scsi/dc395x.c
3034
p = iter;
drivers/scsi/fnic/fnic.h
573
struct fnic_scsi_iter_data *iter = iter_data;
drivers/scsi/fnic/fnic.h
575
return iter->fn(iter->fnic, sc, iter->data1, iter->data2);
drivers/scsi/lpfc/lpfc_sli.c
22081
struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
drivers/scsi/lpfc/lpfc_sli.c
22090
list_for_each_entry_safe(iter, lpfc_ncmd_next,
drivers/scsi/lpfc/lpfc_sli.c
22092
list_del(&iter->list);
drivers/scsi/lpfc/lpfc_sli.c
22094
lpfc_ncmd = iter;
drivers/scsi/qedf/qedf_dbg.c
156
qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
drivers/scsi/qedf/qedf_dbg.c
160
for (; iter->name; iter++) {
drivers/scsi/qedf/qedf_dbg.c
162
iter->attr);
drivers/scsi/qedf/qedf_dbg.c
165
iter->name, ret);
drivers/scsi/qedf/qedf_dbg.c
171
qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
drivers/scsi/qedf/qedf_dbg.c
173
for (; iter->name; iter++)
drivers/scsi/qedf/qedf_dbg.c
174
sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
drivers/scsi/qedf/qedf_dbg.h
113
struct sysfs_bin_attrs *iter);
drivers/scsi/qedf/qedf_dbg.h
115
struct sysfs_bin_attrs *iter);
drivers/scsi/qla2xxx/qla_attr.c
1021
struct sysfs_entry *iter;
drivers/scsi/qla2xxx/qla_attr.c
1024
for (iter = bin_file_entries; iter->name; iter++) {
drivers/scsi/qla2xxx/qla_attr.c
1025
if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
drivers/scsi/qla2xxx/qla_attr.c
1027
if (iter->type == 2 && !IS_QLA25XX(vha->hw))
drivers/scsi/qla2xxx/qla_attr.c
1029
if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
drivers/scsi/qla2xxx/qla_attr.c
1033
iter->attr);
drivers/scsi/qla2xxx/qla_attr.c
1037
iter->name, ret);
drivers/scsi/qla2xxx/qla_attr.c
1041
iter->name);
drivers/scsi/qla2xxx/qla_attr.c
1049
struct sysfs_entry *iter;
drivers/scsi/qla2xxx/qla_attr.c
1052
for (iter = bin_file_entries; iter->name; iter++) {
drivers/scsi/qla2xxx/qla_attr.c
1053
if (iter->type && !IS_FWI2_CAPABLE(ha))
drivers/scsi/qla2xxx/qla_attr.c
1055
if (iter->type == 2 && !IS_QLA25XX(ha))
drivers/scsi/qla2xxx/qla_attr.c
1057
if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
drivers/scsi/qla2xxx/qla_attr.c
1061
iter->attr);
drivers/scsi/qla2xxx/qla_attr.c
237
__le32 *iter = (__force __le32 *)buf;
drivers/scsi/qla2xxx/qla_attr.c
241
for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
drivers/scsi/qla2xxx/qla_attr.c
242
chksum += le32_to_cpu(*iter);
drivers/scsi/qla2xxx/qla_attr.c
244
*iter = cpu_to_le32(chksum);
drivers/scsi/qla2xxx/qla_attr.c
246
uint8_t *iter;
drivers/scsi/qla2xxx/qla_attr.c
249
iter = (uint8_t *)buf;
drivers/scsi/qla2xxx/qla_attr.c
252
chksum += *iter++;
drivers/scsi/qla2xxx/qla_attr.c
254
*iter = chksum;
drivers/scsi/qla2xxx/qla_inline.h
73
uint32_t iter = bsize >> 2;
drivers/scsi/qla2xxx/qla_inline.h
75
for (; iter ; iter--)
drivers/scsi/qla2xxx/qla_inline.h
86
uint32_t iter = bsize >> 2;
drivers/scsi/qla2xxx/qla_inline.h
88
for ( ; iter--; isrc++)
drivers/scsi/qla2xxx/qla_isr.c
2794
uint16_t iter;
drivers/scsi/qla2xxx/qla_isr.c
2811
iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
drivers/scsi/qla2xxx/qla_isr.c
2812
for (; iter; iter--)
drivers/scsi/qla2xxx/qla_isr.c
347
unsigned long iter;
drivers/scsi/qla2xxx/qla_isr.c
366
for (iter = 50; iter--; ) {
drivers/scsi/qla2xxx/qla_isr.c
4250
unsigned long iter;
drivers/scsi/qla2xxx/qla_isr.c
4274
for (iter = 50; iter--; ) {
drivers/scsi/qla2xxx/qla_isr.c
460
unsigned long iter;
drivers/scsi/qla2xxx/qla_isr.c
481
for (iter = 50; iter--; ) {
drivers/scsi/qla2xxx/qla_mbx.c
3196
uint32_t *iter = (uint32_t *)stats;
drivers/scsi/qla2xxx/qla_mbx.c
3197
ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
drivers/scsi/qla2xxx/qla_mbx.c
3237
for ( ; dwords--; iter++)
drivers/scsi/qla2xxx/qla_mbx.c
3238
le32_to_cpus(iter);
drivers/scsi/qla2xxx/qla_mbx.c
3255
uint32_t *iter = (uint32_t *)stats;
drivers/scsi/qla2xxx/qla_mbx.c
3256
ushort dwords = sizeof(*stats)/sizeof(*iter);
drivers/scsi/qla2xxx/qla_mbx.c
3282
for ( ; dwords--; iter++)
drivers/scsi/qla2xxx/qla_mbx.c
3283
le32_to_cpus(iter);
drivers/scsi/qla2xxx/qla_mbx.c
6917
uint16_t iter, addr, offset;
drivers/scsi/qla2xxx/qla_mbx.c
6928
for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
drivers/scsi/qla2xxx/qla_mbx.c
6929
if (iter == 4) {
drivers/scsi/qla2xxx/qla_mr.c
2881
unsigned long iter;
drivers/scsi/qla2xxx/qla_mr.c
2905
for (iter = 50; iter--; clr_intr = 0) {
drivers/scsi/qla2xxx/qla_nx.c
1966
unsigned long iter;
drivers/scsi/qla2xxx/qla_nx.c
1999
for (iter = 1; iter--; ) {
drivers/scsi/qla2xxx/qla_nx2.c
3884
unsigned long iter;
drivers/scsi/qla2xxx/qla_nx2.c
3935
for (iter = 1; iter--; ) {
drivers/scsi/qla2xxx/qla_sup.c
3140
uint32_t istart, iend, iter, vend;
drivers/scsi/qla2xxx/qla_sup.c
3153
iter = istart;
drivers/scsi/qla2xxx/qla_sup.c
3154
while ((iter < iend) && !do_next) {
drivers/scsi/qla2xxx/qla_sup.c
3155
iter++;
drivers/scsi/qla2xxx/qla_sup.c
3156
if (qla2x00_read_flash_byte(ha, iter) == '/') {
drivers/scsi/qla2xxx/qla_sup.c
3157
if (qla2x00_read_flash_byte(ha, iter + 2) ==
drivers/scsi/qla2xxx/qla_sup.c
3161
iter + 3) == '/')
drivers/scsi/qla2xxx/qla_sup.c
3170
while ((iter > istart) && !do_next) {
drivers/scsi/qla2xxx/qla_sup.c
3171
iter--;
drivers/scsi/qla2xxx/qla_sup.c
3172
if (qla2x00_read_flash_byte(ha, iter) == ' ')
drivers/scsi/qla2xxx/qla_sup.c
3182
vend = iter - 1;
drivers/scsi/qla2xxx/qla_sup.c
3184
while ((iter > istart) && !do_next) {
drivers/scsi/qla2xxx/qla_sup.c
3185
iter--;
drivers/scsi/qla2xxx/qla_sup.c
3186
rbyte = qla2x00_read_flash_byte(ha, iter);
drivers/scsi/qla2xxx/qla_sup.c
3194
iter++;
drivers/scsi/qla2xxx/qla_sup.c
3195
if ((vend - iter) &&
drivers/scsi/qla2xxx/qla_sup.c
3196
((vend - iter) < sizeof(ha->fcode_revision))) {
drivers/scsi/qla2xxx/qla_sup.c
3198
while (iter <= vend) {
drivers/scsi/qla2xxx/qla_sup.c
3199
*vbyte++ = qla2x00_read_flash_byte(ha, iter);
drivers/scsi/qla2xxx/qla_sup.c
3200
iter++;
drivers/scsi/qla4xxx/ql4_attr.c
128
struct sysfs_entry *iter;
drivers/scsi/qla4xxx/ql4_attr.c
131
for (iter = bin_file_entries; iter->name; iter++) {
drivers/scsi/qla4xxx/ql4_attr.c
133
iter->attr);
drivers/scsi/qla4xxx/ql4_attr.c
137
iter->name, ret);
drivers/scsi/qla4xxx/ql4_attr.c
144
struct sysfs_entry *iter;
drivers/scsi/qla4xxx/ql4_attr.c
146
for (iter = bin_file_entries; iter->name; iter++)
drivers/scsi/qla4xxx/ql4_attr.c
148
iter->attr);
drivers/soc/qcom/pdr_interface.c
302
struct pdr_service *pds = NULL, *iter;
drivers/soc/qcom/pdr_interface.c
309
list_for_each_entry(iter, &pdr->lookups, node) {
drivers/soc/qcom/pdr_interface.c
310
if (strcmp(iter->service_path, ind_msg->service_path))
drivers/soc/qcom/pdr_interface.c
313
pds = iter;
drivers/soc/qcom/qcom-geni-se.c
534
int iter = (ceil_bpw * pack_words) / BITS_PER_BYTE;
drivers/soc/qcom/qcom-geni-se.c
537
if (iter <= 0 || iter > NUM_PACKING_VECTORS)
drivers/soc/qcom/qcom-geni-se.c
540
for (i = 0; i < iter; i++) {
drivers/soc/qcom/qcom-geni-se.c
554
cfg[iter - 1] |= PACKING_STOP_BIT;
drivers/soc/ti/knav_qmss_queue.c
760
struct knav_pool *pool, *pi = NULL, *iter;
drivers/soc/ti/knav_qmss_queue.c
818
list_for_each_entry(iter, &region->pools, region_inst) {
drivers/soc/ti/knav_qmss_queue.c
819
if ((iter->region_offset - last_offset) >= num_desc) {
drivers/soc/ti/knav_qmss_queue.c
820
pi = iter;
drivers/soc/ti/knav_qmss_queue.c
823
last_offset = iter->region_offset + iter->num_desc;
drivers/spi/spi-pci1xxxx.c
1005
store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
drivers/spi/spi-pci1xxxx.c
1008
SPI_MST_EVENT_MASK_REG_OFFSET(iter));
drivers/spi/spi-pci1xxxx.c
280
u8 iter, irq_index;
drivers/spi/spi-pci1xxxx.c
286
for (iter = 0; iter < spi_bus->total_hw_instances; iter++) {
drivers/spi/spi-pci1xxxx.c
287
spi_sub_ptr = spi_bus->spi_int[iter];
drivers/spi/spi-pci1xxxx.c
289
if (iter == 0) {
drivers/spi/spi-pci1xxxx.c
311
writel((regval | (data << (iter * 16))), spi_bus->dma_offset_bar +
drivers/spi/spi-pci1xxxx.c
318
writel(regval | (data << (iter * 16)), spi_bus->dma_offset_bar +
drivers/spi/spi-pci1xxxx.c
328
u8 iter, irq_index;
drivers/spi/spi-pci1xxxx.c
341
for (iter = 0; iter < hw_inst; iter++) {
drivers/spi/spi-pci1xxxx.c
342
spi_sub_ptr = spi_bus->spi_int[iter];
drivers/spi/spi-pci1xxxx.c
802
u8 hw_inst_cnt, iter, start, only_sec_inst;
drivers/spi/spi-pci1xxxx.c
828
for (iter = 0; iter < hw_inst_cnt; iter++) {
drivers/spi/spi-pci1xxxx.c
829
spi_bus->spi_int[iter] = devm_kzalloc(&pdev->dev,
drivers/spi/spi-pci1xxxx.c
832
if (!spi_bus->spi_int[iter])
drivers/spi/spi-pci1xxxx.c
834
spi_sub_ptr = spi_bus->spi_int[iter];
drivers/spi/spi-pci1xxxx.c
842
if (!iter) {
drivers/spi/spi-pci1xxxx.c
898
if (iter == 1) {
drivers/spi/spi-pci1xxxx.c
907
spi_sub_ptr->irq[0] = pci_irq_vector(pdev, iter);
drivers/spi/spi-pci1xxxx.c
976
u8 iter;
drivers/spi/spi-pci1xxxx.c
978
for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
drivers/spi/spi-pci1xxxx.c
979
spi_sub_ptr = spi_ptr->spi_int[iter];
drivers/spi/spi-pci1xxxx.c
982
SPI_MST_EVENT_MASK_REG_OFFSET(iter));
drivers/spi/spi-pci1xxxx.c
985
store_restore_config(spi_ptr, spi_sub_ptr, iter, 0);
drivers/spi/spi-pci1xxxx.c
996
u8 iter;
drivers/spi/spi-pci1xxxx.c
998
for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
drivers/spi/spi-pci1xxxx.c
999
spi_sub_ptr = spi_ptr->spi_int[iter];
drivers/spi/spidev.c
571
struct spidev_data *spidev = NULL, *iter;
drivers/spi/spidev.c
576
list_for_each_entry(iter, &device_list, device_entry) {
drivers/spi/spidev.c
577
if (iter->devt == inode->i_rdev) {
drivers/spi/spidev.c
579
spidev = iter;
drivers/staging/greybus/audio_codec.c
500
struct gbaudio_module_info *module = NULL, *iter;
drivers/staging/greybus/audio_codec.c
514
list_for_each_entry(iter, &codec->module_list, list) {
drivers/staging/greybus/audio_codec.c
516
data = find_data(iter, dai->id);
drivers/staging/greybus/audio_codec.c
518
module = iter;
drivers/staging/greybus/audio_codec.c
568
struct gbaudio_module_info *module = NULL, *iter;
drivers/staging/greybus/audio_codec.c
597
list_for_each_entry(iter, &codec->module_list, list) {
drivers/staging/greybus/audio_codec.c
599
data = find_data(iter, dai->id);
drivers/staging/greybus/audio_codec.c
601
module = iter;
drivers/target/sbp/sbp_target.c
1247
struct sg_mapping_iter iter;
drivers/target/sbp/sbp_target.c
1285
sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
drivers/target/sbp/sbp_target.c
1298
sg_miter_next(&iter);
drivers/target/sbp/sbp_target.c
1300
tfr_length = min3(length, max_payload, (int)iter.length);
drivers/target/sbp/sbp_target.c
1306
offset, iter.addr, tfr_length);
drivers/target/sbp/sbp_target.c
1313
iter.consumed = tfr_length;
drivers/target/sbp/sbp_target.c
1316
sg_miter_stop(&iter);
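sbp_target.c maps its scatterlist with an sg_mapping_iter, setting iter.consumed when it uses less than a full chunk. A hedged sketch of that shape, assuming SG_MITER_TO_SG for a producer; fill() is hypothetical:

#include <linux/scatterlist.h>

/* Fill @sgl chunk by chunk; @fill returns the bytes it produced. */
static void sg_fill_all(struct scatterlist *sgl, unsigned int nents,
			size_t (*fill)(void *buf, size_t len))
{
	struct sg_mapping_iter iter;

	sg_miter_start(&iter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&iter))
		iter.consumed = fill(iter.addr, iter.length);
	sg_miter_stop(&iter);
}
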
drivers/target/target_core_device.c
913
struct devices_idr_iter *iter = data;
drivers/target/target_core_device.c
932
ret = iter->fn(dev, iter->data);
drivers/target/target_core_device.c
950
struct devices_idr_iter iter = { .fn = fn, .data = data };
drivers/target/target_core_device.c
954
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
drivers/target/target_core_file.c
274
struct iov_iter iter;
drivers/target/target_core_file.c
289
iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);
drivers/target/target_core_file.c
302
ret = file->f_op->write_iter(&aio_cmd->iocb, &iter);
drivers/target/target_core_file.c
304
ret = file->f_op->read_iter(&aio_cmd->iocb, &iter);
drivers/target/target_core_file.c
317
struct iov_iter iter;
drivers/target/target_core_file.c
334
iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
drivers/target/target_core_file.c
336
ret = vfs_iter_write(fd, &iter, &pos, 0);
drivers/target/target_core_file.c
338
ret = vfs_iter_read(fd, &iter, &pos, 0);
drivers/target/target_core_file.c
372
ret += iov_iter_zero(data_length - ret, &iter);
drivers/target/target_core_file.c
434
struct iov_iter iter;
drivers/target/target_core_file.c
469
iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
drivers/target/target_core_file.c
470
ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
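target_core_file.c funnels pre-built bio_vec arrays into the VFS through iov_iter_bvec() plus vfs_iter_write()/vfs_iter_read(). A minimal write-side sketch; all parameters are caller-supplied:

#include <linux/fs.h>
#include <linux/uio.h>

/* Write @len bytes described by @bvec to @file at *@pos. */
static ssize_t write_bvecs(struct file *file, struct bio_vec *bvec,
			   unsigned int nr_segs, size_t len, loff_t *pos)
{
	struct iov_iter iter;

	iov_iter_bvec(&iter, ITER_SOURCE, bvec, nr_segs, len);
	return vfs_iter_write(file, &iter, pos, 0);
}
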
drivers/tee/tee_shm.c
401
register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
drivers/tee/tee_shm.c
433
addr = untagged_addr((unsigned long)iter_iov_addr(iter));
drivers/tee/tee_shm.c
435
num_pages = iov_iter_npages(iter, INT_MAX);
drivers/tee/tee_shm.c
447
len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
drivers/tee/tee_shm.c
475
if (!iov_iter_is_kvec(iter))
drivers/tee/tee_shm.c
502
struct iov_iter iter;
drivers/tee/tee_shm.c
515
iov_iter_ubuf(&iter, ITER_DEST, (void __user *)addr, length);
drivers/tee/tee_shm.c
516
shm = register_shm_helper(ctx, &iter, flags, id);
drivers/tee/tee_shm.c
550
struct iov_iter iter;
drivers/tee/tee_shm.c
554
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, length);
drivers/tee/tee_shm.c
556
return register_shm_helper(ctx, &iter, flags, -1);
drivers/thunderbolt/ctl.c
176
struct tb_cfg_request *req = NULL, *iter;
drivers/thunderbolt/ctl.c
179
list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
drivers/thunderbolt/ctl.c
180
tb_cfg_request_get(iter);
drivers/thunderbolt/ctl.c
181
if (iter->match(iter, pkg)) {
drivers/thunderbolt/ctl.c
182
req = iter;
drivers/thunderbolt/ctl.c
185
tb_cfg_request_put(iter);
drivers/tty/tty_io.c
1105
ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
drivers/tty/tty_io.c
1121
res = file_tty_write(p, iocb, iter);
drivers/tty/tty_io.c
1125
return tty_write(iocb, iter);
drivers/usb/core/devio.c
1597
struct usb_memory *usbm = NULL, *iter;
drivers/usb/core/devio.c
1602
list_for_each_entry(iter, &ps->memory_list, memlist) {
drivers/usb/core/devio.c
1603
if (uurb_start >= iter->vm_start &&
drivers/usb/core/devio.c
1604
uurb_start < iter->vm_start + iter->size) {
drivers/usb/core/devio.c
1605
if (uurb->buffer_length > iter->vm_start + iter->size -
drivers/usb/core/devio.c
1609
usbm = iter;
drivers/usb/gadget/composite.c
1765
struct usb_function *iter;
drivers/usb/gadget/composite.c
2225
list_for_each_entry(iter, &cdev->config->functions, list) {
drivers/usb/gadget/composite.c
2226
if (test_bit(endp, iter->endpoints)) {
drivers/usb/gadget/composite.c
2227
f = iter;
drivers/usb/gadget/composite.c
951
struct usb_configuration *c = NULL, *iter;
drivers/usb/gadget/composite.c
957
list_for_each_entry(iter, &cdev->configs, list) {
drivers/usb/gadget/composite.c
958
if (iter->bConfigurationValue != number)
drivers/usb/gadget/composite.c
967
c = iter;
drivers/usb/gadget/configfs.c
1227
struct usb_configuration *c = NULL, *iter;
drivers/usb/gadget/configfs.c
1231
list_for_each_entry(iter, &cdev->configs, list) {
drivers/usb/gadget/configfs.c
1232
if (iter != &c_target->c)
drivers/usb/gadget/configfs.c
1234
c = iter;
drivers/usb/gadget/configfs.c
1619
struct list_head *iter;
drivers/usb/gadget/configfs.c
1625
list_for_each(iter, &gi->string_list)
drivers/usb/gadget/configfs.c
435
struct usb_function_instance *a_fi = NULL, *iter;
drivers/usb/gadget/configfs.c
451
list_for_each_entry(iter, &gi->available_func, cfs_list) {
drivers/usb/gadget/configfs.c
452
if (iter != fi)
drivers/usb/gadget/configfs.c
454
a_fi = iter;
drivers/usb/gadget/function/f_fs.c
757
static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
drivers/usb/gadget/function/f_fs.c
759
ssize_t ret = copy_to_iter(data, data_len, iter);
drivers/usb/gadget/function/f_fs.c
763
if (iov_iter_count(iter))
drivers/usb/gadget/function/f_fs.c
910
struct iov_iter *iter)
drivers/usb/gadget/function/f_fs.c
922
ret = copy_to_iter(buf->data, buf->length, iter);
drivers/usb/gadget/function/f_fs.c
928
if (iov_iter_count(iter)) {
drivers/usb/gadget/function/f_fs.c
944
struct iov_iter *iter)
drivers/usb/gadget/function/f_fs.c
948
ssize_t ret = copy_to_iter(data, data_len, iter);
drivers/usb/gadget/function/f_fs.c
952
if (iov_iter_count(iter))
drivers/usb/gadget/function/uvc_configfs.c
1043
u8 *source_ids, *iter;
drivers/usb/gadget/function/uvc_configfs.c
1058
iter = source_ids = kcalloc(n, sizeof(u8), GFP_KERNEL);
drivers/usb/gadget/function/uvc_configfs.c
1064
ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
drivers/usb/gadget/function/uvc_configfs.c
1122
u8 *bm_controls, *iter;
drivers/usb/gadget/function/uvc_configfs.c
1137
iter = bm_controls = kcalloc(n, sizeof(u8), GFP_KERNEL);
drivers/usb/gadget/function/uvc_configfs.c
1143
ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
drivers/usb/gadget/function/uvc_video.c
139
struct scatterlist *sg, *iter;
drivers/usb/gadget/function/uvc_video.c
163
for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
drivers/usb/gadget/function/uvc_video.c
170
sg_set_page(iter, sg_page(buf->sg), part, buf->offset);
drivers/usb/gadget/udc/aspeed-vhub/epn.c
476
struct ast_vhub_req *req = NULL, *iter;
drivers/usb/gadget/udc/aspeed-vhub/epn.c
483
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/aspeed-vhub/epn.c
484
if (&iter->req != u_req)
drivers/usb/gadget/udc/aspeed-vhub/epn.c
486
req = iter;
drivers/usb/gadget/udc/at91_udc.c
708
struct at91_request *req = NULL, *iter;
drivers/usb/gadget/udc/at91_udc.c
721
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/at91_udc.c
722
if (&iter->req != _req)
drivers/usb/gadget/udc/at91_udc.c
724
req = iter;
drivers/usb/gadget/udc/atmel_usba_udc.c
862
struct usba_request *iter;
drivers/usb/gadget/udc/atmel_usba_udc.c
871
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/atmel_usba_udc.c
872
if (&iter->req != _req)
drivers/usb/gadget/udc/atmel_usba_udc.c
874
req = iter;
drivers/usb/gadget/udc/bdc/bdc_ep.c
1759
struct bdc_req *iter;
drivers/usb/gadget/udc/bdc/bdc_ep.c
1777
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/bdc/bdc_ep.c
1778
if (&iter->usb_req != _req)
drivers/usb/gadget/udc/bdc/bdc_ep.c
1780
req = iter;
drivers/usb/gadget/udc/dummy_hcd.c
763
struct dummy_request *req = NULL, *iter;
drivers/usb/gadget/udc/dummy_hcd.c
774
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/dummy_hcd.c
775
if (&iter->req != _req)
drivers/usb/gadget/udc/dummy_hcd.c
777
list_del_init(&iter->queue);
drivers/usb/gadget/udc/dummy_hcd.c
779
req = iter;
drivers/usb/gadget/udc/fsl_qe_udc.c
1781
struct qe_req *iter;
drivers/usb/gadget/udc/fsl_qe_udc.c
1790
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/fsl_qe_udc.c
1791
if (&iter->req != _req)
drivers/usb/gadget/udc/fsl_qe_udc.c
1793
req = iter;
drivers/usb/gadget/udc/fsl_udc_core.c
925
struct fsl_req *iter;
drivers/usb/gadget/udc/fsl_udc_core.c
947
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/fsl_udc_core.c
948
if (&iter->req != _req)
drivers/usb/gadget/udc/fsl_udc_core.c
950
req = iter;
drivers/usb/gadget/udc/goku_udc.c
812
struct goku_request *req = NULL, *iter;
drivers/usb/gadget/udc/goku_udc.c
836
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/goku_udc.c
837
if (&iter->req != _req)
drivers/usb/gadget/udc/goku_udc.c
839
req = iter;
drivers/usb/gadget/udc/gr_udc.c
1692
struct gr_request *req = NULL, *iter;
drivers/usb/gadget/udc/gr_udc.c
1712
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/gr_udc.c
1713
if (&iter->req != _req)
drivers/usb/gadget/udc/gr_udc.c
1715
req = iter;
drivers/usb/gadget/udc/lpc32xx_udc.c
1831
struct lpc32xx_request *req = NULL, *iter;
drivers/usb/gadget/udc/lpc32xx_udc.c
1841
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/lpc32xx_udc.c
1842
if (&iter->req != _req)
drivers/usb/gadget/udc/lpc32xx_udc.c
1844
req = iter;
drivers/usb/gadget/udc/max3420_udc.c
1047
struct max3420_req *iter;
drivers/usb/gadget/udc/max3420_udc.c
1054
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/max3420_udc.c
1055
if (iter != req)
drivers/usb/gadget/udc/max3420_udc.c
1058
t = iter;
drivers/usb/gadget/udc/net2280.c
1236
struct net2280_request *iter;
drivers/usb/gadget/udc/net2280.c
1262
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/net2280.c
1263
if (&iter->req != _req)
drivers/usb/gadget/udc/net2280.c
1265
req = iter;
drivers/usb/gadget/udc/omap_udc.c
1008
struct omap_req *req = NULL, *iter;
drivers/usb/gadget/udc/omap_udc.c
1017
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/omap_udc.c
1018
if (&iter->req != _req)
drivers/usb/gadget/udc/omap_udc.c
1020
req = iter;
drivers/usb/gadget/udc/pxa25x_udc.c
966
struct pxa25x_request *iter;
drivers/usb/gadget/udc/pxa25x_udc.c
976
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/pxa25x_udc.c
977
if (&iter->req != _req)
drivers/usb/gadget/udc/pxa25x_udc.c
979
req = iter;
drivers/usb/gadget/udc/pxa27x_udc.c
1162
struct pxa27x_request *req = NULL, *iter;
drivers/usb/gadget/udc/pxa27x_udc.c
1176
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/pxa27x_udc.c
1177
if (&iter->req != _req)
drivers/usb/gadget/udc/pxa27x_udc.c
1179
req = iter;
drivers/usb/gadget/udc/tegra-xudc.c
1429
struct tegra_xudc_request *r = NULL, *iter;
drivers/usb/gadget/udc/tegra-xudc.c
1435
list_for_each_entry(iter, &ep->queue, list) {
drivers/usb/gadget/udc/tegra-xudc.c
1436
if (iter != req)
drivers/usb/gadget/udc/tegra-xudc.c
1438
r = iter;
drivers/usb/gadget/udc/udc-xilinx.c
1142
struct xusb_req *iter;
drivers/usb/gadget/udc/udc-xilinx.c
1148
list_for_each_entry(iter, &ep->queue, queue) {
drivers/usb/gadget/udc/udc-xilinx.c
1149
if (&iter->usb_req != _req)
drivers/usb/gadget/udc/udc-xilinx.c
1151
req = iter;
drivers/vfio/platform/vfio_platform_common.c
34
struct vfio_platform_reset_node *iter;
drivers/vfio/platform/vfio_platform_common.c
38
list_for_each_entry(iter, &reset_list, link) {
drivers/vfio/platform/vfio_platform_common.c
39
if (!strcmp(iter->compat, compat) &&
drivers/vfio/platform/vfio_platform_common.c
40
try_module_get(iter->owner)) {
drivers/vfio/platform/vfio_platform_common.c
41
*module = iter->owner;
drivers/vfio/platform/vfio_platform_common.c
42
reset_fn = iter->of_reset;
drivers/vfio/platform/vfio_platform_common.c
683
struct vfio_platform_reset_node *iter, *temp;
drivers/vfio/platform/vfio_platform_common.c
686
list_for_each_entry_safe(iter, temp, &reset_list, link) {
drivers/vfio/platform/vfio_platform_common.c
687
if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
drivers/vfio/platform/vfio_platform_common.c
688
list_del(&iter->link);
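vfio_platform_common.c (like cw1200 and lpfc earlier) deletes while iterating, which requires the _safe variant: the second cursor caches the next node before the current one is unlinked. A sketch; struct node is illustrative:

#include <linux/list.h>
#include <linux/slab.h>

struct node {
	int key;
	struct list_head link;
};

/* Unlink and free every entry matching @key. */
static void remove_matching(struct list_head *head, int key)
{
	struct node *iter, *tmp;

	list_for_each_entry_safe(iter, tmp, head, link) {
		if (iter->key == key) {
			list_del(&iter->link);
			kfree(iter);
		}
	}
}
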
drivers/vfio/vfio_main.c
1151
static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
drivers/vfio/vfio_main.c
1157
return device->log_ops->log_read_and_clear(device, iova, length, iter);
drivers/vfio/vfio_main.c
1169
struct iova_bitmap *iter;
drivers/vfio/vfio_main.c
1192
iter = iova_bitmap_alloc(report.iova, report.length,
drivers/vfio/vfio_main.c
1195
if (IS_ERR(iter))
drivers/vfio/vfio_main.c
1196
return PTR_ERR(iter);
drivers/vfio/vfio_main.c
1198
ret = iova_bitmap_for_each(iter, device,
drivers/vfio/vfio_main.c
1201
iova_bitmap_free(iter);
drivers/vhost/net.c
628
static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
drivers/vhost/net.c
634
iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
drivers/vhost/net.c
635
iov_iter_advance(iter, hdr_size);
drivers/vhost/net.c
637
return iov_iter_count(iter);
drivers/vhost/scsi.c
658
struct iov_iter *iter = cmd->read_iter;
drivers/vhost/scsi.c
671
if (copy_page_to_iter(page, 0, len, iter) != len) {
drivers/vhost/scsi.c
774
static void vhost_scsi_revert_map_iov_to_sgl(struct iov_iter *iter,
drivers/vhost/scsi.c
792
iov_iter_revert(iter, revert_bytes);
drivers/vhost/scsi.c
802
struct iov_iter *iter,
drivers/vhost/scsi.c
815
bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
drivers/vhost/scsi.c
853
vhost_scsi_revert_map_iov_to_sgl(iter, *sgl, sg);
drivers/vhost/scsi.c
855
iov_iter_revert(iter, bytes);
drivers/vhost/scsi.c
867
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
drivers/vhost/scsi.c
871
if (!iter || !iter_iov(iter)) {
drivers/vhost/scsi.c
877
sgl_count = iov_iter_npages(iter, 0xffff);
drivers/vhost/scsi.c
887
vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
drivers/vhost/scsi.c
891
size_t len = iov_iter_count(iter);
drivers/vhost/scsi.c
902
cmd->read_iov = dup_iter(cmd->read_iter, iter, GFP_KERNEL);
drivers/vhost/scsi.c
920
copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
drivers/vhost/scsi.c
947
vhost_scsi_map_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
drivers/vhost/scsi.c
953
while (iov_iter_count(iter)) {
drivers/vhost/scsi.c
954
ret = vhost_scsi_map_to_sgl(cmd, iter, sg_table, &sg, is_prot);
drivers/vhost/scsi.c
956
vhost_scsi_revert_map_iov_to_sgl(iter, sg_table->sgl,
drivers/vhost/vringh.c
1115
struct iov_iter iter;
drivers/vhost/vringh.c
1129
iov_iter_init(&iter, ITER_SOURCE, ivec.iov.iovec, ret,
drivers/vhost/vringh.c
1132
iov_iter_bvec(&iter, ITER_SOURCE, ivec.iov.bvec, ret,
drivers/vhost/vringh.c
1136
size = copy_from_iter(dst, translated, &iter);
drivers/vhost/vringh.c
1162
struct iov_iter iter;
drivers/vhost/vringh.c
1176
iov_iter_init(&iter, ITER_DEST, ivec.iov.iovec, ret,
drivers/vhost/vringh.c
1179
iov_iter_bvec(&iter, ITER_DEST, ivec.iov.bvec, ret,
drivers/vhost/vringh.c
1183
size = copy_to_iter(src, translated, &iter);
drivers/video/fbdev/core/fb_imageblit.h
100
data = *iter->data++ >> BITS_PER_BYTE/2;
drivers/video/fbdev/core/fb_imageblit.h
102
data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1);
drivers/video/fbdev/core/fb_imageblit.h
103
} else if (iter->i != 0) {
drivers/video/fbdev/core/fb_imageblit.h
104
*bits = iter->bpp * iter->i;
drivers/video/fbdev/core/fb_imageblit.h
105
if (iter->top)
drivers/video/fbdev/core/fb_imageblit.h
106
data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1);
drivers/video/fbdev/core/fb_imageblit.h
108
data = *iter->data++ >> BITS_PER_BYTE/2;
drivers/video/fbdev/core/fb_imageblit.h
110
data >>= BITS_PER_BYTE/2 - iter->i;
drivers/video/fbdev/core/fb_imageblit.h
112
iter->i = 0;
drivers/video/fbdev/core/fb_imageblit.h
114
*bits = iter->bpp * BITS_PER_BYTE/2;
drivers/video/fbdev/core/fb_imageblit.h
115
iter->i = iter->width;
drivers/video/fbdev/core/fb_imageblit.h
116
iter->top = false;
drivers/video/fbdev/core/fb_imageblit.h
119
*pixels = (iter->fgxcolor & iter->expand[data]) ^ iter->bgcolor;
drivers/video/fbdev/core/fb_imageblit.h
127
static __always_inline void fb_bitblit(bool (*get)(void *iter, unsigned long *pixels,
drivers/video/fbdev/core/fb_imageblit.h
129
void *iter, int bits, struct fb_address *dst,
drivers/video/fbdev/core/fb_imageblit.h
145
while (get(iter, &pixels, &bits)) {
drivers/video/fbdev/core/fb_imageblit.h
172
struct fb_color_iter iter;
drivers/video/fbdev/core/fb_imageblit.h
175
iter.data = (const u8 *)image->data;
drivers/video/fbdev/core/fb_imageblit.h
176
iter.palette = palette;
drivers/video/fbdev/core/fb_imageblit.h
177
iter.reverse = reverse;
drivers/video/fbdev/core/fb_imageblit.h
180
iter.shift = BITS_PER_BYTE - bpp;
drivers/video/fbdev/core/fb_imageblit.h
182
iter.shift = 0;
drivers/video/fbdev/core/fb_imageblit.h
185
iter.shift = BITS_PER_LONG - BITS_PER_BYTE;
drivers/video/fbdev/core/fb_imageblit.h
187
iter.shift = BITS_PER_LONG - bpp;
drivers/video/fbdev/core/fb_imageblit.h
189
iter.width = image->width;
drivers/video/fbdev/core/fb_imageblit.h
190
iter.i = 0;
drivers/video/fbdev/core/fb_imageblit.h
194
fb_bitblit(fb_color_image, &iter, bpp, dst, reverse);
drivers/video/fbdev/core/fb_imageblit.h
232
struct fb_bitmap4x_iter iter;
drivers/video/fbdev/core/fb_imageblit.h
235
iter.data = (const u8 *)image->data;
drivers/video/fbdev/core/fb_imageblit.h
240
iter.fgxcolor = (fgcolor ^ bgcolor) * mul[bpp-1];
drivers/video/fbdev/core/fb_imageblit.h
241
iter.bgcolor = bgcolor * mul[bpp-1];
drivers/video/fbdev/core/fb_imageblit.h
242
iter.width = image->width;
drivers/video/fbdev/core/fb_imageblit.h
243
iter.i = image->width;
drivers/video/fbdev/core/fb_imageblit.h
244
iter.expand = expand[bpp-1];
drivers/video/fbdev/core/fb_imageblit.h
245
iter.bpp = bpp;
drivers/video/fbdev/core/fb_imageblit.h
246
iter.top = false;
drivers/video/fbdev/core/fb_imageblit.h
250
fb_bitblit(fb_bitmap4x_image, &iter, bpp * BITS_PER_BYTE/2, dst, reverse);
drivers/video/fbdev/core/fb_imageblit.h
260
struct fb_bitmap_iter iter;
drivers/video/fbdev/core/fb_imageblit.h
263
iter.colors[0] = bgcolor;
drivers/video/fbdev/core/fb_imageblit.h
264
iter.colors[1] = fgcolor;
drivers/video/fbdev/core/fb_imageblit.h
266
iter.colors[0] <<= BITS_PER_LONG - bpp;
drivers/video/fbdev/core/fb_imageblit.h
267
iter.colors[1] <<= BITS_PER_LONG - bpp;
drivers/video/fbdev/core/fb_imageblit.h
269
iter.data = (const u8 *)image->data;
drivers/video/fbdev/core/fb_imageblit.h
270
iter.width = image->width;
drivers/video/fbdev/core/fb_imageblit.h
271
iter.i = 0;
drivers/video/fbdev/core/fb_imageblit.h
275
fb_bitblit(fb_bitmap_image, &iter, bpp, dst, reverse);
drivers/video/fbdev/core/fb_imageblit.h
39
struct fb_bitmap_iter *iter = iterator;
drivers/video/fbdev/core/fb_imageblit.h
41
if (iter->i < iter->width) {
drivers/video/fbdev/core/fb_imageblit.h
42
int bit = ~iter->i & (BITS_PER_BYTE-1);
drivers/video/fbdev/core/fb_imageblit.h
43
int byte = iter->i++ / BITS_PER_BYTE;
drivers/video/fbdev/core/fb_imageblit.h
45
*pixels = iter->colors[(iter->data[byte] >> bit) & 1];
drivers/video/fbdev/core/fb_imageblit.h
48
iter->data += BITS_TO_BYTES(iter->width);
drivers/video/fbdev/core/fb_imageblit.h
49
iter->i = 0;
drivers/video/fbdev/core/fb_imageblit.h
64
struct fb_color_iter *iter = iterator;
drivers/video/fbdev/core/fb_imageblit.h
66
if (iter->i < iter->width) {
drivers/video/fbdev/core/fb_imageblit.h
67
unsigned long color = iter->data[iter->i++];
drivers/video/fbdev/core/fb_imageblit.h
69
if (iter->palette)
drivers/video/fbdev/core/fb_imageblit.h
70
color = iter->palette[color];
drivers/video/fbdev/core/fb_imageblit.h
71
*pixels = color << iter->shift;
drivers/video/fbdev/core/fb_imageblit.h
72
if (iter->reverse.pixel)
drivers/video/fbdev/core/fb_imageblit.h
76
iter->data += iter->width;
drivers/video/fbdev/core/fb_imageblit.h
77
iter->i = 0;
drivers/video/fbdev/core/fb_imageblit.h
93
struct fb_bitmap4x_iter *iter = iterator;
drivers/video/fbdev/core/fb_imageblit.h
96
if (iter->i >= BITS_PER_BYTE/2) {
drivers/video/fbdev/core/fb_imageblit.h
97
iter->i -= BITS_PER_BYTE/2;
drivers/video/fbdev/core/fb_imageblit.h
98
iter->top = !iter->top;
drivers/video/fbdev/core/fb_imageblit.h
99
if (iter->top)
drivers/video/fbdev/mmp/core.c
130
struct mmp_path *path = NULL, *iter;
drivers/video/fbdev/mmp/core.c
133
list_for_each_entry(iter, &path_list, node) {
drivers/video/fbdev/mmp/core.c
134
if (!strcmp(name, iter->name)) {
drivers/video/fbdev/mmp/core.c
135
path = iter;
drivers/video/fbdev/riva/riva_hw.c
244
int iter = 0;
drivers/video/fbdev/riva/riva_hw.c
329
iter++;
drivers/video/fbdev/riva/riva_hw.c
375
if (iter>100)
drivers/w1/w1.c
822
struct w1_master *dev = NULL, *iter;
drivers/w1/w1.c
825
list_for_each_entry(iter, &w1_masters, w1_master_entry) {
drivers/w1/w1.c
826
if (iter->id == id) {
drivers/w1/w1.c
827
dev = iter;
drivers/w1/w1.c
828
atomic_inc(&iter->refcnt);
drivers/w1/w1.c
840
struct w1_slave *sl = NULL, *iter;
drivers/w1/w1.c
845
list_for_each_entry(iter, &dev->slist, w1_slave_entry) {
drivers/w1/w1.c
846
if (iter->reg_num.family == id->family &&
drivers/w1/w1.c
847
iter->reg_num.id == id->id &&
drivers/w1/w1.c
848
iter->reg_num.crc == id->crc) {
drivers/w1/w1.c
849
sl = iter;
drivers/w1/w1.c
851
atomic_inc(&iter->refcnt);
drivers/xen/pvcalls-back.c
1002
struct radix_tree_iter iter;
drivers/xen/pvcalls-back.c
1014
radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
drivers/xen/pvcalls-back.c
1020
slot = radix_tree_iter_retry(&iter);
fs/affs/file.c
397
affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/affs/file.c
402
size_t count = iov_iter_count(iter);
fs/affs/file.c
406
if (iov_iter_rw(iter) == WRITE) {
fs/affs/file.c
413
ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
fs/affs/file.c
414
if (ret < 0 && iov_iter_rw(iter) == WRITE)
fs/afs/cmservice.c
259
call->iter = &call->def_iter;
fs/afs/cmservice.c
266
iov_iter_count(call->iter), call->count2 * 3 * 4);
fs/afs/dir.c
133
struct iov_iter iter;
fs/afs/dir.c
139
iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
fs/afs/dir.c
140
iterate_folioq(&iter, iov_iter_count(&iter), NULL, NULL,
fs/afs/dir.c
195
struct iov_iter iter;
fs/afs/dir.c
202
iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
fs/afs/dir.c
203
checked = iterate_folioq(&iter, iov_iter_count(&iter), dvnode, NULL,
fs/afs/dir.c
2201
struct iov_iter iter;
fs/afs/dir.c
2214
iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0,
fs/afs/dir.c
2216
ret = netfs_writeback_single(mapping, wbc, &iter);
fs/afs/dir.c
233
struct iov_iter iter;
fs/afs/dir.c
266
iov_iter_folio_queue(&iter, ITER_DEST, dvnode->directory, 0, 0, dvnode->directory_size);
fs/afs/dir.c
272
ret = netfs_read_single(&dvnode->netfs.inode, file, &iter);
fs/afs/dir.c
496
struct iov_iter iter;
fs/afs/dir.c
505
iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
fs/afs/dir.c
506
iov_iter_advance(&iter, round_down(dir_ctx->pos, AFS_DIR_BLOCK_SIZE));
fs/afs/dir.c
508
iterate_folioq(&iter, iov_iter_count(&iter), dvnode, &ctx,
fs/afs/dir_edit.c
111
static union afs_xdr_dir_block *afs_dir_get_block(struct afs_dir_iter *iter, size_t block)
fs/afs/dir_edit.c
114
struct afs_vnode *dvnode = iter->dvnode;
fs/afs/dir_edit.c
117
size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
fs/afs/dir_edit.c
131
fq = iter->fq;
fs/afs/dir_edit.c
137
for (int s = iter->fq_slot; s < folioq_count(fq); s++) {
fs/afs/dir_edit.c
145
iter->fq = fq;
fs/afs/dir_edit.c
146
iter->fq_slot = s;
fs/afs/dir_edit.c
147
iter->fpos = fpos;
fs/afs/dir_edit.c
152
iter->fq_slot = 0;
fs/afs/dir_edit.c
156
iter->fq = NULL;
fs/afs/dir_edit.c
157
iter->fq_slot = 0;
fs/afs/dir_edit.c
247
struct afs_dir_iter iter = { .dvnode = vnode };
fs/afs/dir_edit.c
261
meta = afs_dir_get_block(&iter, 0);
fs/afs/dir_edit.c
266
iter.nr_slots = afs_dir_calc_slots(name->len);
fs/afs/dir_edit.c
284
meta->meta.alloc_ctrs[b] < iter.nr_slots)
fs/afs/dir_edit.c
287
block = afs_dir_get_block(&iter, b);
fs/afs/dir_edit.c
311
slot = afs_find_contig_bits(block, iter.nr_slots);
fs/afs/dir_edit.c
332
block = afs_dir_get_block(&iter, 0);
fs/afs/dir_edit.c
350
afs_set_contig_bits(block, slot, iter.nr_slots);
fs/afs/dir_edit.c
354
meta->meta.alloc_ctrs[b] -= iter.nr_slots;
fs/afs/dir_edit.c
358
iter.bucket = afs_dir_hash_name(name);
fs/afs/dir_edit.c
359
de->u.hash_next = meta->meta.hashtable[iter.bucket];
fs/afs/dir_edit.c
360
meta->meta.hashtable[iter.bucket] = htons(entry);
fs/afs/dir_edit.c
398
struct afs_dir_iter iter = { .dvnode = vnode };
fs/afs/dir_edit.c
415
if (!afs_dir_init_iter(&iter, name))
fs/afs/dir_edit.c
418
meta = afs_dir_find_block(&iter, 0);
fs/afs/dir_edit.c
423
found = afs_dir_search_bucket(&iter, name, &fid);
fs/afs/dir_edit.c
436
block = afs_dir_find_block(&iter, b);
fs/afs/dir_edit.c
452
afs_clear_contig_bits(block, slot, iter.nr_slots);
fs/afs/dir_edit.c
456
meta->meta.alloc_ctrs[b] += iter.nr_slots;
fs/afs/dir_edit.c
460
memset(de, 0, sizeof(*de) * iter.nr_slots);
fs/afs/dir_edit.c
466
if (!iter.prev_entry) {
fs/afs/dir_edit.c
467
__be16 prev_next = meta->meta.hashtable[iter.bucket];
fs/afs/dir_edit.c
472
iter.bucket, iter.prev_entry, prev_next, entry,
fs/afs/dir_edit.c
476
meta->meta.hashtable[iter.bucket] = next;
fs/afs/dir_edit.c
478
unsigned int pb = iter.prev_entry / AFS_DIR_SLOTS_PER_BLOCK;
fs/afs/dir_edit.c
479
unsigned int ps = iter.prev_entry % AFS_DIR_SLOTS_PER_BLOCK;
fs/afs/dir_edit.c
482
pblock = afs_dir_find_block(&iter, pb);
fs/afs/dir_edit.c
491
iter.bucket, iter.prev_entry, prev_next, entry,
fs/afs/dir_edit.c
533
struct afs_dir_iter iter = { .dvnode = vnode };
fs/afs/dir_edit.c
552
block = afs_dir_get_block(&iter, b);
fs/afs/dir_edit.c
607
struct afs_dir_iter iter = { .dvnode = dvnode };
fs/afs/dir_edit.c
618
meta = afs_dir_get_block(&iter, 0);
fs/afs/dir_search.c
102
iter->fq = fq;
fs/afs/dir_search.c
103
iter->fq_slot = slot;
fs/afs/dir_search.c
104
iter->fpos = fpos;
fs/afs/dir_search.c
105
iter->block = kmap_local_folio(folio, blpos - fpos);
fs/afs/dir_search.c
106
return iter->block;
fs/afs/dir_search.c
114
iter->fq = NULL;
fs/afs/dir_search.c
115
iter->fq_slot = 0;
fs/afs/dir_search.c
123
int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
fs/afs/dir_search.c
130
meta = afs_dir_find_block(iter, 0);
fs/afs/dir_search.c
134
entry = ntohs(meta->meta.hashtable[iter->bucket & (AFS_DIR_HASHTBL_SIZE - 1)]);
fs/afs/dir_search.c
135
_enter("%x,%x", iter->bucket, entry);
fs/afs/dir_search.c
148
iter->bucket, resv, slot, slot + iter->nr_slots - 1);
fs/afs/dir_search.c
152
block = afs_dir_find_block(iter, blnum);
fs/afs/dir_search.c
157
if (slot + iter->nr_slots <= AFS_DIR_SLOTS_PER_BLOCK &&
fs/afs/dir_search.c
166
iter->prev_entry = entry;
fs/afs/dir_search.c
168
if (!--iter->loop_check) {
fs/afs/dir_search.c
169
kdebug("dir chain loop h=%x", iter->bucket);
fs/afs/dir_search.c
176
if (iter->block) {
fs/afs/dir_search.c
177
kunmap_local(iter->block);
fs/afs/dir_search.c
178
iter->block = NULL;
fs/afs/dir_search.c
183
afs_invalidate_dir(iter->dvnode, afs_dir_invalid_iter_stale);
fs/afs/dir_search.c
194
struct afs_dir_iter iter = { .dvnode = dvnode, };
fs/afs/dir_search.c
199
if (!afs_dir_init_iter(&iter, name))
fs/afs/dir_search.c
219
ret = afs_dir_search_bucket(&iter, name, _fid);
fs/afs/dir_search.c
222
afs_dir_reset_iter(&iter);
fs/afs/dir_search.c
40
static bool afs_dir_reset_iter(struct afs_dir_iter *iter)
fs/afs/dir_search.c
42
unsigned long long i_size = i_size_read(&iter->dvnode->netfs.inode);
fs/afs/dir_search.c
49
iter->loop_check = nblocks * (AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS);
fs/afs/dir_search.c
50
iter->prev_entry = 0; /* Hash head is previous */
fs/afs/dir_search.c
57
bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name)
fs/afs/dir_search.c
59
iter->nr_slots = afs_dir_calc_slots(name->len);
fs/afs/dir_search.c
60
iter->bucket = afs_dir_hash_name(name);
fs/afs/dir_search.c
61
return afs_dir_reset_iter(iter);
fs/afs/dir_search.c
67
union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block)
fs/afs/dir_search.c
69
struct folio_queue *fq = iter->fq;
fs/afs/dir_search.c
70
struct afs_vnode *dvnode = iter->dvnode;
fs/afs/dir_search.c
73
size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
fs/afs/dir_search.c
74
int slot = iter->fq_slot;
fs/afs/dir_search.c
78
if (iter->block) {
fs/afs/dir_search.c
79
kunmap_local(iter->block);
fs/afs/dir_search.c
80
iter->block = NULL;
fs/afs/file.c
24
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
fs/afs/file.c
529
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/afs/file.c
537
return netfs_unbuffered_read_iter(iocb, iter);
fs/afs/file.c
544
ret = filemap_read(iocb, iter, 0);
fs/afs/fsclient.c
1674
_enter("{%u,%zu}", call->unmarshall, iov_iter_count(call->iter));
fs/afs/fsclient.c
311
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
fs/afs/fsclient.c
342
call->iter = &subreq->io_iter;
fs/afs/fsclient.c
358
call->iter = &call->def_iter;
fs/afs/fsclient.c
369
iov_iter_count(call->iter), call->remaining);
fs/afs/internal.h
1124
bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name);
fs/afs/internal.h
1125
union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block);
fs/afs/internal.h
1126
int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
fs/afs/internal.h
145
struct iov_iter *iter; /* Iterator currently in use */
fs/afs/rxrpc.c
176
call->iter = &call->def_iter;
fs/afs/rxrpc.c
806
_enter("{%zu}", iov_iter_count(call->iter));
fs/afs/rxrpc.c
929
struct iov_iter *iter = call->iter;
fs/afs/rxrpc.c
935
call->type->name, call->iov_len, iov_iter_count(iter), want_more);
fs/afs/rxrpc.c
937
ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
fs/afs/rxrpc.c
940
trace_afs_receive_data(call, call->iter, want_more, ret);
fs/afs/vlclient.c
181
call->unmarshall, iov_iter_count(call->iter), call->count);
fs/afs/vlclient.c
323
call->unmarshall, iov_iter_count(call->iter), call->count);
fs/afs/vlclient.c
439
call->unmarshall, iov_iter_count(call->iter), call->count2);
fs/afs/vlclient.c
687
call->unmarshall, iov_iter_count(call->iter), call->count);
fs/afs/yfsclient.c
362
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
fs/afs/yfsclient.c
388
call->iter = &subreq->io_iter;
fs/afs/yfsclient.c
403
call->iter = &call->def_iter;
fs/afs/yfsclient.c
414
iov_iter_count(call->iter), call->remaining);
fs/aio.c
1546
struct iov_iter *iter)
fs/aio.c
1552
ssize_t ret = import_ubuf(rw, buf, len, iter);
fs/aio.c
1557
return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
fs/aio.c
1584
struct iov_iter iter;
fs/aio.c
1597
ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter);
fs/aio.c
1600
ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
fs/aio.c
1602
aio_rw_done(req, file->f_op->read_iter(req, &iter));
fs/aio.c
1611
struct iov_iter iter;
fs/aio.c
1625
ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter);
fs/aio.c
1628
ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
fs/aio.c
1633
aio_rw_done(req, file->f_op->write_iter(req, &iter));
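The fs/aio.c hits above build a struct iov_iter from a user buffer (import_ubuf) or an iovec array (__import_iovec), size-check it with iov_iter_count(), and hand it to ->read_iter()/->write_iter(). Below is a hedged sketch combining the same building blocks with copy_to_iter(), which other entries in this index (fs/btrfs/inode.c, fs/erofs/fscache.c) also rely on; reply_to_user is a hypothetical helper, not an aio function.

/*
 * Sketch only: build an iov_iter over a user buffer and fill it
 * from kernel memory.
 */
#include <linux/uio.h>
#include <linux/minmax.h>
#include <linux/errno.h>

static ssize_t reply_to_user(void __user *ubuf, size_t ulen,
			     const void *kbuf, size_t klen)
{
	struct iov_iter iter;
	size_t want;
	int ret;

	/* ITER_DEST: data flows into the iterator (a read-style copy). */
	ret = import_ubuf(ITER_DEST, ubuf, ulen, &iter);
	if (ret)
		return ret;

	want = min(klen, iov_iter_count(&iter));
	if (copy_to_iter(kbuf, want, &iter) != want)
		return -EFAULT;	/* short copy: user pages went away */
	return want;
}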
fs/backing-file.c
160
static int do_backing_file_read_iter(struct file *file, struct iov_iter *iter,
fs/backing-file.c
169
return vfs_iter_read(file, iter, &iocb->ki_pos, rwf);
fs/backing-file.c
180
ret = vfs_iocb_iter_read(file, &aio->iocb, iter);
fs/backing-file.c
187
ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
fs/backing-file.c
196
if (!iov_iter_count(iter))
fs/backing-file.c
204
ret = do_backing_file_read_iter(file, iter, iocb, flags);
fs/backing-file.c
213
static int do_backing_file_write_iter(struct file *file, struct iov_iter *iter,
fs/backing-file.c
223
ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf);
fs/backing-file.c
243
ret = vfs_iocb_iter_write(file, &aio->iocb, iter);
fs/backing-file.c
250
ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
fs/backing-file.c
259
if (!iov_iter_count(iter))
fs/backing-file.c
271
return do_backing_file_write_iter(file, iter, iocb, flags, ctx->end_write);
fs/btrfs/backref.c
2849
static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
fs/btrfs/backref.c
2851
iter->bytenr = 0;
fs/btrfs/backref.c
2852
iter->item_ptr = 0;
fs/btrfs/backref.c
2853
iter->cur_ptr = 0;
fs/btrfs/backref.c
2854
iter->end_ptr = 0;
fs/btrfs/backref.c
2855
btrfs_release_path(iter->path);
fs/btrfs/backref.c
2856
memset(&iter->cur_key, 0, sizeof(iter->cur_key));
fs/btrfs/backref.c
2859
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
fs/btrfs/backref.c
2861
struct btrfs_fs_info *fs_info = iter->fs_info;
fs/btrfs/backref.c
2863
struct btrfs_path *path = iter->path;
fs/btrfs/backref.c
2878
iter->bytenr = bytenr;
fs/btrfs/backref.c
2904
memcpy(&iter->cur_key, &key, sizeof(key));
fs/btrfs/backref.c
2905
iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
fs/btrfs/backref.c
2907
iter->end_ptr = (u32)(iter->item_ptr +
fs/btrfs/backref.c
2923
iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
fs/btrfs/backref.c
2926
if (iter->cur_ptr >= iter->end_ptr) {
fs/btrfs/backref.c
2937
btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
fs/btrfs/backref.c
2939
if (iter->cur_key.objectid != bytenr ||
fs/btrfs/backref.c
2940
(iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
fs/btrfs/backref.c
2941
iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
fs/btrfs/backref.c
2945
iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
fs/btrfs/backref.c
2947
iter->item_ptr = iter->cur_ptr;
fs/btrfs/backref.c
2948
iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
fs/btrfs/backref.c
2954
btrfs_backref_iter_release(iter);
fs/btrfs/backref.c
2958
static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter)
fs/btrfs/backref.c
2960
if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
fs/btrfs/backref.c
2961
iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
fs/btrfs/backref.c
2976
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
fs/btrfs/backref.c
2978
struct extent_buffer *eb = iter->path->nodes[0];
fs/btrfs/backref.c
2980
struct btrfs_path *path = iter->path;
fs/btrfs/backref.c
2985
if (btrfs_backref_iter_is_inline_ref(iter)) {
fs/btrfs/backref.c
2987
ASSERT(iter->cur_ptr < iter->end_ptr);
fs/btrfs/backref.c
2989
if (btrfs_backref_has_tree_block_info(iter)) {
fs/btrfs/backref.c
2997
((unsigned long)iter->cur_ptr);
fs/btrfs/backref.c
3002
iter->cur_ptr += size;
fs/btrfs/backref.c
3003
if (iter->cur_ptr < iter->end_ptr)
fs/btrfs/backref.c
3010
extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
fs/btrfs/backref.c
3012
btrfs_err(iter->fs_info,
fs/btrfs/backref.c
3014
iter->bytenr);
fs/btrfs/backref.c
3018
ret = btrfs_next_item(extent_root, iter->path);
fs/btrfs/backref.c
3022
btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
fs/btrfs/backref.c
3023
if (iter->cur_key.objectid != iter->bytenr ||
fs/btrfs/backref.c
3024
(iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
fs/btrfs/backref.c
3025
iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
fs/btrfs/backref.c
3027
iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
fs/btrfs/backref.c
3029
iter->cur_ptr = iter->item_ptr;
fs/btrfs/backref.c
3030
iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
fs/btrfs/backref.c
3459
struct btrfs_backref_iter *iter,
fs/btrfs/backref.c
3467
ret = btrfs_backref_iter_start(iter, cur->bytenr);
fs/btrfs/backref.c
3474
if (btrfs_backref_has_tree_block_info(iter)) {
fs/btrfs/backref.c
3475
ret = btrfs_backref_iter_next(iter);
fs/btrfs/backref.c
3505
for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
fs/btrfs/backref.c
3511
eb = iter->path->nodes[0];
fs/btrfs/backref.c
3513
key.objectid = iter->bytenr;
fs/btrfs/backref.c
3514
if (btrfs_backref_iter_is_inline_ref(iter)) {
fs/btrfs/backref.c
3519
((unsigned long)iter->cur_ptr);
fs/btrfs/backref.c
3529
key.type = iter->cur_key.type;
fs/btrfs/backref.c
3530
key.offset = iter->cur_key.offset;
fs/btrfs/backref.c
3571
btrfs_backref_iter_release(iter);
fs/btrfs/backref.h
297
struct btrfs_backref_iter *iter)
fs/btrfs/backref.h
299
if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
fs/btrfs/backref.h
300
iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))
fs/btrfs/backref.h
305
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);
fs/btrfs/backref.h
307
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);
fs/btrfs/backref.h
460
struct btrfs_backref_iter *iter,
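Taken together, the fs/btrfs/backref.c and backref.h hits describe an allocate/start/next/free iterator over an extent's back-references. The driving shape below is reconstructed only from the excerpts themselves (the for-loop condition, the alloc call in fs/btrfs/relocation.c and its cleanup); it is pseudo-usage that only compiles inside fs/btrfs, not a verbatim kernel function.

/*
 * Sketch only: backref-iterator usage shape suggested by the excerpts
 * above. Needs btrfs-internal headers, i.e. only builds within fs/btrfs.
 */
#include "backref.h"	/* btrfs-internal header (assumption) */

static int walk_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	/* ret == 0 means iter is positioned on a backref item */
	for (ret = btrfs_backref_iter_start(iter, bytenr);
	     ret == 0;
	     ret = btrfs_backref_iter_next(iter)) {
		/* examine iter->cur_key / iter->cur_ptr here */
	}

	btrfs_free_path(iter->path);	/* cleanup as done in relocation.c */
	kfree(iter);
	return ret < 0 ? ret : 0;
}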
fs/btrfs/bio.c
309
struct bvec_iter *iter = &bbio->saved_iter;
fs/btrfs/bio.c
331
btrfs_bio_for_each_block(paddr, &bbio->bio, iter, step) {
fs/btrfs/bio.c
877
struct bvec_iter iter;
fs/btrfs/bio.c
889
bio_for_each_bvec(bvec, &bbio->bio, iter)
fs/btrfs/bio.c
894
btrfs_ino(bbio->inode), logical, length, iter.bi_idx,
fs/btrfs/btrfs_inode.h
639
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
fs/btrfs/btrfs_inode.h
643
ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
fs/btrfs/direct-io.c
1002
const struct iov_iter *iter, loff_t offset)
fs/btrfs/direct-io.c
1007
ret = check_direct_IO(fs_info, iter, offset);
fs/btrfs/direct-io.c
1011
if (!iter_is_iovec(iter))
fs/btrfs/direct-io.c
1014
for (seg = 0; seg < iter->nr_segs; seg++) {
fs/btrfs/direct-io.c
1015
for (i = seg + 1; i < iter->nr_segs; i++) {
fs/btrfs/direct-io.c
1016
const struct iovec *iov1 = iter_iov(iter) + seg;
fs/btrfs/direct-io.c
1017
const struct iovec *iov2 = iter_iov(iter) + i;
fs/btrfs/direct-io.c
359
struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
fs/btrfs/direct-io.c
363
struct btrfs_dio_data *dio_data = iter->private;
fs/btrfs/direct-io.c
611
struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
fs/btrfs/direct-io.c
612
struct btrfs_dio_data *dio_data = iter->private;
fs/btrfs/direct-io.c
710
static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio,
fs/btrfs/direct-io.c
716
struct btrfs_dio_data *dio_data = iter->private;
fs/btrfs/direct-io.c
718
btrfs_bio_init(bbio, BTRFS_I(iter->inode), file_offset,
fs/btrfs/direct-io.c
733
if (iter->flags & IOMAP_WRITE) {
fs/btrfs/direct-io.c
760
static ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
fs/btrfs/direct-io.c
765
return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
fs/btrfs/direct-io.c
769
static struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
fs/btrfs/direct-io.c
774
return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
fs/btrfs/direct-io.c
779
const struct iov_iter *iter, loff_t offset)
fs/btrfs/direct-io.c
786
if (iov_iter_alignment(iter) & blocksize_mask)
fs/btrfs/file-item.c
780
struct bvec_iter iter = *src;
fs/btrfs/file-item.c
789
btrfs_bio_for_each_block(paddr, bio, &iter, step) {
fs/btrfs/file.c
1184
const struct iov_iter *iter, u64 start)
fs/btrfs/file.c
1189
iov_iter_count(iter));
fs/btrfs/file.c
1199
static int copy_one_range(struct btrfs_inode *inode, struct iov_iter *iter,
fs/btrfs/file.c
1205
size_t write_bytes = calc_write_bytes(inode, iter, start);
fs/btrfs/file.c
1221
if (unlikely(fault_in_iov_iter_readable(iter, write_bytes)))
fs/btrfs/file.c
1280
write_bytes, iter);
fs/btrfs/file.c
1294
iov_iter_revert(iter, copied);
fs/btrfs/file.c
1349
ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
fs/btrfs/file.c
1375
ret = generic_write_checks(iocb, iter);
fs/btrfs/file.c
1384
while (iov_iter_count(iter) > 0) {
fs/btrfs/file.c
1385
ret = copy_one_range(BTRFS_I(inode), iter, &data_reserved, pos, nowait);
fs/btrfs/inode.c
9456
struct iov_iter *iter, u64 start,
fs/btrfs/inode.c
9532
ret = copy_to_iter(tmp, count, iter);
fs/btrfs/inode.c
9644
ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
fs/btrfs/inode.c
9690
iter) != bytes) {
fs/btrfs/inode.c
9708
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
fs/btrfs/inode.c
9717
size_t count = iov_iter_count(iter);
fs/btrfs/inode.c
9798
ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
fs/btrfs/inode.c
9856
ret = iov_iter_zero(count, iter);
fs/btrfs/ioctl.c
4302
struct iov_iter iter;
fs/btrfs/ioctl.c
4344
&iov, &iter);
fs/btrfs/ioctl.c
4348
if (iov_iter_count(&iter) == 0) {
fs/btrfs/ioctl.c
4360
ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state,
fs/btrfs/ioctl.c
4375
ret = btrfs_encoded_read_regular(&kiocb, &iter, start, lockend,
fs/btrfs/ioctl.c
4408
struct iov_iter iter;
fs/btrfs/ioctl.c
4468
&iov, &iter);
fs/btrfs/ioctl.c
4472
if (iov_iter_count(&iter) == 0) {
fs/btrfs/ioctl.c
4489
ret = btrfs_do_write_iter(&kiocb, &iter, &args);
fs/btrfs/ioctl.c
4507
struct iov_iter iter;
fs/btrfs/ioctl.c
4521
struct iov_iter iter;
fs/btrfs/ioctl.c
4567
&priv->iter) != bytes) {
fs/btrfs/ioctl.c
4605
static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
fs/btrfs/ioctl.c
4637
priv->iter = *iter;
fs/btrfs/ioctl.c
4756
&data->iter);
fs/btrfs/ioctl.c
4760
if (iov_iter_count(&data->iter) == 0) {
fs/btrfs/ioctl.c
4780
ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state,
fs/btrfs/ioctl.c
4801
u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size);
fs/btrfs/ioctl.c
4807
ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend,
fs/btrfs/ioctl.c
4911
&data->iter);
fs/btrfs/ioctl.c
4915
if (iov_iter_count(&data->iter) == 0) {
fs/btrfs/ioctl.c
4939
ret = btrfs_do_write_iter(&kiocb, &data->iter, &data->args);
fs/btrfs/misc.h
31
static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
fs/btrfs/misc.h
33
struct bio_vec bv = bio_iter_iovec(bio, *iter);
fs/btrfs/misc.h
50
#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize) \
fs/btrfs/misc.h
51
for (; (iter)->bi_size && \
fs/btrfs/misc.h
52
(paddr = bio_iter_phys((bio), (iter)), 1); \
fs/btrfs/misc.h
53
bio_advance_iter_single((bio), (iter), (blocksize)))
fs/btrfs/misc.h
74
for (struct bvec_iter iter = init_bvec_iter_for_bio(bio); \
fs/btrfs/misc.h
75
(iter).bi_size && \
fs/btrfs/misc.h
76
(paddr = bio_iter_phys((bio), &(iter)), 1); \
fs/btrfs/misc.h
77
bio_advance_iter_single((bio), &(iter), (blocksize)))
fs/btrfs/qgroup.c
4640
struct ulist_iterator iter;
fs/btrfs/qgroup.c
4649
ULIST_ITER_INIT(&iter);
fs/btrfs/qgroup.c
4650
while ((unode = ulist_next(&changeset.range_changed, &iter))) {
fs/btrfs/raid56.c
1313
struct bvec_iter iter = bio->bi_iter;
fs/btrfs/raid56.c
1318
btrfs_bio_for_each_block(paddr, bio, &iter, step) {
fs/btrfs/relocation.c
410
struct btrfs_backref_iter *iter;
fs/btrfs/relocation.c
419
iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
fs/btrfs/relocation.c
420
if (!iter)
fs/btrfs/relocation.c
438
ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
fs/btrfs/relocation.c
463
btrfs_free_path(iter->path);
fs/btrfs/relocation.c
464
kfree(iter);
fs/cachefiles/internal.h
259
struct iov_iter *iter,
fs/cachefiles/io.c
124
iov_iter_zero(len, iter);
fs/cachefiles/io.c
131
iov_iter_zero(skipped, iter);
fs/cachefiles/io.c
161
ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
fs/cachefiles/io.c
284
struct iov_iter *iter,
fs/cachefiles/io.c
292
size_t len = iov_iter_count(iter);
fs/cachefiles/io.c
332
ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
fs/cachefiles/io.c
361
struct iov_iter *iter,
fs/cachefiles/io.c
374
start_pos, iter,
fs/cachefiles/io.c
77
struct iov_iter *iter,
fs/cachefiles/io.c
87
size_t len = iov_iter_count(iter), skipped = 0;
fs/cachefiles/ondemand.c
59
struct iov_iter *iter)
fs/cachefiles/ondemand.c
64
size_t len = iter->count, aligned_len = len;
fs/cachefiles/ondemand.c
85
ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
fs/ceph/caps.c
1673
struct ceph_cap_flush *cf = NULL, *iter;
fs/ceph/caps.c
1683
list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) {
fs/ceph/caps.c
1684
if (iter->tid >= first_tid) {
fs/ceph/caps.c
1685
cf = iter;
fs/ceph/caps.c
3348
struct ceph_cap_snap *capsnap = NULL, *iter;
fs/ceph/caps.c
3376
list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
fs/ceph/caps.c
3377
if (iter->context == snapc) {
fs/ceph/caps.c
3378
capsnap = iter;
fs/ceph/caps.c
3977
struct ceph_cap_snap *capsnap = NULL, *iter;
fs/ceph/caps.c
3985
list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
fs/ceph/caps.c
3986
if (iter->follows == follows) {
fs/ceph/caps.c
3987
if (iter->cap_flush.tid != flush_tid) {
fs/ceph/caps.c
3989
"tid %lld != %lld\n", iter,
fs/ceph/caps.c
3991
iter->cap_flush.tid);
fs/ceph/caps.c
3994
capsnap = iter;
fs/ceph/caps.c
3998
iter, iter->follows);
fs/ceph/file.c
100
bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
fs/ceph/file.c
127
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
fs/ceph/file.c
131
size_t orig_count = iov_iter_count(iter);
fs/ceph/file.c
1333
unsigned int len = osd_data->bvec_pos.iter.bi_size;
fs/ceph/file.c
135
iov_iter_truncate(iter, maxsize);
fs/ceph/file.c
136
npages = iov_iter_npages(iter, INT_MAX);
fs/ceph/file.c
137
iov_iter_reexpand(iter, orig_count);
fs/ceph/file.c
147
bytes = __iter_get_bvecs(iter, maxsize, bv);
fs/ceph/file.c
1474
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
fs/ceph/file.c
1492
size_t count = iov_iter_count(iter);
fs/ceph/file.c
1494
bool write = iov_iter_rw(iter) == WRITE;
fs/ceph/file.c
1495
bool should_dirty = !write && user_backed_iter(iter);
fs/ceph/file.c
1522
while (iov_iter_count(iter) > 0) {
fs/ceph/file.c
1523
u64 size = iov_iter_count(iter);
fs/ceph/file.c
1558
len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
fs/ceph/file.c
85
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
fs/ceph/file.c
91
if (maxsize > iov_iter_count(iter))
fs/ceph/file.c
92
maxsize = iov_iter_count(iter);
fs/ceph/inode.c
2407
struct iov_iter iter;
fs/ceph/inode.c
2450
iov_iter_kvec(&iter, READ, &iov, 1, len);
fs/ceph/inode.c
2453
ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
fs/coredump.c
1279
struct iov_iter iter;
fs/coredump.c
1298
iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
fs/coredump.c
1299
n = __kernel_write_iter(cprm->file, &iter, &pos);
fs/dax.c
1003
static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
fs/dax.c
1005
pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
fs/dax.c
1011
rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
fs/dax.c
1028
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
fs/dax.c
1031
return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
fs/dax.c
1032
(iter->iomap.flags & IOMAP_F_DIRTY);
fs/dax.c
1043
const struct iomap_iter *iter, void *entry, unsigned long pfn,
fs/dax.c
1048
bool write = iter->flags & IOMAP_WRITE;
fs/dax.c
1049
bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
fs/dax.c
1050
bool shared = iter->iomap.flags & IOMAP_F_SHARED;
fs/dax.c
1359
const struct iomap_iter *iter, void **entry)
fs/dax.c
1361
struct inode *inode = iter->inode;
fs/dax.c
1366
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
fs/dax.c
1375
const struct iomap_iter *iter, void **entry)
fs/dax.c
1389
*entry = dax_insert_entry(xas, vmf, iter, *entry, folio_pfn(zero_folio),
fs/dax.c
1399
const struct iomap_iter *iter, void **entry)
fs/dax.c
1405
static int dax_unshare_iter(struct iomap_iter *iter)
fs/dax.c
1407
struct iomap *iomap = &iter->iomap;
fs/dax.c
1408
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/dax.c
1409
loff_t copy_pos = iter->pos;
fs/dax.c
1410
u64 copy_len = iomap_length(iter);
fs/dax.c
1416
if (!iomap_want_unshare_iter(iter))
fs/dax.c
1417
return iomap_iter_advance_full(iter);
fs/dax.c
1434
invalidate_inode_pages2_range(iter->inode->i_mapping,
fs/dax.c
1454
return iomap_iter_advance_full(iter);
fs/dax.c
1460
struct iomap_iter iter = {
fs/dax.c
1471
iter.len = min(len, size - pos);
fs/dax.c
1472
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/dax.c
1473
iter.status = dax_unshare_iter(&iter);
fs/dax.c
1478
static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
fs/dax.c
1480
const struct iomap *iomap = &iter->iomap;
fs/dax.c
1481
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/dax.c
1501
static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
fs/dax.c
1503
const struct iomap *iomap = &iter->iomap;
fs/dax.c
1504
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/dax.c
1505
u64 length = iomap_length(iter);
fs/dax.c
1510
return iomap_iter_advance(iter, length);
fs/dax.c
1517
invalidate_inode_pages2_range(iter->inode->i_mapping,
fs/dax.c
1518
iter->pos >> PAGE_SHIFT,
fs/dax.c
1519
(iter->pos + length - 1) >> PAGE_SHIFT);
fs/dax.c
1522
loff_t pos = iter->pos;
fs/dax.c
1533
ret = dax_memzero(iter, pos, length);
fs/dax.c
1539
ret = iomap_iter_advance(iter, length);
fs/dax.c
1542
} while ((length = iomap_length(iter)) > 0);
fs/dax.c
1552
struct iomap_iter iter = {
fs/dax.c
1560
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/dax.c
1561
iter.status = dax_zero_iter(&iter, did_zero);
fs/dax.c
1579
static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter)
fs/dax.c
1587
bool write = iov_iter_rw(iter) == WRITE;
fs/dax.c
1599
done = iov_iter_zero(min(length, end - pos), iter);
fs/dax.c
1649
if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
fs/dax.c
1676
map_len, iter);
fs/dax.c
1679
map_len, iter);
fs/dax.c
1682
map_len, iter);
fs/dax.c
1707
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
fs/dax.c
1713
.len = iov_iter_count(iter),
fs/dax.c
1725
if (iov_iter_rw(iter) == WRITE) {
fs/dax.c
1736
iomi.status = dax_iomap_iter(&iomi, iter);
fs/dax.c
1767
const struct iomap_iter *iter)
fs/dax.c
1772
switch (iter->iomap.type) {
fs/dax.c
1778
error = copy_cow_page_dax(vmf, iter);
fs/dax.c
1806
const struct iomap_iter *iter, unsigned long *pfnp,
fs/dax.c
1809
const struct iomap *iomap = &iter->iomap;
fs/dax.c
1810
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/dax.c
1813
bool write = iter->flags & IOMAP_WRITE;
fs/dax.c
1821
return dax_fault_cow_page(vmf, iter);
fs/dax.c
1827
return dax_load_hole(xas, vmf, iter, entry);
fs/dax.c
1828
return dax_pmd_load_hole(xas, vmf, iter, entry);
fs/dax.c
1840
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
fs/dax.c
1849
if (dax_fault_is_synchronous(iter, vmf->vma))
fs/dax.c
1867
struct iomap_iter iter = {
fs/dax.c
1877
trace_dax_pte_fault(iter.inode, vmf, ret);
fs/dax.c
1883
if (iter.pos >= i_size_read(iter.inode)) {
fs/dax.c
1889
iter.flags |= IOMAP_WRITE;
fs/dax.c
1908
while ((error = iomap_iter(&iter, ops)) > 0) {
fs/dax.c
1909
if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
fs/dax.c
1910
iter.status = -EIO; /* fs corruption? */
fs/dax.c
1914
ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
fs/dax.c
1916
(iter.iomap.flags & IOMAP_F_NEW)) {
fs/dax.c
1923
iter.status = iomap_iter_advance(&iter, PAGE_SIZE);
fs/dax.c
1934
trace_dax_pte_fault_done(iter.inode, vmf, ret);
fs/dax.c
1977
struct iomap_iter iter = {
fs/dax.c
1987
iter.flags |= IOMAP_WRITE;
fs/dax.c
1994
max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
fs/dax.c
1996
trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
fs/dax.c
2029
iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
fs/dax.c
2030
while (iomap_iter(&iter, ops) > 0) {
fs/dax.c
2031
if (iomap_length(&iter) < PMD_SIZE)
fs/dax.c
2034
ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
fs/dax.c
2036
iter.status = iomap_iter_advance(&iter, PMD_SIZE);
fs/dax.c
2047
trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
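The fs/dax.c hits, like the fs/iomap/buffered-io.c hits later in this list, drive per-extent work through one loop: fill in a struct iomap_iter, call iomap_iter() until it returns 0 or an error, and record each step's result in iter.status, with the step advancing the iterator via iomap_iter_advance(). Below is a hedged sketch of that driving shape; my_op, my_op_iter and my_iomap_ops are hypothetical names, and the calls mirror only what the excerpts themselves show.

/*
 * Sketch only: the iomap_iter() driving loop visible in the fs/dax.c
 * and fs/iomap entries. my_op_iter and my_iomap_ops are hypothetical.
 */
#include <linux/iomap.h>

static int my_op_iter(struct iomap_iter *iter)
{
	u64 length = iomap_length(iter);

	/* ... act on iter->iomap over [iter->pos, iter->pos + length) ... */

	/* report progress so the core can move the iterator forward */
	return iomap_iter_advance(iter, length);
}

static int my_op(struct inode *inode, loff_t pos, loff_t len,
		 const struct iomap_ops *my_iomap_ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
	};
	int ret;

	while ((ret = iomap_iter(&iter, my_iomap_ops)) > 0)
		iter.status = my_op_iter(&iter);
	return ret;
}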
fs/direct-io.c
105
struct iov_iter *iter;
fs/direct-io.c
1106
struct block_device *bdev, struct iov_iter *iter,
fs/direct-io.c
1114
const size_t count = iov_iter_count(iter);
fs/direct-io.c
1121
unsigned long align = offset | iov_iter_alignment(iter);
fs/direct-io.c
1124
if (iov_iter_rw(iter) == READ && !count)
fs/direct-io.c
1138
if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
fs/direct-io.c
1142
dio->is_pinned = iov_iter_extract_will_pin(iter);
fs/direct-io.c
1146
if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
fs/direct-io.c
1159
if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
fs/direct-io.c
1175
else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
fs/direct-io.c
1181
if (iov_iter_rw(iter) == WRITE) {
fs/direct-io.c
1193
if (dio->is_async && iov_iter_rw(iter) == WRITE) {
fs/direct-io.c
1228
dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
fs/direct-io.c
1229
sdio.iter = iter;
fs/direct-io.c
1239
sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);
fs/direct-io.c
1285
if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
fs/direct-io.c
1297
(iov_iter_rw(iter) == READ || dio->result == count))
fs/direct-io.c
1310
if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
fs/direct-io.c
172
ret = iov_iter_extract_pages(sdio->iter, &pages, LONG_MAX,
fs/dlm/dir.c
254
struct dlm_dir_dump *iter, *dd = NULL;
fs/dlm/dir.c
257
list_for_each_entry(iter, &ls->ls_dir_dump_list, list) {
fs/dlm/dir.c
258
if (iter->nodeid_init == nodeid) {
fs/dlm/dir.c
259
dd = iter;
fs/dlm/lock.c
1641
struct dlm_lkb *lkb = NULL, *iter;
fs/dlm/lock.c
1643
list_for_each_entry(iter, head, lkb_statequeue)
fs/dlm/lock.c
1644
if (iter->lkb_rqmode < mode) {
fs/dlm/lock.c
1645
lkb = iter;
fs/dlm/lock.c
1646
list_add_tail(new, &iter->lkb_statequeue);
fs/dlm/lock.c
5154
struct dlm_lkb *lkb = NULL, *iter;
fs/dlm/lock.c
5157
list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
fs/dlm/lock.c
5158
if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
fs/dlm/lock.c
5159
hold_lkb(iter);
fs/dlm/lock.c
5160
lkb = iter;
fs/dlm/lock.c
5867
struct dlm_lkb *lkb = NULL, *iter;
fs/dlm/lock.c
5873
list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
fs/dlm/lock.c
5874
if (iter->lkb_resource->res_length != namelen)
fs/dlm/lock.c
5876
if (memcmp(iter->lkb_resource->res_name, name, namelen))
fs/dlm/lock.c
5878
if (iter->lkb_grmode != mode) {
fs/dlm/lock.c
5883
lkb = iter;
fs/dlm/lock.c
5884
list_del_init(&iter->lkb_ownqueue);
fs/dlm/lock.c
5885
clear_bit(DLM_DFL_ORPHAN_BIT, &iter->lkb_dflags);
fs/dlm/lock.c
5886
*lkid = iter->lkb_id;
fs/dlm/plock.c
49
struct plock_op *op = NULL, *iter;
fs/dlm/plock.c
51
list_for_each_entry(iter, &recv_list, list) {
fs/dlm/plock.c
52
if (iter->info.fsid == info->fsid &&
fs/dlm/plock.c
523
struct plock_op *op = NULL, *iter;
fs/dlm/plock.c
53
iter->info.number == info->number &&
fs/dlm/plock.c
54
iter->info.owner == info->owner &&
fs/dlm/plock.c
548
list_for_each_entry(iter, &recv_list, list) {
fs/dlm/plock.c
549
if (!iter->info.wait &&
fs/dlm/plock.c
55
iter->info.pid == info->pid &&
fs/dlm/plock.c
550
iter->info.fsid == info.fsid) {
fs/dlm/plock.c
551
op = iter;
fs/dlm/plock.c
56
iter->info.start == info->start &&
fs/dlm/plock.c
57
iter->info.end == info->end &&
fs/dlm/plock.c
58
iter->info.ex == info->ex &&
fs/dlm/plock.c
59
iter->info.wait) {
fs/dlm/plock.c
60
op = iter;
fs/dlm/recover.c
733
struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
fs/dlm/recover.c
751
list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
fs/dlm/recover.c
752
if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
fs/dlm/recover.c
757
if (iter->lkb_grmode > DLM_LOCK_CR) {
fs/dlm/recover.c
758
big_lkb = iter;
fs/dlm/recover.c
762
if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
fs/dlm/recover.c
763
high_lkb = iter;
fs/dlm/recover.c
764
high_seq = iter->lkb_lvbseq;
fs/dlm/recover.c
768
list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
fs/dlm/recover.c
769
if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
fs/dlm/recover.c
774
if (iter->lkb_grmode > DLM_LOCK_CR) {
fs/dlm/recover.c
775
big_lkb = iter;
fs/dlm/recover.c
779
if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
fs/dlm/recover.c
780
high_lkb = iter;
fs/dlm/recover.c
781
high_seq = iter->lkb_lvbseq;
fs/erofs/data.c
279
struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
fs/erofs/data.c
280
struct erofs_iomap_iter_ctx *ctx = iter->private;
fs/erofs/data.c
344
struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
fs/erofs/data.c
345
struct erofs_iomap_iter_ctx *ctx = iter->private;
fs/erofs/fileio.c
46
struct iov_iter iter;
fs/erofs/fileio.c
57
iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
fs/erofs/fileio.c
60
ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
fs/erofs/fscache.c
124
struct iov_iter *iter = &io->iter;
fs/erofs/fscache.c
131
while (iov_iter_count(iter)) {
fs/erofs/fscache.c
132
size_t orig_count = iov_iter_count(iter), len = orig_count;
fs/erofs/fscache.c
144
iov_iter_truncate(iter, len);
fs/erofs/fscache.c
146
ret = fscache_read(cres, pstart, iter, NETFS_READ_HOLE_FAIL,
fs/erofs/fscache.c
154
if (WARN_ON(iov_iter_count(iter)))
fs/erofs/fscache.c
157
iov_iter_reexpand(iter, orig_count - len);
fs/erofs/fscache.c
17
struct iov_iter iter;
fs/erofs/fscache.c
198
iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt,
fs/erofs/fscache.c
228
iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages,
fs/erofs/fscache.c
260
struct iov_iter iter;
fs/erofs/fscache.c
269
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
fs/erofs/fscache.c
270
if (copy_to_iter(src, size, &iter) != size) {
fs/erofs/fscache.c
274
iov_iter_zero(PAGE_SIZE - size, &iter);
fs/erofs/fscache.c
282
struct iov_iter iter;
fs/erofs/fscache.c
284
iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);
fs/erofs/fscache.c
285
iov_iter_zero(count, &iter);
fs/erofs/fscache.c
304
iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
fs/erofs/zdata.c
141
static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
fs/erofs/zdata.c
143
if (iter->bvpage)
fs/erofs/zdata.c
144
kunmap_local(iter->bvset);
fs/erofs/zdata.c
145
return iter->bvpage;
fs/erofs/zdata.c
148
static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
fs/erofs/zdata.c
152
struct page *nextpage = iter->bvset->nextpage;
fs/erofs/zdata.c
156
oldpage = z_erofs_bvec_iter_end(iter);
fs/erofs/zdata.c
157
iter->bvpage = nextpage;
fs/erofs/zdata.c
158
iter->bvset = kmap_local_page(nextpage);
fs/erofs/zdata.c
159
iter->nr = (PAGE_SIZE - base) / sizeof(struct z_erofs_bvec);
fs/erofs/zdata.c
160
iter->cur = 0;
fs/erofs/zdata.c
164
static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
fs/erofs/zdata.c
169
*iter = (struct z_erofs_bvec_iter) {
fs/erofs/zdata.c
174
while (cur > iter->nr) {
fs/erofs/zdata.c
175
cur -= iter->nr;
fs/erofs/zdata.c
176
z_erofs_bvset_flip(iter);
fs/erofs/zdata.c
178
iter->cur = cur;
fs/erofs/zdata.c
181
static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
fs/erofs/zdata.c
186
if (iter->cur >= iter->nr) {
fs/erofs/zdata.c
196
DBG_BUGON(iter->bvset->nextpage);
fs/erofs/zdata.c
197
iter->bvset->nextpage = nextpage;
fs/erofs/zdata.c
198
z_erofs_bvset_flip(iter);
fs/erofs/zdata.c
200
iter->bvset->nextpage = NULL;
fs/erofs/zdata.c
203
iter->bvset->bvec[iter->cur++] = *bvec;
fs/erofs/zdata.c
207
static void z_erofs_bvec_dequeue(struct z_erofs_bvec_iter *iter,
fs/erofs/zdata.c
211
if (iter->cur == iter->nr)
fs/erofs/zdata.c
212
*old_bvpage = z_erofs_bvset_flip(iter);
fs/erofs/zdata.c
215
*bvec = iter->bvset->bvec[iter->cur++];
fs/exfat/file.c
631
static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/exfat/file.c
650
ret = generic_write_checks(iocb, iter);
fs/exfat/file.c
655
unsigned long align = pos | iov_iter_alignment(iter);
fs/exfat/file.c
675
ret = __generic_file_write_iter(iocb, iter);
fs/exfat/file.c
699
static ssize_t exfat_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/exfat/file.c
706
return generic_file_read_iter(iocb, iter);
fs/exfat/inode.c
478
static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/exfat/inode.c
484
loff_t size = pos + iov_iter_count(iter);
fs/exfat/inode.c
485
int rw = iov_iter_rw(iter);
fs/exfat/inode.c
492
ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
fs/exfat/inode.c
514
iov_iter_revert(iter, size - ei->valid_size);
fs/exfat/inode.c
515
iov_iter_zero(size - ei->valid_size, iter);
fs/ext2/trace.h
12
TP_PROTO(struct kiocb *iocb, struct iov_iter *iter, ssize_t ret),
fs/ext2/trace.h
13
TP_ARGS(iocb, iter, ret),
fs/ext2/trace.h
29
__entry->count = iov_iter_count(iter);
fs/ext2/trace.h
47
TP_PROTO(struct kiocb *iocb, struct iov_iter *iter, ssize_t ret), \
fs/ext2/trace.h
48
TP_ARGS(iocb, iter, ret))
fs/ext4/fast_commit.c
1046
struct ext4_inode_info *iter;
fs/ext4/fast_commit.c
1060
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
fs/ext4/fast_commit.c
1061
ext4_set_inode_state(&iter->vfs_inode,
fs/ext4/fast_commit.c
1075
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
fs/ext4/fast_commit.c
1076
ext4_clear_inode_state(&iter->vfs_inode,
fs/ext4/fast_commit.c
1079
wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_FLUSHING_DATA);
fs/ext4/fast_commit.c
1081
wake_up_bit(&iter->i_flags, EXT4_STATE_FC_FLUSHING_DATA);
fs/ext4/fast_commit.c
1109
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
fs/ext4/fast_commit.c
1110
ext4_set_inode_state(&iter->vfs_inode,
fs/ext4/fast_commit.c
1147
list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
fs/ext4/fast_commit.c
1148
inode = &iter->vfs_inode;
fs/ext4/file.c
56
static bool ext4_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
fs/ext4/file.c
67
return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
fs/ext4/mballoc.c
4349
struct rb_node *iter;
fs/ext4/mballoc.c
4352
for (iter = ei->i_prealloc_node.rb_node; iter;
fs/ext4/mballoc.c
4353
iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) {
fs/ext4/mballoc.c
4354
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
fs/ext4/mballoc.c
4384
struct rb_node *iter;
fs/ext4/mballoc.c
4399
for (iter = ei->i_prealloc_node.rb_node; iter;
fs/ext4/mballoc.c
4400
iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
fs/ext4/mballoc.c
4401
tmp_pa_start, iter)) {
fs/ext4/mballoc.c
4402
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
fs/ext4/mballoc.c
4445
for (iter = &left_pa->pa_node.inode_node;;
fs/ext4/mballoc.c
4446
iter = rb_prev(iter)) {
fs/ext4/mballoc.c
4447
if (!iter) {
fs/ext4/mballoc.c
4452
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
fs/ext4/mballoc.c
4465
for (iter = &right_pa->pa_node.inode_node;;
fs/ext4/mballoc.c
4466
iter = rb_next(iter)) {
fs/ext4/mballoc.c
4467
if (!iter) {
fs/ext4/mballoc.c
4472
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
fs/ext4/mballoc.c
4889
struct rb_node *iter;
fs/ext4/mballoc.c
4915
for (iter = ei->i_prealloc_node.rb_node; iter;
fs/ext4/mballoc.c
4916
iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
fs/ext4/mballoc.c
4917
tmp_pa->pa_lstart, iter)) {
fs/ext4/mballoc.c
4918
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
fs/ext4/mballoc.c
4951
for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
fs/ext4/mballoc.c
4952
if (!iter) {
fs/ext4/mballoc.c
4959
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
fs/ext4/mballoc.c
5230
struct rb_node **iter = &root->rb_node, *parent = NULL;
fs/ext4/mballoc.c
5234
while (*iter) {
fs/ext4/mballoc.c
5235
iter_pa = rb_entry(*iter, struct ext4_prealloc_space,
fs/ext4/mballoc.c
5242
parent = *iter;
fs/ext4/mballoc.c
5244
iter = &((*iter)->rb_left);
fs/ext4/mballoc.c
5246
iter = &((*iter)->rb_right);
fs/ext4/mballoc.c
5249
rb_link_node(new, parent, iter);
fs/ext4/mballoc.c
5622
struct rb_node *iter;
fs/ext4/mballoc.c
5639
for (iter = rb_first(&ei->i_prealloc_node); iter;
fs/ext4/mballoc.c
5640
iter = rb_next(iter)) {
fs/ext4/mballoc.c
5641
pa = rb_entry(iter, struct ext4_prealloc_space,
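In the fs/ext4/mballoc.c hits, iter is a struct rb_node * cursor over the per-inode preallocation rb-tree, moved with rb_first()/rb_next()/rb_prev() and converted back to the containing object with rb_entry(). Below is a compact sketch of that traversal pattern; struct my_pa is a hypothetical stand-in for struct ext4_prealloc_space.

/*
 * Sketch only: rb-tree traversal with an 'iter' node cursor, as in the
 * fs/ext4/mballoc.c entries above. struct my_pa is hypothetical.
 */
#include <linux/rbtree.h>

struct my_pa {
	unsigned long	pa_lstart;
	struct rb_node	inode_node;
};

static struct my_pa *first_pa_at_or_after(struct rb_root *root,
					   unsigned long lstart)
{
	struct rb_node *iter;
	struct my_pa *pa;

	/* In-order walk: rb_first()/rb_next() visit nodes in ascending key order. */
	for (iter = rb_first(root); iter; iter = rb_next(iter)) {
		pa = rb_entry(iter, struct my_pa, inode_node);
		if (pa->pa_lstart >= lstart)
			return pa;
	}
	return NULL;
}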
fs/f2fs/file.c
4746
struct iov_iter *iter)
fs/f2fs/file.c
4753
if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
fs/f2fs/file.c
4767
align = iocb->ki_pos | iov_iter_alignment(iter);
fs/f2fs/file.c
4959
static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
fs/f2fs/file.c
4965
const size_t count = iov_iter_count(iter);
fs/f2fs/file.c
4986
if (fault_in_iov_iter_readable(iter, count))
fs/f2fs/file.c
5058
static void f2fs_dio_write_submit_io(const struct iomap_iter *iter,
fs/f2fs/file.c
5061
struct inode *inode = iter->inode;
fs/f2fs/gc.c
571
unsigned int iter = 0;
fs/f2fs/gc.c
606
iter++;
fs/f2fs/gc.c
615
if (iter < dirty_threshold) {
fs/f2fs/gc.c
638
unsigned int cost, iter;
fs/f2fs/gc.c
645
iter = 0;
fs/f2fs/gc.c
666
iter++;
fs/f2fs/gc.c
678
if (iter < dirty_threshold) {
fs/f2fs/segment.c
1766
struct discard_cmd *dc = NULL, *iter, *tmp;
fs/f2fs/segment.c
1773
list_for_each_entry_safe(iter, tmp, wait_list, list) {
fs/f2fs/segment.c
1774
if (iter->di.lstart + iter->di.len <= start ||
fs/f2fs/segment.c
1775
end <= iter->di.lstart)
fs/f2fs/segment.c
1777
if (iter->di.len < dpolicy->granularity)
fs/f2fs/segment.c
1779
if (iter->state == D_DONE && !iter->ref) {
fs/f2fs/segment.c
1780
wait_for_completion_io(&iter->wait);
fs/f2fs/segment.c
1781
if (!iter->error)
fs/f2fs/segment.c
1782
trimmed += iter->di.len;
fs/f2fs/segment.c
1783
__remove_discard_cmd(sbi, iter);
fs/f2fs/segment.c
1785
iter->ref++;
fs/f2fs/segment.c
1786
dc = iter;
fs/fat/inode.c
256
static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/fat/inode.c
261
size_t count = iov_iter_count(iter);
fs/fat/inode.c
265
if (iov_iter_rw(iter) == WRITE) {
fs/fat/inode.c
284
ret = blockdev_direct_IO(iocb, inode, iter, fat_get_block);
fs/fat/inode.c
285
if (ret < 0 && iov_iter_rw(iter) == WRITE)
fs/fuse/dev.c
841
struct iov_iter *iter)
fs/fuse/dev.c
845
cs->iter = iter;
fs/fuse/dev.c
917
err = iov_iter_get_pages2(cs->iter, &page, PAGE_SIZE, 1, &off);
fs/fuse/dev_uring.c
1017
struct iov_iter iter;
fs/fuse/dev_uring.c
1028
FUSE_URING_IOV_SEGS, &iov, &iter);
fs/fuse/dev_uring.c
584
struct iov_iter iter;
fs/fuse/dev_uring.c
594
&iter);
fs/fuse/dev_uring.c
598
fuse_copy_init(&cs, false, &iter);
fs/fuse/dev_uring.c
618
struct iov_iter iter;
fs/fuse/dev_uring.c
624
err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter);
fs/fuse/dev_uring.c
630
fuse_copy_init(&cs, true, &iter);
fs/fuse/file.c
1387
static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter)
fs/fuse/file.c
1391
return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
fs/fuse/file.c
1641
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
fs/fuse/file.c
1653
size_t count = iov_iter_count(iter);
fs/fuse/file.c
1662
max_pages = iov_iter_npages(iter, fc->max_pages);
fs/fuse/file.c
1690
io->should_dirty = !write && user_backed_iter(iter);
fs/fuse/file.c
1696
err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
fs/fuse/file.c
1716
iov_iter_revert(iter, nbytes);
fs/fuse/file.c
1726
iov_iter_revert(iter, nbytes - nres);
fs/fuse/file.c
1730
max_pages = iov_iter_npages(iter, fc->max_pages);
fs/fuse/file.c
1755
struct iov_iter *iter,
fs/fuse/file.c
1761
res = fuse_direct_io(io, iter, ppos, 0);
fs/fuse/file.c
1768
static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
fs/fuse/file.c
2817
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/fuse/file.c
2826
size_t count = iov_iter_count(iter), shortened = 0;
fs/fuse/file.c
2834
if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
fs/fuse/file.c
2846
io->write = (iov_iter_rw(iter) == WRITE);
fs/fuse/file.c
2858
iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
fs/fuse/file.c
2859
shortened = count - iov_iter_count(iter);
fs/fuse/file.c
2879
if (iov_iter_rw(iter) == WRITE) {
fs/fuse/file.c
2880
ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
fs/fuse/file.c
2883
ret = __fuse_direct_read(io, iter, &pos);
fs/fuse/file.c
2885
iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
fs/fuse/file.c
2902
if (iov_iter_rw(iter) == WRITE) {
fs/fuse/file.c
924
static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
fs/fuse/file.c
930
loff_t pos = iter->pos;
fs/fuse/file.c
987
static int fuse_iomap_read_folio_range(const struct iomap_iter *iter,
fs/fuse/file.c
991
struct file *file = iter->private;
fs/fuse/fuse_dev_i.h
26
struct iov_iter *iter;
fs/fuse/fuse_dev_i.h
64
struct iov_iter *iter);
fs/fuse/fuse_i.h
1458
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
fs/fuse/fuse_i.h
1609
ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *iter);
fs/fuse/fuse_i.h
1610
ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *iter);
fs/fuse/passthrough.c
28
ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/fuse/passthrough.c
33
size_t count = iov_iter_count(iter);
fs/fuse/passthrough.c
47
ret = backing_file_read_iter(backing_file, iter, iocb, iocb->ki_flags,
fs/fuse/passthrough.c
54
struct iov_iter *iter)
fs/fuse/passthrough.c
60
size_t count = iov_iter_count(iter);
fs/fuse/passthrough.c
74
ret = backing_file_write_iter(backing_file, iter, iocb, iocb->ki_flags,
fs/gfs2/bmap.c
963
gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
fs/gfs2/bmap.c
965
struct inode *inode = iter->inode;
fs/gfs2/bmap.c
974
return iomap_get_folio(iter, pos, len);
fs/gfs2/bmap.c
981
folio = iomap_get_folio(iter, pos, len);
fs/gfs2/glock.c
2009
struct rhashtable_iter iter;
fs/gfs2/glock.c
2011
rhashtable_walk_enter(&gl_hash_table, &iter);
fs/gfs2/glock.c
2014
rhashtable_walk_start(&iter);
fs/gfs2/glock.c
2016
while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
fs/gfs2/glock.c
2021
rhashtable_walk_stop(&iter);
fs/gfs2/glock.c
2024
rhashtable_walk_exit(&iter);
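The fs/gfs2/glock.c hits show the standard rhashtable walk: enter, start, repeatedly fetch with rhashtable_walk_next() while checking IS_ERR(), then stop and exit. Below is a hedged, generalized sketch; my_table and struct my_obj are hypothetical, and restart-on-EAGAIN handling is elided.

/*
 * Sketch only: the rhashtable walk shape from the fs/gfs2/glock.c
 * entries above, generalized. my_table and struct my_obj are hypothetical.
 */
#include <linux/rhashtable.h>
#include <linux/err.h>

struct my_obj {
	struct rhash_head node;
	int key;
};

static void walk_all_objects(struct rhashtable *my_table,
			     void (*cb)(struct my_obj *obj))
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(my_table, &iter);
	rhashtable_walk_start(&iter);

	/*
	 * rhashtable_walk_next() can return ERR_PTR(-EAGAIN) during a
	 * concurrent resize; a robust caller would restart the walk.
	 */
	while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
		cb(obj);

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}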
fs/gfs2/quota.c
1329
struct gfs2_quota_data *iter;
fs/gfs2/quota.c
1334
list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
fs/gfs2/quota.c
1335
if (qd_grab_sync(sdp, iter, sync_gen)) {
fs/gfs2/quota.c
1336
qda[num_qd++] = iter;
fs/gfs2/recovery.c
58
struct gfs2_revoke_replay *rr = NULL, *iter;
fs/gfs2/recovery.c
60
list_for_each_entry(iter, head, rr_list) {
fs/gfs2/recovery.c
61
if (iter->rr_blkno == blkno) {
fs/gfs2/recovery.c
62
rr = iter;
fs/gfs2/recovery.c
85
struct gfs2_revoke_replay *rr = NULL, *iter;
fs/gfs2/recovery.c
88
list_for_each_entry(iter, &jd->jd_revoke_list, rr_list) {
fs/gfs2/recovery.c
89
if (iter->rr_blkno == blkno) {
fs/gfs2/recovery.c
90
rr = iter;
fs/hfs/inode.c
125
static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/hfs/inode.c
130
size_t count = iov_iter_count(iter);
fs/hfs/inode.c
133
ret = blockdev_direct_IO(iocb, inode, iter, hfs_get_block);
fs/hfs/inode.c
139
if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
fs/hfsplus/inode.c
123
static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/hfsplus/inode.c
128
size_t count = iov_iter_count(iter);
fs/hfsplus/inode.c
131
ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);
fs/hfsplus/inode.c
137
if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
fs/iomap/bio.c
126
int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
fs/iomap/bio.c
129
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/iomap/bio.c
78
static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
fs/iomap/bio.c
82
const struct iomap *iomap = &iter->iomap;
fs/iomap/bio.c
83
loff_t pos = iter->pos;
fs/iomap/bio.c
85
loff_t length = iomap_length(iter);
fs/iomap/buffered-io.c
1000
if (folio_pos(folio) > iter->pos) {
fs/iomap/buffered-io.c
1001
len = min_t(u64, folio_pos(folio) - iter->pos,
fs/iomap/buffered-io.c
1002
iomap_length(iter));
fs/iomap/buffered-io.c
1003
status = iomap_iter_advance(iter, len);
fs/iomap/buffered-io.c
1004
len = iomap_length(iter);
fs/iomap/buffered-io.c
1009
pos = iomap_trim_folio_range(iter, folio, poffset, &len);
fs/iomap/buffered-io.c
1012
status = iomap_write_begin_inline(iter, folio);
fs/iomap/buffered-io.c
1016
status = __iomap_write_begin(iter, write_ops, len, folio);
fs/iomap/buffered-io.c
1026
__iomap_put_folio(iter, write_ops, 0, folio);
fs/iomap/buffered-io.c
1054
static bool iomap_write_end_inline(const struct iomap_iter *iter,
fs/iomap/buffered-io.c
1057
const struct iomap *iomap = &iter->iomap;
fs/iomap/buffered-io.c
1071
mark_inode_dirty(iter->inode);
fs/iomap/buffered-io.c
1079
static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
fs/iomap/buffered-io.c
1082
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/iomap/buffered-io.c
1083
loff_t pos = iter->pos;
fs/iomap/buffered-io.c
1086
return iomap_write_end_inline(iter, folio, pos, copied);
fs/iomap/buffered-io.c
1096
return __iomap_write_end(iter->inode, pos, len, copied, folio);
fs/iomap/buffered-io.c
1099
static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
fs/iomap/buffered-io.c
1104
struct address_space *mapping = iter->inode->i_mapping;
fs/iomap/buffered-io.c
1106
unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
fs/iomap/buffered-io.c
1119
offset = iter->pos & (chunk - 1);
fs/iomap/buffered-io.c
1126
if (bytes > iomap_length(iter))
fs/iomap/buffered-io.c
1127
bytes = iomap_length(iter);
fs/iomap/buffered-io.c
1144
status = iomap_write_begin(iter, write_ops, &folio, &offset,
fs/iomap/buffered-io.c
1147
iomap_write_failed(iter->inode, iter->pos, bytes);
fs/iomap/buffered-io.c
1150
if (iter->iomap.flags & IOMAP_F_STALE)
fs/iomap/buffered-io.c
1153
pos = iter->pos;
fs/iomap/buffered-io.c
1159
written = iomap_write_end(iter, bytes, copied, folio) ?
fs/iomap/buffered-io.c
1169
old_size = iter->inode->i_size;
fs/iomap/buffered-io.c
1171
i_size_write(iter->inode, pos + written);
fs/iomap/buffered-io.c
1172
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
fs/iomap/buffered-io.c
1174
__iomap_put_folio(iter, write_ops, written, folio);
fs/iomap/buffered-io.c
1177
pagecache_isize_extended(iter->inode, old_size, pos);
fs/iomap/buffered-io.c
1187
iomap_write_failed(iter->inode, pos, bytes);
fs/iomap/buffered-io.c
1198
iomap_iter_advance(iter, written);
fs/iomap/buffered-io.c
1200
} while (iov_iter_count(i) && iomap_length(iter));
fs/iomap/buffered-io.c
1210
struct iomap_iter iter = {
fs/iomap/buffered-io.c
1220
iter.flags |= IOMAP_NOWAIT;
fs/iomap/buffered-io.c
1222
iter.flags |= IOMAP_DONTCACHE;
fs/iomap/buffered-io.c
1224
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/buffered-io.c
1225
iter.status = iomap_write_iter(&iter, i, write_ops);
fs/iomap/buffered-io.c
1227
if (unlikely(iter.pos == iocb->ki_pos))
fs/iomap/buffered-io.c
1229
ret = iter.pos - iocb->ki_pos;
fs/iomap/buffered-io.c
1230
iocb->ki_pos = iter.pos;
fs/iomap/buffered-io.c
1458
static int iomap_unshare_iter(struct iomap_iter *iter,
fs/iomap/buffered-io.c
1461
struct iomap *iomap = &iter->iomap;
fs/iomap/buffered-io.c
1462
u64 bytes = iomap_length(iter);
fs/iomap/buffered-io.c
1465
if (!iomap_want_unshare_iter(iter))
fs/iomap/buffered-io.c
1466
return iomap_iter_advance(iter, bytes);
fs/iomap/buffered-io.c
1474
status = iomap_write_begin(iter, write_ops, &folio, &offset,
fs/iomap/buffered-io.c
1481
ret = iomap_write_end(iter, bytes, bytes, folio);
fs/iomap/buffered-io.c
1482
__iomap_put_folio(iter, write_ops, bytes, folio);
fs/iomap/buffered-io.c
1488
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
fs/iomap/buffered-io.c
1490
status = iomap_iter_advance(iter, bytes);
fs/iomap/buffered-io.c
1493
} while ((bytes = iomap_length(iter)) > 0);
fs/iomap/buffered-io.c
1503
struct iomap_iter iter = {
fs/iomap/buffered-io.c
1514
iter.len = min(len, size - pos);
fs/iomap/buffered-io.c
1515
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/buffered-io.c
1516
iter.status = iomap_unshare_iter(&iter, write_ops);
fs/iomap/buffered-io.c
1535
static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
fs/iomap/buffered-io.c
1538
u64 bytes = iomap_length(iter);
fs/iomap/buffered-io.c
1547
status = iomap_write_begin(iter, write_ops, &folio, &offset,
fs/iomap/buffered-io.c
1551
if (iter->iomap.flags & IOMAP_F_STALE)
fs/iomap/buffered-io.c
1556
status = iomap_iter_advance_full(iter);
fs/iomap/buffered-io.c
1561
WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
fs/iomap/buffered-io.c
1563
trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
fs/iomap/buffered-io.c
1569
ret = iomap_write_end(iter, bytes, bytes, folio);
fs/iomap/buffered-io.c
1570
__iomap_put_folio(iter, write_ops, bytes, folio);
fs/iomap/buffered-io.c
1574
status = iomap_iter_advance(iter, bytes);
fs/iomap/buffered-io.c
1577
} while ((bytes = iomap_length(iter)) > 0);
fs/iomap/buffered-io.c
1598
struct iomap_iter *iter,
fs/iomap/buffered-io.c
1603
struct address_space *mapping = iter->inode->i_mapping;
fs/iomap/buffered-io.c
1608
if (!iter->fbatch) {
fs/iomap/buffered-io.c
1613
count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
fs/iomap/buffered-io.c
1626
struct iomap_iter iter = {
fs/iomap/buffered-io.c
1645
range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
fs/iomap/buffered-io.c
1646
iter.pos + iter.len - 1);
fs/iomap/buffered-io.c
1647
while ((ret = iomap_iter(&iter, ops)) > 0) {
fs/iomap/buffered-io.c
1648
const struct iomap *srcmap = iomap_iter_srcmap(&iter);
fs/iomap/buffered-io.c
1650
if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
fs/iomap/buffered-io.c
1654
if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
fs/iomap/buffered-io.c
1661
status = iomap_zero_iter_flush_and_stale(&iter);
fs/iomap/buffered-io.c
1663
status = iomap_iter_advance_full(&iter);
fs/iomap/buffered-io.c
1665
iter.status = status;
fs/iomap/buffered-io.c
1669
iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
fs/iomap/buffered-io.c
1691
static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
fs/iomap/buffered-io.c
1694
loff_t length = iomap_length(iter);
fs/iomap/buffered-io.c
1697
if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
fs/iomap/buffered-io.c
1698
ret = __block_write_begin_int(folio, iter->pos, length, NULL,
fs/iomap/buffered-io.c
1699
&iter->iomap);
fs/iomap/buffered-io.c
1708
return iomap_iter_advance(iter, length);
fs/iomap/buffered-io.c
1714
struct iomap_iter iter = {
fs/iomap/buffered-io.c
1723
ret = folio_mkwrite_check_truncate(folio, iter.inode);
fs/iomap/buffered-io.c
1726
iter.pos = folio_pos(folio);
fs/iomap/buffered-io.c
1727
iter.len = ret;
fs/iomap/buffered-io.c
1728
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/buffered-io.c
1729
iter.status = iomap_folio_mkwrite_iter(&iter, folio);
fs/iomap/buffered-io.c
351
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
fs/iomap/buffered-io.c
354
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/iomap/buffered-io.c
358
pos >= i_size_read(iter->inode);
fs/iomap/buffered-io.c
370
static int iomap_read_inline_data(const struct iomap_iter *iter,
fs/iomap/buffered-io.c
373
const struct iomap *iomap = iomap_iter_srcmap(iter);
fs/iomap/buffered-io.c
374
size_t size = i_size_read(iter->inode) - iomap->offset;
fs/iomap/buffered-io.c
384
fserror_report_io(iter->inode, FSERR_BUFFERED_READ,
fs/iomap/buffered-io.c
389
ifs_alloc(iter->inode, folio, iter->flags);
fs/iomap/buffered-io.c
509
static int iomap_read_folio_iter(struct iomap_iter *iter,
fs/iomap/buffered-io.c
512
const struct iomap *iomap = &iter->iomap;
fs/iomap/buffered-io.c
513
loff_t pos = iter->pos;
fs/iomap/buffered-io.c
514
loff_t length = iomap_length(iter);
fs/iomap/buffered-io.c
523
ret = iomap_read_inline_data(iter, folio);
fs/iomap/buffered-io.c
526
return iomap_iter_advance(iter, length);
fs/iomap/buffered-io.c
529
ifs = ifs_alloc(iter->inode, folio, iter->flags);
fs/iomap/buffered-io.c
533
iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
fs/iomap/buffered-io.c
536
pos_diff = pos - iter->pos;
fs/iomap/buffered-io.c
540
ret = iomap_iter_advance(iter, pos_diff);
fs/iomap/buffered-io.c
548
if (iomap_block_needs_zeroing(iter, pos)) {
fs/iomap/buffered-io.c
554
ret = ctx->ops->read_folio_range(iter, ctx, plen);
fs/iomap/buffered-io.c
556
fserror_report_io(iter->inode,
fs/iomap/buffered-io.c
576
ret = iomap_iter_advance(iter, plen);
fs/iomap/buffered-io.c
580
pos = iter->pos;
fs/iomap/buffered-io.c
589
struct iomap_iter iter = {
fs/iomap/buffered-io.c
598
trace_iomap_readpage(iter.inode, 1);
fs/iomap/buffered-io.c
600
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/buffered-io.c
601
iter.status = iomap_read_folio_iter(&iter, ctx,
fs/iomap/buffered-io.c
612
static int iomap_readahead_iter(struct iomap_iter *iter,
fs/iomap/buffered-io.c
617
while (iomap_length(iter)) {
fs/iomap/buffered-io.c
619
offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
fs/iomap/buffered-io.c
629
ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
fs/iomap/buffered-io.c
657
struct iomap_iter iter = {
fs/iomap/buffered-io.c
667
while (iomap_iter(&iter, ops) > 0)
fs/iomap/buffered-io.c
668
iter.status = iomap_readahead_iter(&iter, ctx,
fs/iomap/buffered-io.c
715
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
fs/iomap/buffered-io.c
719
if (iter->flags & IOMAP_NOWAIT)
fs/iomap/buffered-io.c
721
if (iter->flags & IOMAP_DONTCACHE)
fs/iomap/buffered-io.c
725
return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
fs/iomap/buffered-io.c
726
fgp, mapping_gfp_mask(iter->inode->i_mapping));
fs/iomap/buffered-io.c
789
static int __iomap_write_begin(const struct iomap_iter *iter,
fs/iomap/buffered-io.c
794
loff_t pos = iter->pos;
fs/iomap/buffered-io.c
795
loff_t block_size = i_blocksize(iter->inode);
fs/iomap/buffered-io.c
798
unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
fs/iomap/buffered-io.c
809
if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
fs/iomap/buffered-io.c
813
ifs = ifs_alloc(iter->inode, folio, iter->flags);
fs/iomap/buffered-io.c
814
if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
fs/iomap/buffered-io.c
821
iomap_adjust_read_range(iter->inode, folio, &block_start,
fs/iomap/buffered-io.c
830
if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
fs/iomap/buffered-io.c
834
if (iomap_block_needs_zeroing(iter, block_start)) {
fs/iomap/buffered-io.c
835
if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
fs/iomap/buffered-io.c
841
if (iter->flags & IOMAP_NOWAIT)
fs/iomap/buffered-io.c
845
status = write_ops->read_folio_range(iter,
fs/iomap/buffered-io.c
848
status = iomap_bio_read_folio_range_sync(iter,
fs/iomap/buffered-io.c
851
fserror_report_io(iter->inode,
fs/iomap/buffered-io.c
863
static struct folio *__iomap_get_folio(struct iomap_iter *iter,
fs/iomap/buffered-io.c
866
loff_t pos = iter->pos;
fs/iomap/buffered-io.c
868
if (!mapping_large_folio_support(iter->inode->i_mapping))
fs/iomap/buffered-io.c
871
if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
fs/iomap/buffered-io.c
872
struct folio *folio = folio_batch_next(iter->fbatch);
fs/iomap/buffered-io.c
883
if (unlikely(folio->mapping != iter->inode->i_mapping)) {
fs/iomap/buffered-io.c
884
iter->iomap.flags |= IOMAP_F_STALE;
fs/iomap/buffered-io.c
895
return write_ops->get_folio(iter, pos, len);
fs/iomap/buffered-io.c
896
return iomap_get_folio(iter, pos, len);
fs/iomap/buffered-io.c
899
static void __iomap_put_folio(struct iomap_iter *iter,
fs/iomap/buffered-io.c
903
loff_t pos = iter->pos;
fs/iomap/buffered-io.c
906
write_ops->put_folio(iter->inode, pos, ret, folio);
fs/iomap/buffered-io.c
914
static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
fs/iomap/buffered-io.c
917
loff_t pos = iter->pos;
fs/iomap/buffered-io.c
929
static int iomap_write_begin_inline(const struct iomap_iter *iter,
fs/iomap/buffered-io.c
933
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
fs/iomap/buffered-io.c
935
return iomap_read_inline_data(iter, folio);
fs/iomap/buffered-io.c
943
static int iomap_write_begin(struct iomap_iter *iter,
fs/iomap/buffered-io.c
947
const struct iomap *srcmap = iomap_iter_srcmap(iter);
fs/iomap/buffered-io.c
949
u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
fs/iomap/buffered-io.c
960
folio = __iomap_get_folio(iter, write_ops, len);
fs/iomap/buffered-io.c
969
WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
fs/iomap/buffered-io.c
984
bool iomap_valid = write_ops->iomap_valid(iter->inode,
fs/iomap/buffered-io.c
985
&iter->iomap);
fs/iomap/buffered-io.c
987
iter->iomap.flags |= IOMAP_F_STALE;
fs/iomap/direct-io.c
294
static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
fs/iomap/direct-io.c
312
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
fs/iomap/direct-io.c
316
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
fs/iomap/direct-io.c
326
iomap_dio_submit_bio(iter, dio, bio, pos);
fs/iomap/direct-io.c
331
static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
fs/iomap/direct-io.c
340
nr_vecs = bio_iov_bounce_nr_vecs(dio->submit.iter, op);
fs/iomap/direct-io.c
342
nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
fs/iomap/direct-io.c
344
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
fs/iomap/direct-io.c
345
fscrypt_set_bio_crypt_ctx(bio, iter->inode,
fs/iomap/direct-io.c
346
pos >> iter->inode->i_blkbits, GFP_KERNEL);
fs/iomap/direct-io.c
347
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
fs/iomap/direct-io.c
348
bio->bi_write_hint = iter->inode->i_write_hint;
fs/iomap/direct-io.c
354
ret = bio_iov_iter_bounce(bio, dio->submit.iter);
fs/iomap/direct-io.c
356
ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
fs/iomap/direct-io.c
366
if ((op & REQ_ATOMIC) && WARN_ON_ONCE(ret != iomap_length(iter))) {
fs/iomap/direct-io.c
380
if (iov_iter_count(dio->submit.iter))
fs/iomap/direct-io.c
382
iomap_dio_submit_bio(iter, dio, bio, pos);
fs/iomap/direct-io.c
390
static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
fs/iomap/direct-io.c
392
const struct iomap *iomap = &iter->iomap;
fs/iomap/direct-io.c
393
struct inode *inode = iter->inode;
fs/iomap/direct-io.c
395
const loff_t length = iomap_length(iter);
fs/iomap/direct-io.c
396
loff_t pos = iter->pos;
fs/iomap/direct-io.c
42
struct iov_iter *iter;
fs/iomap/direct-io.c
442
if (length != iter->len)
fs/iomap/direct-io.c
504
orig_count = iov_iter_count(dio->submit.iter);
fs/iomap/direct-io.c
505
iov_iter_truncate(dio->submit.iter, length);
fs/iomap/direct-io.c
507
if (!iov_iter_count(dio->submit.iter))
fs/iomap/direct-io.c
522
ret = iomap_dio_zero(iter, dio, pos - pad, pad);
fs/iomap/direct-io.c
53
static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
fs/iomap/direct-io.c
535
ret = iomap_dio_bio_iter_one(iter, dio, pos, alignment, bio_opf);
fs/iomap/direct-io.c
549
} while (iov_iter_count(dio->submit.iter));
fs/iomap/direct-io.c
562
ret = iomap_dio_zero(iter, dio, pos,
fs/iomap/direct-io.c
567
iov_iter_reexpand(dio->submit.iter, orig_count - copied);
fs/iomap/direct-io.c
569
return iomap_iter_advance(iter, copied);
fs/iomap/direct-io.c
57
return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
fs/iomap/direct-io.c
573
static int iomap_dio_hole_iter(struct iomap_iter *iter, struct iomap_dio *dio)
fs/iomap/direct-io.c
575
loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
fs/iomap/direct-io.c
580
return iomap_iter_advance(iter, length);
fs/iomap/direct-io.c
586
struct iov_iter *iter = dio->submit.iter;
fs/iomap/direct-io.c
59
return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
fs/iomap/direct-io.c
603
copied = copy_from_iter(inline_data, length, iter);
fs/iomap/direct-io.c
610
copied = copy_to_iter(inline_data, length, iter);
fs/iomap/direct-io.c
618
static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
fs/iomap/direct-io.c
62
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
fs/iomap/direct-io.c
620
switch (iter->iomap.type) {
fs/iomap/direct-io.c
624
return iomap_dio_hole_iter(iter, dio);
fs/iomap/direct-io.c
627
return iomap_dio_hole_iter(iter, dio);
fs/iomap/direct-io.c
628
return iomap_dio_bio_iter(iter, dio);
fs/iomap/direct-io.c
630
return iomap_dio_bio_iter(iter, dio);
fs/iomap/direct-io.c
632
return iomap_dio_inline_iter(iter, dio);
fs/iomap/direct-io.c
670
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
fs/iomap/direct-io.c
678
.len = iov_iter_count(iter),
fs/iomap/direct-io.c
688
trace_iomap_dio_rw_begin(iocb, iter, dio_flags, done_before);
fs/iomap/direct-io.c
706
dio->submit.iter = iter;
fs/iomap/direct-io.c
712
if (iov_iter_rw(iter) == READ) {
fs/iomap/direct-io.c
716
if (user_backed_iter(iter))
fs/iomap/direct-io.c
76
dio->dops->submit_io(iter, bio, pos);
fs/iomap/direct-io.c
78
WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
fs/iomap/direct-io.c
811
if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
fs/iomap/direct-io.c
812
iov_iter_revert(iter, iomi.pos - dio->i_size);
fs/iomap/direct-io.c
884
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
fs/iomap/direct-io.c
890
dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
fs/iomap/fiemap.c
110
while ((ret = iomap_iter(&iter, ops)) > 0) {
fs/iomap/fiemap.c
111
if (iter.iomap.type == IOMAP_MAPPED)
fs/iomap/fiemap.c
112
bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift;
fs/iomap/fiemap.c
39
static int iomap_fiemap_iter(struct iomap_iter *iter,
fs/iomap/fiemap.c
44
if (iter->iomap.type == IOMAP_HOLE)
fs/iomap/fiemap.c
48
*prev = iter->iomap;
fs/iomap/fiemap.c
55
return iomap_iter_advance_full(iter);
fs/iomap/fiemap.c
61
struct iomap_iter iter = {
fs/iomap/fiemap.c
72
ret = fiemap_prep(inode, fi, start, &iter.len, 0);
fs/iomap/fiemap.c
76
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/fiemap.c
77
iter.status = iomap_fiemap_iter(&iter, fi, &prev);
fs/iomap/fiemap.c
97
struct iomap_iter iter = {
fs/iomap/internal.h
10
int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
fs/iomap/internal.h
13
static inline int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
fs/iomap/iter.c
100
ret = iter->status;
fs/iomap/iter.c
101
else if (iter->len == 0 || (!advanced && !stale))
fs/iomap/iter.c
105
iomap_iter_reset_iomap(iter);
fs/iomap/iter.c
11
if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
fs/iomap/iter.c
110
ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
fs/iomap/iter.c
111
&iter->iomap, &iter->srcmap);
fs/iomap/iter.c
114
iomap_iter_done(iter);
fs/iomap/iter.c
12
folio_batch_release(iter->fbatch);
fs/iomap/iter.c
13
folio_batch_reinit(iter->fbatch);
fs/iomap/iter.c
14
iter->iomap.flags &= ~IOMAP_F_FOLIO_BATCH;
fs/iomap/iter.c
17
iter->status = 0;
fs/iomap/iter.c
18
memset(&iter->iomap, 0, sizeof(iter->iomap));
fs/iomap/iter.c
19
memset(&iter->srcmap, 0, sizeof(iter->srcmap));
fs/iomap/iter.c
23
int iomap_iter_advance(struct iomap_iter *iter, u64 count)
fs/iomap/iter.c
25
if (WARN_ON_ONCE(count > iomap_length(iter)))
fs/iomap/iter.c
27
iter->pos += count;
fs/iomap/iter.c
28
iter->len -= count;
fs/iomap/iter.c
32
static inline void iomap_iter_done(struct iomap_iter *iter)
fs/iomap/iter.c
34
WARN_ON_ONCE(iter->iomap.offset > iter->pos);
fs/iomap/iter.c
35
WARN_ON_ONCE(iter->iomap.length == 0);
fs/iomap/iter.c
36
WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
fs/iomap/iter.c
37
WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_STALE);
fs/iomap/iter.c
39
iter->iter_start_pos = iter->pos;
fs/iomap/iter.c
41
trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
fs/iomap/iter.c
42
if (iter->srcmap.type != IOMAP_HOLE)
fs/iomap/iter.c
43
trace_iomap_iter_srcmap(iter->inode, &iter->srcmap);
fs/iomap/iter.c
61
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
fs/iomap/iter.c
63
bool stale = iter->iomap.flags & IOMAP_F_STALE;
fs/iomap/iter.c
68
trace_iomap_iter(iter, ops, _RET_IP_);
fs/iomap/iter.c
70
if (!iter->iomap.length)
fs/iomap/iter.c
77
advanced = iter->pos - iter->iter_start_pos;
fs/iomap/iter.c
78
olen = iter->len + advanced;
fs/iomap/iter.c
81
ret = ops->iomap_end(iter->inode, iter->iter_start_pos,
fs/iomap/iter.c
82
iomap_length_trim(iter, iter->iter_start_pos,
fs/iomap/iter.c
84
advanced, iter->flags, &iter->iomap);
fs/iomap/iter.c
9
static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
fs/iomap/iter.c
90
if (WARN_ON_ONCE(iter->status > 0))
fs/iomap/iter.c
91
iter->status = -EIO;
fs/iomap/iter.c
99
if (iter->status < 0)
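
The fs/iomap/iter.c entries above carry the whole iteration engine, and nearly every caller in this listing drives it with the same loop. A minimal sketch of that pattern follows, assuming a hypothetical my_op()/my_op_iter() pair; only the loop shape and the iomap_iter()/iomap_length()/iomap_iter_advance() calls are taken from the entries themselves.

#include <linux/iomap.h>

/* Hypothetical per-extent worker: act on the current mapping, then
 * consume it so the next iomap_iter() call maps the following extent. */
static int my_op_iter(struct iomap_iter *iter)
{
	u64 bytes = iomap_length(iter);	/* bytes left in this mapping */

	/* ... operate on iter->iomap over [iter->pos, iter->pos + bytes) ... */

	return iomap_iter_advance(iter, bytes);	/* moves pos, shrinks len */
}

static int my_op(struct inode *inode, loff_t pos, loff_t len,
		 const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
	};
	int ret;

	/* > 0: a new extent is mapped; <= 0: done, or an error to return */
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = my_op_iter(&iter);
	return ret;
}
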
fs/iomap/seek.c
12
loff_t length = iomap_length(iter);
fs/iomap/seek.c
14
switch (iter->iomap.type) {
fs/iomap/seek.c
16
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
fs/iomap/seek.c
17
iter->pos, iter->pos + length, SEEK_HOLE);
fs/iomap/seek.c
18
if (*hole_pos == iter->pos + length)
fs/iomap/seek.c
19
return iomap_iter_advance(iter, length);
fs/iomap/seek.c
22
*hole_pos = iter->pos;
fs/iomap/seek.c
25
return iomap_iter_advance(iter, length);
fs/iomap/seek.c
33
struct iomap_iter iter = {
fs/iomap/seek.c
44
iter.len = size - pos;
fs/iomap/seek.c
45
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/seek.c
46
iter.status = iomap_seek_hole_iter(&iter, &pos);
fs/iomap/seek.c
49
if (iter.len) /* found hole before EOF */
fs/iomap/seek.c
55
static int iomap_seek_data_iter(struct iomap_iter *iter,
fs/iomap/seek.c
58
loff_t length = iomap_length(iter);
fs/iomap/seek.c
60
switch (iter->iomap.type) {
fs/iomap/seek.c
62
return iomap_iter_advance(iter, length);
fs/iomap/seek.c
64
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
fs/iomap/seek.c
65
iter->pos, iter->pos + length, SEEK_DATA);
fs/iomap/seek.c
67
return iomap_iter_advance(iter, length);
fs/iomap/seek.c
70
*hole_pos = iter->pos;
fs/iomap/seek.c
79
struct iomap_iter iter = {
fs/iomap/seek.c
9
static int iomap_seek_hole_iter(struct iomap_iter *iter,
fs/iomap/seek.c
90
iter.len = size - pos;
fs/iomap/seek.c
91
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/seek.c
92
iter.status = iomap_seek_data_iter(&iter, &pos);
fs/iomap/seek.c
95
if (iter.len) /* found data before EOF */
fs/iomap/swapfile.c
133
return iomap_iter_advance_full(iter);
fs/iomap/swapfile.c
145
struct iomap_iter iter = {
fs/iomap/swapfile.c
166
while ((ret = iomap_iter(&iter, ops)) > 0)
fs/iomap/swapfile.c
167
iter.status = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
fs/iomap/swapfile.c
94
static int iomap_swapfile_iter(struct iomap_iter *iter,
fs/iomap/trace.h
219
TP_PROTO(struct iomap_iter *iter, const void *ops,
fs/iomap/trace.h
221
TP_ARGS(iter, ops, caller),
fs/iomap/trace.h
233
__entry->dev = iter->inode->i_sb->s_dev;
fs/iomap/trace.h
234
__entry->ino = iter->inode->i_ino;
fs/iomap/trace.h
235
__entry->pos = iter->pos;
fs/iomap/trace.h
236
__entry->length = iomap_length(iter);
fs/iomap/trace.h
237
__entry->status = iter->status;
fs/iomap/trace.h
238
__entry->flags = iter->flags;
fs/iomap/trace.h
255
TP_PROTO(struct kiocb *iocb, struct iov_iter *iter,
fs/iomap/trace.h
257
TP_ARGS(iocb, iter, dio_flags, done_before),
fs/iomap/trace.h
274
__entry->count = iov_iter_count(iter);
fs/jfs/inode.c
331
static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/jfs/inode.c
336
size_t count = iov_iter_count(iter);
fs/jfs/inode.c
339
ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);
fs/jfs/inode.c
345
if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
fs/kernfs/file.c
239
static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/kernfs/file.c
242
ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
fs/kernfs/file.c
279
if (copy_to_iter(buf, len, iter) != len) {
fs/kernfs/file.c
294
static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/kernfs/file.c
297
return seq_read_iter(iocb, iter);
fs/kernfs/file.c
298
return kernfs_file_read_iter(iocb, iter);
fs/kernfs/file.c
311
static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/kernfs/file.c
314
ssize_t len = iov_iter_count(iter);
fs/kernfs/file.c
333
if (copy_from_iter(buf, len, iter) != len) {
fs/libfs.c
1650
ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/libfs.c
2082
ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
fs/locks.c
2936
struct locks_iterator *iter = f->private;
fs/locks.c
2953
lock_get_status(f, cur, iter->li_pos, "-> ", level);
fs/locks.c
2955
lock_get_status(f, cur, iter->li_pos, "", level);
fs/locks.c
3019
struct locks_iterator *iter = f->private;
fs/locks.c
3021
iter->li_pos = *pos + 1;
fs/locks.c
3024
return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
fs/locks.c
3029
struct locks_iterator *iter = f->private;
fs/locks.c
3031
++iter->li_pos;
fs/locks.c
3032
return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
fs/netfs/buffered_read.c
114
size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
fs/netfs/buffered_read.c
123
subreq->io_iter = rreq->buffer.iter;
fs/netfs/buffered_read.c
456
iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
fs/netfs/buffered_read.c
783
ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/netfs/buffered_read.c
795
ret = filemap_read(iocb, iter, 0);
fs/netfs/buffered_read.c
823
ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/netfs/buffered_read.c
829
return netfs_unbuffered_read_iter(iocb, iter);
fs/netfs/buffered_read.c
831
return netfs_buffered_read_iter(iocb, iter);
fs/netfs/buffered_write.c
106
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
fs/netfs/buffered_write.c
118
.range_end = iocb->ki_pos + iter->count,
fs/netfs/buffered_write.c
132
ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
fs/netfs/buffered_write.c
138
wreq = netfs_begin_writethrough(iocb, iter->count);
fs/netfs/buffered_write.c
162
part = min(max_chunk - offset, iov_iter_count(iter));
fs/netfs/buffered_write.c
176
if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
fs/netfs/buffered_write.c
222
copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
fs/netfs/buffered_write.c
236
copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
fs/netfs/buffered_write.c
248
copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
fs/netfs/buffered_write.c
253
iov_iter_revert(iter, copied);
fs/netfs/buffered_write.c
283
copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
fs/netfs/buffered_write.c
295
copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
fs/netfs/buffered_write.c
307
iov_iter_revert(iter, copied);
fs/netfs/buffered_write.c
325
copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
fs/netfs/buffered_write.c
379
} while (iov_iter_count(iter));
fs/netfs/direct_read.c
169
ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
fs/netfs/direct_read.c
173
size_t orig_count = iov_iter_count(iter);
fs/netfs/direct_read.c
205
if (user_backed_iter(iter)) {
fs/netfs/direct_read.c
206
ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
fs/netfs/direct_read.c
209
rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
fs/netfs/direct_read.c
211
rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
fs/netfs/direct_read.c
212
rreq->len = iov_iter_count(&rreq->buffer.iter);
fs/netfs/direct_read.c
214
rreq->buffer.iter = *iter;
fs/netfs/direct_read.c
217
iov_iter_advance(iter, orig_count);
fs/netfs/direct_read.c
256
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/netfs/direct_read.c
261
if (!iter->count)
fs/netfs/direct_read.c
266
ret = netfs_unbuffered_read_iter_locked(iocb, iter);
fs/netfs/direct_read.c
28
size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
fs/netfs/direct_read.c
39
subreq->io_iter = rreq->buffer.iter;
fs/netfs/direct_read.c
41
iov_iter_advance(&rreq->buffer.iter, subreq->len);
fs/netfs/direct_write.c
170
iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
fs/netfs/direct_write.c
179
subreq->io_iter = wreq->buffer.iter;
fs/netfs/direct_write.c
219
ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
fs/netfs/direct_write.c
224
unsigned long long end = start + iov_iter_count(iter);
fs/netfs/direct_write.c
226
size_t len = iov_iter_count(iter);
fs/netfs/direct_write.c
259
if (user_backed_iter(iter)) {
fs/netfs/direct_write.c
260
n = netfs_extract_user_iter(iter, len, &wreq->buffer.iter, 0);
fs/netfs/direct_write.c
265
wreq->direct_bv = (struct bio_vec *)wreq->buffer.iter.bvec;
fs/netfs/direct_write.c
267
wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
fs/netfs/direct_write.c
274
wreq->buffer.iter = *iter;
fs/netfs/direct_write.c
277
wreq->len = iov_iter_count(&wreq->buffer.iter);
fs/netfs/direct_write.c
76
iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
fs/netfs/fscache_io.c
218
struct iov_iter iter;
fs/netfs/fscache_io.c
251
iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
fs/netfs/fscache_io.c
252
fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
fs/netfs/iterator.c
109
static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
fs/netfs/iterator.c
112
const struct bio_vec *bvecs = iter->bvec;
fs/netfs/iterator.c
113
unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
fs/netfs/iterator.c
114
size_t len, span = 0, n = iter->count;
fs/netfs/iterator.c
115
size_t skip = iter->iov_offset + start_offset;
fs/netfs/iterator.c
117
if (WARN_ON(!iov_iter_is_bvec(iter)) ||
fs/netfs/iterator.c
150
static size_t netfs_limit_kvec(const struct iov_iter *iter, size_t start_offset,
fs/netfs/iterator.c
153
const struct kvec *kvecs = iter->kvec;
fs/netfs/iterator.c
154
unsigned int nkv = iter->nr_segs, ix = 0, nsegs = 0;
fs/netfs/iterator.c
155
size_t len, span = 0, n = iter->count;
fs/netfs/iterator.c
156
size_t skip = iter->iov_offset + start_offset;
fs/netfs/iterator.c
158
if (WARN_ON(!iov_iter_is_kvec(iter)) ||
fs/netfs/iterator.c
192
static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offset,
fs/netfs/iterator.c
197
loff_t pos = iter->xarray_start + iter->iov_offset;
fs/netfs/iterator.c
199
size_t span = 0, n = iter->count;
fs/netfs/iterator.c
201
XA_STATE(xas, iter->xarray, index);
fs/netfs/iterator.c
203
if (WARN_ON(!iov_iter_is_xarray(iter)) ||
fs/netfs/iterator.c
237
static size_t netfs_limit_folioq(const struct iov_iter *iter, size_t start_offset,
fs/netfs/iterator.c
240
const struct folio_queue *folioq = iter->folioq;
fs/netfs/iterator.c
242
unsigned int slot = iter->folioq_slot;
fs/netfs/iterator.c
243
size_t span = 0, n = iter->count;
fs/netfs/iterator.c
245
if (WARN_ON(!iov_iter_is_folioq(iter)) ||
fs/netfs/iterator.c
256
start_offset += iter->iov_offset;
fs/netfs/iterator.c
280
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
fs/netfs/iterator.c
283
if (iov_iter_is_folioq(iter))
fs/netfs/iterator.c
284
return netfs_limit_folioq(iter, start_offset, max_size, max_segs);
fs/netfs/iterator.c
285
if (iov_iter_is_bvec(iter))
fs/netfs/iterator.c
286
return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
fs/netfs/iterator.c
287
if (iov_iter_is_xarray(iter))
fs/netfs/iterator.c
288
return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
fs/netfs/iterator.c
289
if (iov_iter_is_kvec(iter))
fs/netfs/iterator.c
290
return netfs_limit_kvec(iter, start_offset, max_size, max_segs);
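
netfs_limit_iter() above fans out on the iterator's backing type (iov_iter_is_folioq/bvec/xarray/kvec), and each per-type walker measures a span using the raw fields shown (iter->bvec, iter->nr_segs, iter->iov_offset, iter->count). A condensed sketch of the bvec case built on those fields; it is not the exact upstream logic, just the walk it performs.

#include <linux/uio.h>
#include <linux/minmax.h>

/* How many bytes can be taken from a bvec-backed iterator without
 * exceeding max_size bytes or max_segs segments. */
static size_t span_of_bvec(const struct iov_iter *iter, size_t max_size,
			   unsigned int max_segs)
{
	const struct bio_vec *bv = iter->bvec;
	size_t skip = iter->iov_offset;	/* offset into the first segment */
	size_t n = iter->count, span = 0;
	unsigned int i, nsegs = 0;

	if (WARN_ON(!iov_iter_is_bvec(iter)))
		return 0;

	for (i = 0; i < iter->nr_segs && n && nsegs < max_segs; i++) {
		size_t len = min_t(size_t, bv[i].bv_len - skip, n);

		skip = 0;	/* only the first segment is entered mid-way */
		span += len;
		n -= len;
		nsegs++;
		if (span >= max_size)
			return max_size;
	}
	return span;
}
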
fs/netfs/read_pgpriv2.c
73
creq->buffer.iter.iov_offset = cache->submit_off;
fs/netfs/read_pgpriv2.c
86
creq->buffer.iter.iov_offset = 0;
fs/netfs/read_single.c
103
subreq->io_iter = rreq->buffer.iter;
fs/netfs/read_single.c
165
ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter)
fs/netfs/read_single.c
171
rreq = netfs_alloc_request(inode->i_mapping, file, 0, iov_iter_count(iter),
fs/netfs/read_single.c
183
rreq->buffer.iter = *iter;
fs/netfs/rolling_buffer.c
100
roll->iter.folioq_slot = 0;
fs/netfs/rolling_buffer.c
145
WRITE_ONCE(roll->iter.count, roll->iter.count + size);
fs/netfs/rolling_buffer.c
170
WRITE_ONCE(roll->iter.count, roll->iter.count + size);
fs/netfs/rolling_buffer.c
73
iov_iter_folio_queue(&roll->iter, direction, fq, 0, 0, 0);
fs/netfs/rolling_buffer.c
97
if (roll->iter.folioq == head &&
fs/netfs/rolling_buffer.c
98
roll->iter.folioq_slot == folioq_nr_slots(head)) {
fs/netfs/rolling_buffer.c
99
roll->iter.folioq = fq;
fs/netfs/write_issue.c
162
struct iov_iter *wreq_iter = &wreq->buffer.iter;
fs/netfs/write_issue.c
650
wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end);
fs/netfs/write_issue.c
802
wreq->buffer.iter.iov_offset = 0;
fs/netfs/write_issue.c
824
struct iov_iter *iter)
fs/netfs/write_issue.c
829
size_t size = iov_iter_count(iter);
fs/netfs/write_issue.c
832
if (WARN_ON_ONCE(!iov_iter_is_folioq(iter)))
fs/netfs/write_issue.c
857
for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) {
fs/nfs/direct.c
1001
file, iov_iter_count(iter), (long long) iocb->ki_pos);
fs/nfs/direct.c
1005
result = iov_iter_count(iter);
fs/nfs/direct.c
1007
result = generic_write_checks(iocb, iter);
fs/nfs/direct.c
1014
end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
fs/nfs/direct.c
1039
requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
fs/nfs/direct.c
1051
requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
fs/nfs/direct.c
1070
iov_iter_revert(iter, requested);
fs/nfs/direct.c
155
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
fs/nfs/direct.c
159
if (iov_iter_rw(iter) == READ)
fs/nfs/direct.c
160
ret = nfs_file_direct_read(iocb, iter, true);
fs/nfs/direct.c
162
ret = nfs_file_direct_write(iocb, iter, true);
fs/nfs/direct.c
341
struct iov_iter *iter,
fs/nfs/direct.c
356
while (iov_iter_count(iter)) {
fs/nfs/direct.c
362
result = iov_iter_get_pages_alloc2(iter, &pagevec,
fs/nfs/direct.c
432
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
fs/nfs/direct.c
441
size_t count = iov_iter_count(iter);
fs/nfs/direct.c
472
if (user_backed_iter(iter))
fs/nfs/direct.c
487
requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
fs/nfs/direct.c
498
iov_iter_revert(iter, requested);
fs/nfs/direct.c
864
struct iov_iter *iter,
fs/nfs/direct.c
883
NFS_I(inode)->write_io += iov_iter_count(iter);
fs/nfs/direct.c
884
while (iov_iter_count(iter)) {
fs/nfs/direct.c
890
result = iov_iter_get_pages_alloc2(iter, &pagevec,
fs/nfs/direct.c
988
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
fs/nfs/localio.c
408
nfs_local_iter_setup(struct iov_iter *iter, int rw, struct bio_vec *bvec,
fs/nfs/localio.c
412
iov_iter_bvec(iter, rw, bvec, nvecs, total);
fs/nfs/localio.c
414
iov_iter_advance(iter, start);
fs/nfs/localio.c
415
iov_iter_truncate(iter, len);
fs/nfs/nfs42proc.c
242
struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
fs/nfs/nfs42proc.c
255
list_for_each_entry(iter,
fs/nfs/nfs42proc.c
258
if (memcmp(&res->write_res.stateid, &iter->stateid,
fs/nfs/nfs42proc.c
261
tmp_copy = iter;
fs/nfs/nfs42proc.c
262
list_del(&iter->copies);
fs/nfs/nfstrace.h
1092
const struct iov_iter *iter
fs/nfs/nfstrace.h
1095
TP_ARGS(iocb, iter),
fs/nfs/nfstrace.h
1116
__entry->count = iov_iter_count(iter);
fs/nfs/nfstrace.h
1134
const struct iov_iter *iter \
fs/nfs/nfstrace.h
1136
TP_ARGS(iocb, iter))
fs/nfsd/filecache.c
867
struct rhashtable_iter iter;
fs/nfsd/filecache.c
879
rhltable_walk_enter(&nfsd_file_rhltable, &iter);
fs/nfsd/filecache.c
881
rhashtable_walk_start(&iter);
fs/nfsd/filecache.c
883
nf = rhashtable_walk_next(&iter);
fs/nfsd/filecache.c
887
nf = rhashtable_walk_next(&iter);
fs/nfsd/filecache.c
890
rhashtable_walk_stop(&iter);
fs/nfsd/filecache.c
892
rhashtable_walk_exit(&iter);
fs/nfsd/vfs.c
1118
struct iov_iter iter;
fs/nfsd/vfs.c
1146
iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v,
fs/nfsd/vfs.c
1149
host_err = vfs_iocb_iter_read(nf->nf_file, &kiocb, &iter);
fs/nfsd/vfs.c
1197
struct iov_iter iter;
fs/nfsd/vfs.c
1236
iov_iter_bvec(&iter, ITER_DEST, rqstp->rq_bvec, v, *count - total);
fs/nfsd/vfs.c
1237
host_err = vfs_iocb_iter_read(file, &kiocb, &iter);
fs/nfsd/vfs.c
1279
struct iov_iter iter;
fs/nfsd/vfs.c
1284
iov_iter_bvec_offset(const struct iov_iter *iter)
fs/nfsd/vfs.c
1286
return (unsigned long)(iter->bvec->bv_offset + iter->iov_offset);
fs/nfsd/vfs.c
1295
iov_iter_bvec(&segment->iter, ITER_SOURCE, bvec, nvecs, total);
fs/nfsd/vfs.c
1297
iov_iter_advance(&segment->iter, start);
fs/nfsd/vfs.c
1298
iov_iter_truncate(&segment->iter, len);
fs/nfsd/vfs.c
1349
if (iov_iter_bvec_offset(&segments[nsegs].iter) & (mem_align - 1))
fs/nfsd/vfs.c
1385
segments[i].iter.count);
fs/nfsd/vfs.c
1388
segments[i].iter.count);
fs/nfsd/vfs.c
1397
host_err = vfs_iocb_iter_write(file, kiocb, &segments[i].iter);
fs/nfsd/vfs.c
1401
if (host_err < segments[i].iter.count)
fs/nfsd/vfs.c
1435
struct iov_iter iter;
fs/nfsd/vfs.c
1497
iov_iter_bvec(&iter, ITER_SOURCE, rqstp->rq_bvec, nvecs, *cnt);
fs/nfsd/vfs.c
1498
host_err = vfs_iocb_iter_write(file, &kiocb, &iter);
fs/nilfs2/inode.c
261
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/nilfs2/inode.c
265
if (iov_iter_rw(iter) == WRITE)
fs/nilfs2/inode.c
269
return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
fs/ntfs3/file.c
51
static bool ntfs_should_use_dio(struct kiocb *iocb, struct iov_iter *iter)
fs/ntfs3/file.c
59
return IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), dio_align);
fs/ntfs3/file.c
826
static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/ntfs3/file.c
831
size_t bytes = iov_iter_count(iter);
fs/ntfs3/file.c
855
!ntfs_should_use_dio(iocb, iter)) {
fs/ntfs3/file.c
857
return generic_file_read_iter(iocb, iter);
fs/ntfs3/file.c
886
err = iomap_dio_rw(iocb, iter, &ntfs_iomap_ops, NULL, dio_flags,
fs/ntfs3/file.c
895
iov_iter_revert(iter, to_zero);
fs/ntfs3/file.c
896
iov_iter_zero(to_zero, iter);
fs/ntfs3/file.c
901
iov_iter_zero(bytes, iter);
fs/ntfs3/index.c
1211
size_t iter = 0;
fs/ntfs3/index.c
1247
if (iter++ >= 1000)
fs/ntfs3/inode.c
611
static int ntfs_iomap_bio_read_folio_range(const struct iomap_iter *iter,
fs/ntfs3/inode.c
616
const struct iomap *iomap = &iter->iomap;
fs/ntfs3/inode.c
617
loff_t pos = iter->pos;
fs/ntfs3/inode.c
619
loff_t length = iomap_length(iter);
fs/ocfs2/aops.c
2416
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/ocfs2/aops.c
2431
if (iocb->ki_pos + iter->count > i_size_read(inode) &&
fs/ocfs2/aops.c
2435
if (iov_iter_rw(iter) == READ)
fs/ocfs2/aops.c
2441
iter, get_block,
fs/ocfs2/dlm/dlmcommon.h
1095
struct dlm_node_iter *iter)
fs/ocfs2/dlm/dlmcommon.h
1097
bitmap_copy(iter->node_map, map, O2NM_MAX_NODES);
fs/ocfs2/dlm/dlmcommon.h
1098
iter->curnode = -1;
fs/ocfs2/dlm/dlmcommon.h
1101
static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
fs/ocfs2/dlm/dlmcommon.h
1104
bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
fs/ocfs2/dlm/dlmcommon.h
1106
iter->curnode = O2NM_MAX_NODES;
fs/ocfs2/dlm/dlmcommon.h
1109
iter->curnode = bit;
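
dlm_node_iter in the dlmcommon.h lines above is a plain bitmap cursor: init copies the node map and parks the cursor at -1, next hands back each set bit in ascending order. Reassembled below from those fragments; the struct layout, the O2NM_MAX_NODES value, and the -ENOENT exhaustion return are filled in from memory and abbreviated.

#include <linux/bitmap.h>
#include <linux/errno.h>

#define O2NM_MAX_NODES 255	/* assumed; defined in ocfs2 cluster headers */

struct dlm_node_iter {
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};

static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	bitmap_copy(iter->node_map, map, O2NM_MAX_NODES);
	iter->curnode = -1;	/* so the first find starts at bit 0 */
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit = find_next_bit(iter->node_map, O2NM_MAX_NODES,
				iter->curnode + 1);

	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;	/* park; stays exhausted */
		return -ENOENT;
	}
	iter->curnode = bit;
	return bit;
}

/* Typical caller, as in the dlmmaster.c/dlmrecovery.c entries below:
 *	dlm_node_iter_init(dlm->domain_map, &iter);
 *	while ((nodenum = dlm_node_iter_next(&iter)) >= 0)
 *		... message nodenum ...
 */
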
fs/ocfs2/dlm/dlmdebug.c
492
struct dlm_lock_resource *res = NULL, *iter;
fs/ocfs2/dlm/dlmdebug.c
507
list_for_each_entry(iter, track_list, tracking) {
fs/ocfs2/dlm/dlmdebug.c
508
if (&iter->tracking != &dlm->tracking_list) {
fs/ocfs2/dlm/dlmdebug.c
509
dlm_lockres_get(iter);
fs/ocfs2/dlm/dlmdebug.c
510
res = iter;
fs/ocfs2/dlm/dlmdomain.c
407
struct hlist_node *iter;
fs/ocfs2/dlm/dlmdomain.c
419
iter = bucket->first;
fs/ocfs2/dlm/dlmdomain.c
420
while (iter) {
fs/ocfs2/dlm/dlmdomain.c
422
res = hlist_entry(iter, struct dlm_lock_resource,
fs/ocfs2/dlm/dlmdomain.c
433
iter = res->hash_node.next;
fs/ocfs2/dlm/dlmmaster.c
1162
static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
fs/ocfs2/dlm/dlmmaster.c
1169
iter->curnode = -1;
fs/ocfs2/dlm/dlmmaster.c
1170
iter->orig_bm = orig_bm;
fs/ocfs2/dlm/dlmmaster.c
1171
iter->cur_bm = cur_bm;
fs/ocfs2/dlm/dlmmaster.c
1174
p1 = *(iter->orig_bm + i);
fs/ocfs2/dlm/dlmmaster.c
1175
p2 = *(iter->cur_bm + i);
fs/ocfs2/dlm/dlmmaster.c
1176
iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
fs/ocfs2/dlm/dlmmaster.c
1180
static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
fs/ocfs2/dlm/dlmmaster.c
1185
if (iter->curnode >= O2NM_MAX_NODES)
fs/ocfs2/dlm/dlmmaster.c
1188
bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
fs/ocfs2/dlm/dlmmaster.c
1189
iter->curnode+1);
fs/ocfs2/dlm/dlmmaster.c
1191
iter->curnode = O2NM_MAX_NODES;
fs/ocfs2/dlm/dlmmaster.c
1196
if (test_bit(bit, iter->orig_bm))
fs/ocfs2/dlm/dlmmaster.c
1201
iter->curnode = bit;
fs/ocfs2/dlm/dlmmaster.c
1647
struct dlm_node_iter iter;
fs/ocfs2/dlm/dlmmaster.c
1663
dlm_node_iter_init(nodemap, &iter);
fs/ocfs2/dlm/dlmmaster.c
1664
while ((to = dlm_node_iter_next(&iter)) >= 0) {
fs/ocfs2/dlm/dlmmaster.c
2157
struct dlm_node_iter iter;
fs/ocfs2/dlm/dlmmaster.c
2163
dlm_node_iter_init(dlm->domain_map, &iter);
fs/ocfs2/dlm/dlmmaster.c
2166
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
fs/ocfs2/dlm/dlmmaster.c
3014
struct dlm_node_iter *iter)
fs/ocfs2/dlm/dlmmaster.c
3029
while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
fs/ocfs2/dlm/dlmmaster.c
3039
clear_bit(nodenum, iter->node_map);
fs/ocfs2/dlm/dlmmaster.c
3054
clear_bit(nodenum, iter->node_map);
fs/ocfs2/dlm/dlmmaster.c
3403
struct dlm_node_iter iter;
fs/ocfs2/dlm/dlmmaster.c
3407
dlm_node_iter_init(dlm->domain_map, &iter);
fs/ocfs2/dlm/dlmmaster.c
3408
clear_bit(old_master, iter.node_map);
fs/ocfs2/dlm/dlmmaster.c
3409
clear_bit(dlm->node_num, iter.node_map);
fs/ocfs2/dlm/dlmmaster.c
3421
dlm->node_num, &iter);
fs/ocfs2/dlm/dlmmaster.c
3431
ret = dlm_do_assert_master(dlm, res, iter.node_map,
fs/ocfs2/dlm/dlmmaster.c
3439
bitmap_zero(iter.node_map, O2NM_MAX_NODES);
fs/ocfs2/dlm/dlmmaster.c
3440
set_bit(old_master, iter.node_map);
fs/ocfs2/dlm/dlmmaster.c
3443
ret = dlm_do_assert_master(dlm, res, iter.node_map,
fs/ocfs2/dlm/dlmmaster.c
711
struct dlm_node_iter iter;
fs/ocfs2/dlm/dlmmaster.c
934
dlm_node_iter_init(mle->vote_map, &iter);
fs/ocfs2/dlm/dlmmaster.c
935
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
fs/ocfs2/dlm/dlmrecovery.c
1081
struct list_head *iter, *queue = &res->granted;
fs/ocfs2/dlm/dlmrecovery.c
1085
list_for_each(iter, queue)
fs/ocfs2/dlm/dlmrecovery.c
1603
struct dlm_node_iter iter;
fs/ocfs2/dlm/dlmrecovery.c
1633
dlm_node_iter_init(dlm->domain_map, &iter);
fs/ocfs2/dlm/dlmrecovery.c
1636
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
fs/ocfs2/dlm/dlmrecovery.c
1790
struct list_head *queue, *iter;
fs/ocfs2/dlm/dlmrecovery.c
1833
list_for_each(iter, tmpq) {
fs/ocfs2/dlm/dlmrecovery.c
1834
lock = list_entry(iter,
fs/ocfs2/dlm/dlmrecovery.c
2671
struct dlm_node_iter iter;
fs/ocfs2/dlm/dlmrecovery.c
2678
dlm_node_iter_init(dlm->domain_map, &iter);
fs/ocfs2/dlm/dlmrecovery.c
2681
clear_bit(dead_node, iter.node_map);
fs/ocfs2/dlm/dlmrecovery.c
2687
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
fs/ocfs2/dlm/dlmrecovery.c
2833
struct dlm_node_iter iter;
fs/ocfs2/dlm/dlmrecovery.c
2842
dlm_node_iter_init(dlm->domain_map, &iter);
fs/ocfs2/dlm/dlmrecovery.c
2852
while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
fs/ocfs2/dlm/dlmrecovery.c
2877
iter.curnode = -1;
fs/ocfs2/dlm/dlmunlock.c
395
struct dlm_lock *lock = NULL, *iter;
fs/ocfs2/dlm/dlmunlock.c
463
list_for_each_entry(iter, queue, list) {
fs/ocfs2/dlm/dlmunlock.c
464
if (iter->ml.cookie == unlock->cookie &&
fs/ocfs2/dlm/dlmunlock.c
465
iter->ml.node == unlock->node_idx) {
fs/ocfs2/dlm/dlmunlock.c
466
dlm_lock_get(iter);
fs/ocfs2/dlm/dlmunlock.c
467
lock = iter;
fs/ocfs2/dlmglue.c
3056
struct ocfs2_lock_res *iter, *ret = NULL;
fs/ocfs2/dlmglue.c
3061
list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
fs/ocfs2/dlmglue.c
3063
if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
fs/ocfs2/dlmglue.c
3070
if (iter->l_ops != NULL) {
fs/ocfs2/dlmglue.c
3071
ret = iter;
fs/ocfs2/dlmglue.c
3082
struct ocfs2_lock_res *iter;
fs/ocfs2/dlmglue.c
3085
iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
fs/ocfs2/dlmglue.c
3086
if (iter) {
fs/ocfs2/dlmglue.c
3094
priv->p_tmp_res = *iter;
fs/ocfs2/dlmglue.c
3095
iter = &priv->p_tmp_res;
fs/ocfs2/dlmglue.c
3099
return iter;
fs/ocfs2/dlmglue.c
3109
struct ocfs2_lock_res *iter = v;
fs/ocfs2/dlmglue.c
3114
iter = ocfs2_dlm_next_res(iter, priv);
fs/ocfs2/dlmglue.c
3116
if (iter) {
fs/ocfs2/dlmglue.c
3117
list_add(&dummy->l_debug_list, &iter->l_debug_list);
fs/ocfs2/dlmglue.c
3118
priv->p_tmp_res = *iter;
fs/ocfs2/dlmglue.c
3119
iter = &priv->p_tmp_res;
fs/ocfs2/dlmglue.c
3123
return iter;
fs/ocfs2/journal.c
2143
struct inode *iter;
fs/ocfs2/journal.c
2157
iter = ocfs2_iget(p->osb, ino,
fs/ocfs2/journal.c
2159
if (IS_ERR(iter))
fs/ocfs2/journal.c
2164
OCFS2_I(iter)->ip_flags |= OCFS2_INODE_DIO_ORPHAN_ENTRY;
fs/ocfs2/journal.c
2168
if (OCFS2_I(iter)->ip_next_orphan) {
fs/ocfs2/journal.c
2169
iput(iter);
fs/ocfs2/journal.c
2173
trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno);
fs/ocfs2/journal.c
2176
OCFS2_I(iter)->ip_next_orphan = p->head;
fs/ocfs2/journal.c
2177
p->head = iter;
fs/ocfs2/journal.c
2288
struct inode *iter;
fs/ocfs2/journal.c
2309
iter = oi->ip_next_orphan;
fs/ocfs2/journal.c
2365
inode = iter;
fs/ocfs2/quota_local.c
924
struct ocfs2_quota_chunk *chunk = NULL, *iter;
fs/ocfs2/quota_local.c
928
list_for_each_entry(iter, &oinfo->dqi_chunk, qc_chunk) {
fs/ocfs2/quota_local.c
930
iter->qc_headerbh->b_data;
fs/ocfs2/quota_local.c
932
chunk = iter;
fs/orangefs/devorangefs-req.c
362
struct iov_iter *iter)
fs/orangefs/devorangefs-req.c
371
int total = ret = iov_iter_count(iter);
fs/orangefs/devorangefs-req.c
388
if (!copy_from_iter_full(&head, head_size, iter)) {
fs/orangefs/devorangefs-req.c
423
if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
fs/orangefs/devorangefs-req.c
474
op->downcall.trailer_size, iter)) {
fs/orangefs/file.c
140
ret = orangefs_bufmap_copy_from_iovec(iter, buffer_index,
fs/orangefs/file.c
174
iov_iter_revert(iter, total_size);
fs/orangefs/file.c
254
ret = orangefs_bufmap_copy_to_iovec(iter, buffer_index,
fs/orangefs/file.c
324
struct iov_iter *iter)
fs/orangefs/file.c
334
ret = generic_file_read_iter(iocb, iter);
fs/orangefs/file.c
361
struct iov_iter *iter)
fs/orangefs/file.c
372
ret = generic_file_write_iter(iocb, iter);
fs/orangefs/file.c
50
loff_t *offset, struct iov_iter *iter, size_t total_size,
fs/orangefs/inode.c
107
ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
fs/orangefs/inode.c
220
struct iov_iter iter;
fs/orangefs/inode.c
24
struct iov_iter iter;
fs/orangefs/inode.c
242
iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac));
fs/orangefs/inode.c
246
&offset, &iter, readahead_length(rac),
fs/orangefs/inode.c
264
struct iov_iter iter;
fs/orangefs/inode.c
274
iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio));
fs/orangefs/inode.c
276
ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
fs/orangefs/inode.c
279
iov_iter_zero(~0U, &iter);
fs/orangefs/inode.c
476
struct iov_iter *iter)
fs/orangefs/inode.c
488
enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
fs/orangefs/inode.c
494
size_t count = iov_iter_count(iter);
fs/orangefs/inode.c
519
while (iov_iter_count(iter)) {
fs/orangefs/inode.c
520
size_t each_count = iov_iter_count(iter);
fs/orangefs/inode.c
53
iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen);
fs/orangefs/inode.c
538
ret = wait_for_direct_io(type, inode, offset, iter,
fs/orangefs/inode.c
55
ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
fs/orangefs/inode.c
83
struct iov_iter iter;
fs/orangefs/inode.c
98
iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->nfolios, ow->len);
fs/orangefs/orangefs-bufmap.c
469
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
fs/orangefs/orangefs-bufmap.c
486
if (copy_page_from_iter(page, 0, n, iter) != n)
fs/orangefs/orangefs-bufmap.c
497
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
fs/orangefs/orangefs-bufmap.c
515
n = copy_page_to_iter(page, 0, n, iter);
fs/orangefs/orangefs-bufmap.h
27
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
fs/orangefs/orangefs-bufmap.h
31
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
fs/overlayfs/file.c
323
static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/overlayfs/file.c
332
if (!iov_iter_count(iter))
fs/overlayfs/file.c
339
return backing_file_read_iter(realfile, iter, iocb, iocb->ki_flags,
fs/overlayfs/file.c
343
static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/overlayfs/file.c
355
if (!iov_iter_count(iter))
fs/overlayfs/file.c
370
ret = backing_file_write_iter(realfile, iter, iocb, ifl, &ctx);
fs/overlayfs/params.c
524
char *dup = NULL, *iter;
fs/overlayfs/params.c
563
iter = dup;
fs/overlayfs/params.c
567
err = ovl_kern_path(iter, &path, Opt_lowerdir);
fs/overlayfs/params.c
571
err = ovl_do_parse_layer(fc, iter, &path, Opt_lowerdir);
fs/overlayfs/params.c
583
iter = strchr(iter, '\0') + 1;
fs/overlayfs/params.c
584
if (*iter) {
fs/overlayfs/params.c
600
iter++;
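
The overlayfs params.c lines walk a duplicated option string whose separators were already rewritten to NULs: handle one entry, then hop past its terminator with strchr(iter, '\0') + 1 until an empty entry marks the end. A self-contained sketch of that hop idiom; the printf stand-in and the trailing empty-entry convention are illustrative assumptions, not the exact ovl parser.

#include <stdio.h>
#include <string.h>

static void walk_nul_separated(char *dup)
{
	char *iter = dup;

	if (!*iter)
		return;
	for (;;) {
		printf("layer: %s\n", iter);	/* stand-in for per-entry work */
		iter = strchr(iter, '\0') + 1;	/* hop past this entry */
		if (!*iter)			/* empty entry: end of list */
			break;
	}
}

int main(void)
{
	/* "lower1:lower2:lower3" after ':' separators became NULs */
	char dup[] = "lower1\0lower2\0lower3\0";

	walk_nul_separated(dup);
	return 0;
}
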
fs/proc/base.c
3547
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
fs/proc/base.c
3551
if (iter.task)
fs/proc/base.c
3552
put_task_struct(iter.task);
fs/proc/base.c
3555
iter.task = NULL;
fs/proc/base.c
3556
pid = find_ge_pid(iter.tgid, ns);
fs/proc/base.c
3558
iter.tgid = pid_nr_ns(pid, ns);
fs/proc/base.c
3559
iter.task = pid_task(pid, PIDTYPE_TGID);
fs/proc/base.c
3560
if (!iter.task) {
fs/proc/base.c
3561
iter.tgid += 1;
fs/proc/base.c
3564
get_task_struct(iter.task);
fs/proc/base.c
3567
return iter;
fs/proc/base.c
3575
struct tgid_iter iter;
fs/proc/base.c
3593
iter.tgid = pos - TGID_OFFSET;
fs/proc/base.c
3594
iter.task = NULL;
fs/proc/base.c
3595
for (iter = next_tgid(ns, iter);
fs/proc/base.c
3596
iter.task;
fs/proc/base.c
3597
iter.tgid += 1, iter = next_tgid(ns, iter)) {
fs/proc/base.c
3602
if (!has_pid_permissions(fs_info, iter.task, HIDEPID_INVISIBLE))
fs/proc/base.c
3605
len = snprintf(name, sizeof(name), "%u", iter.tgid);
fs/proc/base.c
3606
ctx->pos = iter.tgid + TGID_OFFSET;
fs/proc/base.c
3608
proc_pid_instantiate, iter.task, NULL)) {
fs/proc/base.c
3609
put_task_struct(iter.task);
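
next_tgid() in the base.c lines above is a by-value task cursor: drop the previous task's reference, find the first pid at or above iter.tgid with find_ge_pid(), take the thread-group leader if one exists, and bump tgid past the hole otherwise. Reassembled from those lines; the struct definition and includes are filled in from memory, and the RCU locking the real function holds around the lookup is elided.

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched/task.h>

struct tgid_iter {
	unsigned int tgid;
	struct task_struct *task;
};

static struct tgid_iter next_tgid(struct pid_namespace *ns,
				  struct tgid_iter iter)
{
	struct pid *pid;

	if (iter.task)
		put_task_struct(iter.task);	/* drop the previous hit */
retry:
	iter.task = NULL;
	pid = find_ge_pid(iter.tgid, ns);	/* first pid number >= tgid */
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_TGID);
		if (!iter.task) {	/* pid exists but isn't a group leader */
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	return iter;
}
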
fs/proc/inode.c
289
static ssize_t proc_reg_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/proc/inode.c
295
return pde->proc_ops->proc_read_iter(iocb, iter);
fs/proc/inode.c
299
ret = pde->proc_ops->proc_read_iter(iocb, iter);
fs/proc/internal.h
394
struct vma_iterator iter;
fs/proc/kcore.c
324
static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/proc/kcore.c
334
size_t buflen = iov_iter_count(iter);
fs/proc/kcore.c
372
if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
fs/proc/kcore.c
416
iter) != tsz) {
fs/proc/kcore.c
465
if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
fs/proc/kcore.c
515
if (iov_iter_zero(tsz, iter) != tsz) {
fs/proc/kcore.c
534
read += vread_iter(iter, src, left);
fs/proc/kcore.c
541
if (fault_in_iov_iter_writeable(iter, left)) {
fs/proc/kcore.c
550
if (copy_to_iter((char *)start, tsz, iter) != tsz) {
fs/proc/kcore.c
568
if (iov_iter_zero(tsz, iter) != tsz) {
fs/proc/kcore.c
581
if (iov_iter_zero(tsz, iter) != tsz)
fs/proc/kcore.c
599
if (iov_iter_zero(tsz, iter) != tsz) {
fs/proc/kcore.c
608
} else if (_copy_to_iter(buf, tsz, iter) != tsz) {
fs/proc/kcore.c
615
if (iov_iter_zero(tsz, iter) != tsz) {
fs/proc/proc_sysctl.c
553
static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
fs/proc/proc_sysctl.c
559
size_t count = iov_iter_count(iter);
fs/proc/proc_sysctl.c
589
if (!copy_from_iter_full(kbuf, count, iter))
fs/proc/proc_sysctl.c
606
if (copy_to_iter(kbuf, count, iter) < count)
fs/proc/proc_sysctl.c
619
static ssize_t proc_sys_read(struct kiocb *iocb, struct iov_iter *iter)
fs/proc/proc_sysctl.c
621
return proc_sys_call_handler(iocb, iter, 0);
fs/proc/proc_sysctl.c
624
static ssize_t proc_sys_write(struct kiocb *iocb, struct iov_iter *iter)
fs/proc/proc_sysctl.c
626
return proc_sys_call_handler(iocb, iter, 1);
fs/proc/task_mmu.c
189
return vma_next(&priv->iter);
fs/proc/task_mmu.c
192
vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
fs/proc/task_mmu.c
210
vma_iter_set(&priv->iter, pos);
fs/proc/task_mmu.c
232
return vma_next(&priv->iter);
fs/proc/task_mmu.c
311
vma_iter_init(&priv->iter, mm, (unsigned long)last_addr);
fs/proc/task_nommu.c
181
struct vm_area_struct *vma = vma_next(&priv->iter);
fs/proc/task_nommu.c
221
vma_iter_init(&priv->iter, mm, last_addr);
fs/proc/vmcore.c
159
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
fs/proc/vmcore.c
182
tmp = iov_iter_zero(nr_bytes, iter);
fs/proc/vmcore.c
185
tmp = copy_oldmem_page_encrypted(iter, pfn,
fs/proc/vmcore.c
189
tmp = copy_oldmem_page(iter, pfn, nr_bytes,
fs/proc/vmcore.c
228
struct iov_iter iter;
fs/proc/vmcore.c
230
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
fs/proc/vmcore.c
232
return read_from_oldmem(&iter, count, ppos, false);
fs/proc/vmcore.c
241
struct iov_iter iter;
fs/proc/vmcore.c
243
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
fs/proc/vmcore.c
245
return read_from_oldmem(&iter, count, ppos,
fs/proc/vmcore.c
263
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
fs/proc/vmcore.c
266
return copy_oldmem_page(iter, pfn, csize, offset);
fs/proc/vmcore.c
270
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
fs/proc/vmcore.c
281
if (copy_to_iter(buf, tsz, iter) < tsz)
fs/proc/vmcore.c
333
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
fs/proc/vmcore.c
340
if (!iov_iter_count(iter) || *fpos >= vmcore_size)
fs/proc/vmcore.c
343
iov_iter_truncate(iter, vmcore_size - *fpos);
fs/proc/vmcore.c
347
tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
fs/proc/vmcore.c
348
if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
fs/proc/vmcore.c
354
if (!iov_iter_count(iter))
fs/proc/vmcore.c
375
(size_t)*fpos, iov_iter_count(iter));
fs/proc/vmcore.c
377
if (vmcoredd_copy_dumps(iter, start, tsz))
fs/proc/vmcore.c
384
if (!iov_iter_count(iter))
fs/proc/vmcore.c
391
iov_iter_count(iter));
fs/proc/vmcore.c
393
if (copy_to_iter(kaddr, tsz, iter) < tsz)
fs/proc/vmcore.c
400
if (!iov_iter_count(iter))
fs/proc/vmcore.c
410
iov_iter_count(iter));
fs/proc/vmcore.c
412
tmp = read_from_oldmem(iter, tsz, &start,
fs/proc/vmcore.c
420
if (!iov_iter_count(iter))
fs/proc/vmcore.c
430
static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
fs/proc/vmcore.c
432
return __read_vmcore(iter, &iocb->ki_pos);
fs/proc/vmcore.c
475
struct iov_iter iter;
fs/proc/vmcore.c
488
iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
fs/proc/vmcore.c
490
rc = __read_vmcore(&iter, &offset);
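
A recurring shape in the vmcore.c lines: wrap a plain kernel buffer in a one-element kvec iterator so a single iov_iter-based reader serves both user and kernel destinations. A minimal sketch of that wrapper, assuming read_from_oldmem() keeps the four-argument form shown above.

#include <linux/uio.h>

static ssize_t read_into_kernel_buf(void *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	/* one segment, destination side; count bounds the whole transfer */
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	return read_from_oldmem(&iter, count, ppos, false);
}
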
fs/pstore/platform.c
280
struct kmsg_dump_iter iter;
fs/pstore/platform.c
301
kmsg_dump_rewind(&iter);
fs/pstore/platform.c
328
if (!kmsg_dump_get_buffer(&iter, true, dst + header_size,
fs/read_write.c
1007
&iter);
fs/read_write.c
1011
tot_len = iov_iter_count(&iter);
fs/read_write.c
1020
ret = do_iter_readv_writev(file, &iter, pos, READ, flags);
fs/read_write.c
1022
ret = do_loop_readv_writev(file, &iter, pos, READ, flags);
fs/read_write.c
1035
struct iov_iter iter;
fs/read_write.c
1045
&iter);
fs/read_write.c
1049
tot_len = iov_iter_count(&iter);
fs/read_write.c
1059
ret = do_iter_readv_writev(file, &iter, pos, WRITE, flags);
fs/read_write.c
1061
ret = do_loop_readv_writev(file, &iter, pos, WRITE, flags);
fs/read_write.c
1805
int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter)
fs/read_write.c
1807
size_t len = iov_iter_count(iter);
fs/read_write.c
1809
if (!iter_is_ubuf(iter))
fs/read_write.c
486
struct iov_iter iter;
fs/read_write.c
491
iov_iter_ubuf(&iter, ITER_DEST, buf, len);
fs/read_write.c
493
ret = filp->f_op->read_iter(&kiocb, &iter);
fs/read_write.c
515
struct iov_iter iter;
fs/read_write.c
531
iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len);
fs/read_write.c
532
ret = file->f_op->read_iter(&kiocb, &iter);
fs/read_write.c
588
struct iov_iter iter;
fs/read_write.c
593
iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len);
fs/read_write.c
595
ret = filp->f_op->write_iter(&kiocb, &iter);
fs/read_write.c
639
struct iov_iter iter;
fs/read_write.c
640
iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len);
fs/read_write.c
641
return __kernel_write_iter(file, &iter, pos);
fs/read_write.c
814
static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
fs/read_write.c
827
ret = filp->f_op->read_iter(&kiocb, iter);
fs/read_write.c
829
ret = filp->f_op->write_iter(&kiocb, iter);
fs/read_write.c
837
static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
fs/read_write.c
845
while (iov_iter_count(iter)) {
fs/read_write.c
849
nr = filp->f_op->read(filp, iter_iov_addr(iter),
fs/read_write.c
850
iter_iov_len(iter), ppos);
fs/read_write.c
852
nr = filp->f_op->write(filp, iter_iov_addr(iter),
fs/read_write.c
853
iter_iov_len(iter), ppos);
fs/read_write.c
862
if (nr != iter_iov_len(iter))
fs/read_write.c
864
iov_iter_advance(iter, nr);
fs/read_write.c
871
struct iov_iter *iter)
fs/read_write.c
883
tot_len = iov_iter_count(iter);
fs/read_write.c
890
ret = file->f_op->read_iter(iocb, iter);
fs/read_write.c
898
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
fs/read_write.c
911
tot_len = iov_iter_count(iter);
fs/read_write.c
918
ret = do_iter_readv_writev(file, iter, ppos, READ, flags);
fs/read_write.c
931
struct iov_iter *iter)
fs/read_write.c
943
tot_len = iov_iter_count(iter);
fs/read_write.c
951
ret = file->f_op->write_iter(iocb, iter);
fs/read_write.c
961
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
fs/read_write.c
974
tot_len = iov_iter_count(iter);
fs/read_write.c
983
ret = do_iter_readv_writev(file, iter, ppos, WRITE, flags);
fs/read_write.c
997
struct iov_iter iter;
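
The read_write.c entries at 486-493 show how a read(2)-style call is bridged onto ->read_iter: wrap the user buffer in a ubuf iterator, pair it with a synchronous kiocb, and copy the advanced position back out. A sketch of that bridge; init_sync_kiocb() and the ki_pos bookkeeping are filled in from memory and may differ in detail from the current source.

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t sync_read_sketch(struct file *filp, char __user *buf,
				size_t len, loff_t *ppos)
{
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);	/* synchronous: no completion hook */
	kiocb.ki_pos = ppos ? *ppos : 0;
	iov_iter_ubuf(&iter, ITER_DEST, buf, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	if (ppos)
		*ppos = kiocb.ki_pos;	/* the op advanced the position */
	return ret;
}
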
fs/readdir.c
33
int (*iter)(struct file *, struct dir_context *))
fs/readdir.c
65
ret = iter(file, ctx);
fs/seq_file.c
156
struct iov_iter iter;
fs/seq_file.c
160
iov_iter_init(&iter, ITER_DEST, &iov, 1, size);
fs/seq_file.c
163
ret = seq_read_iter(&kiocb, &iter);
fs/seq_file.c
172
ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/seq_file.c
180
if (!iov_iter_count(iter))
fs/seq_file.c
217
n = copy_to_iter(m->buf + m->from, m->count, iter);
fs/seq_file.c
271
if (m->count >= iov_iter_count(iter))
fs/seq_file.c
282
n = copy_to_iter(m->buf, m->count, iter);
fs/smb/client/cifsencrypt.c
101
iov_iter_count(&iter), 4);
fs/smb/client/cifsencrypt.c
103
rc = cifs_sig_iter(&iter, iov_iter_count(&iter), ctx);
fs/smb/client/cifsencrypt.c
73
static int cifs_sig_iter(const struct iov_iter *iter, size_t maxsize,
fs/smb/client/cifsencrypt.c
76
struct iov_iter tmp_iter = *iter;
fs/smb/client/cifsencrypt.c
90
struct iov_iter iter;
fs/smb/client/cifsencrypt.c
97
iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, size);
fs/smb/client/cifsencrypt.c
99
if (iov_iter_count(&iter) <= 4)
fs/smb/client/cifsfs.h
101
ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter);
fs/smb/client/cifsproto.h
232
struct iov_iter *iter, unsigned int to_read);
fs/smb/client/compress.c
162
struct iov_iter iter = *source;
fs/smb/client/compress.c
165
while (iov_iter_count(&iter) >= SZ_2K) {
fs/smb/client/compress.c
166
size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max);
fs/smb/client/compress.c
169
n = copy_from_iter(sample + s, part, &iter);
fs/smb/client/compress.c
176
if (iov_iter_count(&iter) < PAGE_SIZE - SZ_2K)
fs/smb/client/compress.c
179
iov_iter_advance(&iter, SZ_2K);
fs/smb/client/compress.c
306
struct iov_iter iter;
fs/smb/client/compress.c
325
iter = rq->rq_iter;
fs/smb/client/compress.c
327
if (!copy_from_iter_full(src, slen, &iter)) {
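
The compress.c lines read from a struct-copy of the source iterator (iter = *source), so sampling never disturbs the caller's position: copy_from_iter() and iov_iter_advance() move only the local cursor. A condensed sketch of that copy-then-skip sampling loop; the 2K-copy/2K-skip window comes from the entries above, while the termination checks are simplified.

#include <linux/sizes.h>
#include <linux/uio.h>
#include <linux/minmax.h>

static size_t sample_source(u8 *sample, size_t max,
			    const struct iov_iter *source)
{
	struct iov_iter iter = *source;	/* private cursor; caller unmoved */
	size_t s = 0;

	while (s < max && iov_iter_count(&iter) >= SZ_2K) {
		size_t part = umin(umin(iov_iter_count(&iter), SZ_2K), max - s);

		/* copy_from_iter() also advances the local cursor by part */
		if (copy_from_iter(sample + s, part, &iter) != part)
			break;
		s += part;
		if (iov_iter_count(&iter) < SZ_2K)
			break;
		iov_iter_advance(&iter, SZ_2K);	/* skip the window's tail */
	}
	return s;
}
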
fs/smb/client/connect.c
817
cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
fs/smb/client/connect.c
820
struct msghdr smb_msg = { .msg_iter = *iter };
fs/smb/client/file.c
2944
ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
fs/smb/client/file.c
2950
return netfs_unbuffered_read_iter(iocb, iter);
fs/smb/client/file.c
2956
return netfs_file_read_iter(iocb, iter);
fs/smb/client/file.c
3323
static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
fs/smb/client/file.c
3327
if (iov_iter_rw(iter) == READ)
fs/smb/client/file.c
3328
ret = netfs_unbuffered_read_iter_locked(iocb, iter);
fs/smb/client/file.c
3330
ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
fs/smb/client/smb2misc.c
160
struct cifs_ses *iter;
fs/smb/client/smb2misc.c
164
list_for_each_entry(iter, &pserver->smb_ses_list, smb_ses_list) {
fs/smb/client/smb2misc.c
165
if (iter->Suid == le64_to_cpu(thdr->SessionId)) {
fs/smb/client/smb2misc.c
166
ses = iter;
fs/smb/client/smb2ops.c
4382
struct iov_iter *iter = &rqst[i].rq_iter;
fs/smb/client/smb2ops.c
4383
size_t count = iov_iter_count(iter);
fs/smb/client/smb2ops.c
4395
rc = extract_iter_to_sg(iter, count, &sgtable,
fs/smb/client/smb2ops.c
4397
iov_iter_revert(iter, rc);
fs/smb/client/smb2ops.c
4520
static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
fs/smb/client/smb2ops.c
4530
if (copy_folio_from_iter(folio, 0, part, iter) != part)
fs/smb/client/smb2ops.c
4619
unsigned int buf_data_size, struct iov_iter *iter,
fs/smb/client/smb2ops.c
4635
if (iter) {
fs/smb/client/smb2ops.c
4636
rqst.rq_iter = *iter;
fs/smb/client/smb2ops.c
4637
iter_size = iov_iter_count(iter);
fs/smb/client/smb2ops.c
4678
size_t skip, struct iov_iter *iter)
fs/smb/client/smb2ops.c
4686
n = copy_folio_to_iter(folio, skip, len, iter);
fs/smb/client/smb2ops.c
4862
struct iov_iter iter;
fs/smb/client/smb2ops.c
4864
iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
fs/smb/client/smb2ops.c
4866
&iter, true);
fs/smb/client/smb2ops.c
4925
struct iov_iter iter;
fs/smb/client/smb2ops.c
4956
iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
fs/smb/client/smb2ops.c
4959
rc = cifs_read_iter_from_socket(server, &iter, dw->len);
fs/smb/client/smb2ops.c
4965
struct iov_iter tmp = iter;
fs/smb/client/smb2ops.c
4970
iov_iter_truncate(&iter, dw->len);
fs/smb/client/smb2ops.c
4992
&iter, false);
fs/smb/client/smbdirect.c
1418
struct iov_iter *iter,
fs/smb/client/smbdirect.c
1487
if (!iter)
fs/smb/client/smbdirect.c
1505
if (iter) {
fs/smb/client/smbdirect.c
1517
rc = smb_extract_iter_to_rdma(iter, payload_len,
fs/smb/client/smbdirect.c
1597
struct iov_iter *iter,
fs/smb/client/smbdirect.c
1608
while (iov_iter_count(iter) > 0) {
fs/smb/client/smbdirect.c
1609
rc = smbd_post_send_iter(sc, batch, iter, _remaining_data_length);
fs/smb/client/smbdirect.c
2530
struct iov_iter iter;
fs/smb/client/smbdirect.c
2576
iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen);
fs/smb/client/smbdirect.c
2578
rc = smbd_post_send_full_iter(sc, &batch, &iter, &remaining_data_length);
fs/smb/client/smbdirect.c
2886
static int smbd_iter_to_mr(struct iov_iter *iter,
fs/smb/client/smbdirect.c
2894
ret = extract_iter_to_sg(iter, iov_iter_count(iter), sgt, max_sg, 0);
fs/smb/client/smbdirect.c
2909
struct iov_iter *iter,
fs/smb/client/smbdirect.c
2918
num_pages = iov_iter_npages(iter, sp->max_frmr_depth + 1);
fs/smb/client/smbdirect.c
2940
num_pages, iov_iter_count(iter), sp->max_frmr_depth);
fs/smb/client/smbdirect.c
2941
smbd_iter_to_mr(iter, &mr->sgt, sp->max_frmr_depth);
fs/smb/client/smbdirect.c
3149
static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
fs/smb/client/smbdirect.c
3153
const struct bio_vec *bv = iter->bvec;
fs/smb/client/smbdirect.c
3154
unsigned long start = iter->iov_offset;
fs/smb/client/smbdirect.c
3158
for (i = 0; i < iter->nr_segs; i++) {
fs/smb/client/smbdirect.c
3181
iov_iter_advance(iter, ret);
fs/smb/client/smbdirect.c
3190
static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
fs/smb/client/smbdirect.c
3194
const struct kvec *kv = iter->kvec;
fs/smb/client/smbdirect.c
3195
unsigned long start = iter->iov_offset;
fs/smb/client/smbdirect.c
3199
for (i = 0; i < iter->nr_segs; i++) {
fs/smb/client/smbdirect.c
3239
iov_iter_advance(iter, ret);
fs/smb/client/smbdirect.c
3247
static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
fs/smb/client/smbdirect.c
3251
const struct folio_queue *folioq = iter->folioq;
fs/smb/client/smbdirect.c
3252
unsigned int slot = iter->folioq_slot;
fs/smb/client/smbdirect.c
3254
size_t offset = iter->iov_offset;
fs/smb/client/smbdirect.c
3285
WARN_ON_ONCE(ret < iter->count);
fs/smb/client/smbdirect.c
3294
iter->folioq = folioq;
fs/smb/client/smbdirect.c
3295
iter->folioq_slot = slot;
fs/smb/client/smbdirect.c
3296
iter->iov_offset = offset;
fs/smb/client/smbdirect.c
3297
iter->count -= ret;
fs/smb/client/smbdirect.c
3312
static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
fs/smb/client/smbdirect.c
3318
switch (iov_iter_type(iter)) {
fs/smb/client/smbdirect.c
3320
ret = smb_extract_bvec_to_rdma(iter, rdma, len);
fs/smb/client/smbdirect.c
3323
ret = smb_extract_kvec_to_rdma(iter, rdma, len);
fs/smb/client/smbdirect.c
3326
ret = smb_extract_folioq_to_rdma(iter, rdma, len);
fs/smb/client/smbdirect.c
59
static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
fs/smb/client/smbdirect.h
61
struct smbd_connection *info, struct iov_iter *iter,
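smb_extract_iter_to_rdma() above fans out on the iterator's backing store; a sketch of that dispatch, using the iov_iter_type() values and the smb_extract_*_to_rdma() helpers listed above (the fallback error value is an assumption):

    switch (iov_iter_type(iter)) {
    case ITER_BVEC:
        ret = smb_extract_bvec_to_rdma(iter, rdma, len);
        break;
    case ITER_KVEC:
        ret = smb_extract_kvec_to_rdma(iter, rdma, len);
        break;
    case ITER_FOLIOQ:
        ret = smb_extract_folioq_to_rdma(iter, rdma, len);
        break;
    default:
        WARN_ON_ONCE(1);    /* unexpected iterator type */
        ret = -EIO;
    }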
fs/smb/server/smb2pdu.c
7291
struct ksmbd_work *iter;
fs/smb/server/smb2pdu.c
7304
list_for_each_entry(iter, command_list,
fs/smb/server/smb2pdu.c
7306
chdr = smb_get_msg(iter->request_buf);
fs/smb/server/smb2pdu.c
7308
if (iter->async_id !=
fs/smb/server/smb2pdu.c
7316
iter->state = KSMBD_WORK_CANCELLED;
fs/smb/server/smb2pdu.c
7317
if (iter->cancel_fn)
fs/smb/server/smb2pdu.c
7318
iter->cancel_fn(iter->cancel_argv);
fs/smb/server/smb2pdu.c
7326
list_for_each_entry(iter, command_list, request_entry) {
fs/smb/server/smb2pdu.c
7327
chdr = smb_get_msg(iter->request_buf);
fs/smb/server/smb2pdu.c
7330
iter == work)
fs/smb/server/smb2pdu.c
7337
iter->state = KSMBD_WORK_CANCELLED;
fs/smb/server/transport_rdma.c
3022
struct list_head *iter;
fs/smb/server/transport_rdma.c
3030
netdev_for_each_lower_dev(netdev, lower_dev, iter)
fs/splice.c
1501
static ssize_t vmsplice_to_user(struct file *file, struct iov_iter *iter,
fs/splice.c
1506
.total_len = iov_iter_count(iter),
fs/splice.c
1508
.u.data = iter
fs/splice.c
1534
static ssize_t vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
fs/splice.c
1553
ret = iter_to_pipe(iter, pipe, buf_flag);
fs/splice.c
1583
struct iov_iter iter;
fs/splice.c
1601
ARRAY_SIZE(iovstack), &iov, &iter);
fs/splice.c
1605
if (!iov_iter_count(&iter))
fs/splice.c
1608
error = vmsplice_to_pipe(fd_file(f), &iter, flags);
fs/splice.c
1610
error = vmsplice_to_user(fd_file(f), &iter, flags);
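The fs/splice.c hits show the stack-first iovec import used by vmsplice; a sketch of that shape, assuming a user pointer uiov and segment count nr_segs (import_iovec() NULLs the iovec pointer when the on-stack array suffices, so the unconditional kfree() is safe):

    struct iovec iovstack[UIO_FASTIOV];
    struct iovec *iov = iovstack;
    struct iov_iter iter;
    ssize_t error;

    error = import_iovec(ITER_SOURCE, uiov, nr_segs,
                         ARRAY_SIZE(iovstack), &iov, &iter);
    if (error < 0)
        return error;
    if (iov_iter_count(&iter)) {
        /* ... hand &iter to vmsplice_to_pipe()/vmsplice_to_user() ... */
    }
    kfree(iov);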
fs/udf/dir.c
101
if (iter.fi.fileCharacteristics & FID_FILE_CHAR_PARENT) {
fs/udf/dir.c
107
flen = udf_get_filename(sb, iter.name,
fs/udf/dir.c
108
iter.fi.lengthFileIdent, fname, UDF_NAME_LEN);
fs/udf/dir.c
112
tloc = lelb_to_cpu(iter.fi.icb.extLocation);
fs/udf/dir.c
119
ctx->pos = (iter.pos >> 2) + 1;
fs/udf/dir.c
123
udf_fiiter_release(&iter);
fs/udf/dir.c
45
struct udf_fileident_iter iter;
fs/udf/dir.c
77
for (ret = udf_fiiter_init(&iter, dir, nf_pos);
fs/udf/dir.c
78
!ret && iter.pos < dir->i_size;
fs/udf/dir.c
79
ret = udf_fiiter_advance(&iter)) {
fs/udf/dir.c
84
if (iter.pos < emit_pos)
fs/udf/dir.c
89
ctx->pos = (iter.pos >> 2) + 1;
fs/udf/dir.c
91
if (iter.fi.fileCharacteristics & FID_FILE_CHAR_DELETED) {
fs/udf/dir.c
96
if (iter.fi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) {
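The fs/udf/dir.c hits trace the three-call lifecycle of the UDF directory iterator; a sketch of the readdir-style loop, with the per-entry handling elided:

    struct udf_fileident_iter iter;
    int ret;

    for (ret = udf_fiiter_init(&iter, dir, nf_pos);
         !ret && iter.pos < dir->i_size;
         ret = udf_fiiter_advance(&iter)) {
        if (iter.fi.fileCharacteristics & FID_FILE_CHAR_DELETED)
            continue;    /* skip deleted entries */
        /* ... emit iter.name, iter.fi.lengthFileIdent ... */
    }
    udf_fiiter_release(&iter);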
fs/udf/directory.c
101
memcpy((char *)(&iter->fi) + len, iter->bh[1]->b_data,
fs/udf/directory.c
103
err = udf_verify_fi(iter);
fs/udf/directory.c
109
le16_to_cpu(iter->fi.lengthOfImpUse);
fs/udf/directory.c
110
if (off + udf_dir_entry_len(&iter->fi) <= blksize) {
fs/udf/directory.c
111
iter->name = iter->bh[0]->b_data + nameoff;
fs/udf/directory.c
113
iter->name = iter->bh[1]->b_data + (nameoff - blksize);
fs/udf/directory.c
115
iter->name = iter->namebuf;
fs/udf/directory.c
117
memcpy(iter->name, iter->bh[0]->b_data + nameoff, len);
fs/udf/directory.c
118
memcpy(iter->name + len, iter->bh[1]->b_data,
fs/udf/directory.c
119
iter->fi.lengthFileIdent - len);
fs/udf/directory.c
125
static void udf_readahead_dir(struct udf_fileident_iter *iter)
fs/udf/directory.c
127
unsigned int ralen = 16 >> (iter->dir->i_blkbits - 9);
fs/udf/directory.c
132
if (iter->loffset & (ralen - 1))
fs/udf/directory.c
135
if (iter->loffset + ralen > (iter->elen >> iter->dir->i_blkbits))
fs/udf/directory.c
136
ralen = (iter->elen >> iter->dir->i_blkbits) - iter->loffset;
fs/udf/directory.c
139
blk = udf_get_lb_pblock(iter->dir->i_sb, &iter->eloc,
fs/udf/directory.c
140
iter->loffset + i);
fs/udf/directory.c
141
tmp = sb_getblk(iter->dir->i_sb, blk);
fs/udf/directory.c
154
static struct buffer_head *udf_fiiter_bread_blk(struct udf_fileident_iter *iter)
fs/udf/directory.c
158
udf_readahead_dir(iter);
fs/udf/directory.c
159
blk = udf_get_lb_pblock(iter->dir->i_sb, &iter->eloc, iter->loffset);
fs/udf/directory.c
160
return sb_bread(iter->dir->i_sb, blk);
fs/udf/directory.c
167
static int udf_fiiter_advance_blk(struct udf_fileident_iter *iter)
fs/udf/directory.c
172
iter->loffset++;
fs/udf/directory.c
173
if (iter->loffset < DIV_ROUND_UP(iter->elen, 1<<iter->dir->i_blkbits))
fs/udf/directory.c
176
iter->loffset = 0;
fs/udf/directory.c
177
err = udf_next_aext(iter->dir, &iter->epos, &iter->eloc,
fs/udf/directory.c
178
&iter->elen, &etype, 1);
fs/udf/directory.c
182
if (iter->pos == iter->dir->i_size) {
fs/udf/directory.c
183
iter->elen = 0;
fs/udf/directory.c
186
udf_err(iter->dir->i_sb,
fs/udf/directory.c
188
(unsigned long long)iter->pos, iter->dir->i_ino);
fs/udf/directory.c
19
static int udf_verify_fi(struct udf_fileident_iter *iter)
fs/udf/directory.c
194
static int udf_fiiter_load_bhs(struct udf_fileident_iter *iter)
fs/udf/directory.c
196
int blksize = 1 << iter->dir->i_blkbits;
fs/udf/directory.c
197
int off = iter->pos & (blksize - 1);
fs/udf/directory.c
202
if (!iter->bh[0] && iter->elen) {
fs/udf/directory.c
203
iter->bh[0] = udf_fiiter_bread_blk(iter);
fs/udf/directory.c
204
if (!iter->bh[0]) {
fs/udf/directory.c
208
if (!buffer_uptodate(iter->bh[0])) {
fs/udf/directory.c
214
if (iter->pos >= iter->dir->i_size)
fs/udf/directory.c
219
fi = (struct fileIdentDesc *)(iter->bh[0]->b_data + off);
fs/udf/directory.c
223
err = udf_fiiter_advance_blk(iter);
fs/udf/directory.c
226
iter->bh[1] = udf_fiiter_bread_blk(iter);
fs/udf/directory.c
227
if (!iter->bh[1]) {
fs/udf/directory.c
23
if (iter->fi.descTag.tagIdent != cpu_to_le16(TAG_IDENT_FID)) {
fs/udf/directory.c
231
if (!buffer_uptodate(iter->bh[1])) {
fs/udf/directory.c
238
brelse(iter->bh[0]);
fs/udf/directory.c
239
brelse(iter->bh[1]);
fs/udf/directory.c
24
udf_err(iter->dir->i_sb,
fs/udf/directory.c
240
iter->bh[0] = iter->bh[1] = NULL;
fs/udf/directory.c
244
int udf_fiiter_init(struct udf_fileident_iter *iter, struct inode *dir,
fs/udf/directory.c
251
iter->dir = dir;
fs/udf/directory.c
252
iter->bh[0] = iter->bh[1] = NULL;
fs/udf/directory.c
253
iter->pos = pos;
fs/udf/directory.c
254
iter->elen = 0;
fs/udf/directory.c
255
iter->epos.bh = NULL;
fs/udf/directory.c
256
iter->name = NULL;
fs/udf/directory.c
26
iter->dir->i_ino, (unsigned long long)iter->pos,
fs/udf/directory.c
262
iter->namebuf = kmalloc(UDF_NAME_LEN_CS0, GFP_KERNEL | __GFP_NOFAIL);
fs/udf/directory.c
265
err = udf_copy_fi(iter);
fs/udf/directory.c
269
err = inode_bmap(dir, iter->pos >> dir->i_blkbits, &iter->epos,
fs/udf/directory.c
27
le16_to_cpu(iter->fi.descTag.tagIdent));
fs/udf/directory.c
270
&iter->eloc, &iter->elen, &iter->loffset, &etype);
fs/udf/directory.c
280
err = udf_fiiter_load_bhs(iter);
fs/udf/directory.c
283
err = udf_copy_fi(iter);
fs/udf/directory.c
286
udf_fiiter_release(iter);
fs/udf/directory.c
290
int udf_fiiter_advance(struct udf_fileident_iter *iter)
fs/udf/directory.c
293
int blksize = 1 << iter->dir->i_blkbits;
fs/udf/directory.c
296
oldoff = iter->pos & (blksize - 1);
fs/udf/directory.c
297
len = udf_dir_entry_len(&iter->fi);
fs/udf/directory.c
298
iter->pos += len;
fs/udf/directory.c
299
if (UDF_I(iter->dir)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
fs/udf/directory.c
30
len = udf_dir_entry_len(&iter->fi);
fs/udf/directory.c
301
brelse(iter->bh[0]);
fs/udf/directory.c
302
iter->bh[0] = NULL;
fs/udf/directory.c
304
if (iter->bh[1]) {
fs/udf/directory.c
305
iter->bh[0] = iter->bh[1];
fs/udf/directory.c
306
iter->bh[1] = NULL;
fs/udf/directory.c
308
err = udf_fiiter_advance_blk(iter);
fs/udf/directory.c
31
if (le16_to_cpu(iter->fi.lengthOfImpUse) & 3) {
fs/udf/directory.c
313
err = udf_fiiter_load_bhs(iter);
fs/udf/directory.c
317
return udf_copy_fi(iter);
fs/udf/directory.c
32
udf_err(iter->dir->i_sb,
fs/udf/directory.c
320
void udf_fiiter_release(struct udf_fileident_iter *iter)
fs/udf/directory.c
322
iter->dir = NULL;
fs/udf/directory.c
323
brelse(iter->bh[0]);
fs/udf/directory.c
324
brelse(iter->bh[1]);
fs/udf/directory.c
325
iter->bh[0] = iter->bh[1] = NULL;
fs/udf/directory.c
326
kfree(iter->namebuf);
fs/udf/directory.c
327
iter->namebuf = NULL;
fs/udf/directory.c
34
iter->dir->i_ino, (unsigned long long)iter->pos);
fs/udf/directory.c
407
void udf_fiiter_write_fi(struct udf_fileident_iter *iter, uint8_t *impuse)
fs/udf/directory.c
409
struct udf_inode_info *iinfo = UDF_I(iter->dir);
fs/udf/directory.c
412
int blksize = 1 << iter->dir->i_blkbits;
fs/udf/directory.c
414
off = iter->pos & (blksize - 1);
fs/udf/directory.c
417
len1 = iter->dir->i_size;
fs/udf/directory.c
419
buf1 = iter->bh[0]->b_data;
fs/udf/directory.c
42
if (len > 1 << iter->dir->i_blkbits) {
fs/udf/directory.c
421
if (iter->bh[1]) {
fs/udf/directory.c
422
buf2 = iter->bh[1]->b_data;
fs/udf/directory.c
427
udf_copy_fi_to_bufs(buf1, len1, buf2, len2, off, &iter->fi, impuse,
fs/udf/directory.c
428
iter->name == iter->namebuf ? iter->name : NULL);
fs/udf/directory.c
43
udf_err(iter->dir->i_sb,
fs/udf/directory.c
431
mark_inode_dirty(iter->dir);
fs/udf/directory.c
433
mark_buffer_dirty_inode(iter->bh[0], iter->dir);
fs/udf/directory.c
434
if (iter->bh[1])
fs/udf/directory.c
435
mark_buffer_dirty_inode(iter->bh[1], iter->dir);
fs/udf/directory.c
437
inode_inc_iversion(iter->dir);
fs/udf/directory.c
440
void udf_fiiter_update_elen(struct udf_fileident_iter *iter, uint32_t new_elen)
fs/udf/directory.c
442
struct udf_inode_info *iinfo = UDF_I(iter->dir);
fs/udf/directory.c
443
int diff = new_elen - iter->elen;
fs/udf/directory.c
446
if (!iter->elen)
fs/udf/directory.c
448
iter->elen = new_elen;
fs/udf/directory.c
45
iter->dir->i_ino, len, (unsigned long long)iter->pos);
fs/udf/directory.c
450
iter->epos.offset -= sizeof(struct short_ad);
fs/udf/directory.c
452
iter->epos.offset -= sizeof(struct long_ad);
fs/udf/directory.c
453
udf_write_aext(iter->dir, &iter->epos, &iter->eloc, iter->elen, 1);
fs/udf/directory.c
455
mark_inode_dirty(iter->dir);
fs/udf/directory.c
459
int udf_fiiter_append_blk(struct udf_fileident_iter *iter)
fs/udf/directory.c
461
struct udf_inode_info *iinfo = UDF_I(iter->dir);
fs/udf/directory.c
462
int blksize = 1 << iter->dir->i_blkbits;
fs/udf/directory.c
465
uint32_t old_elen = iter->elen;
fs/udf/directory.c
473
udf_fiiter_update_elen(iter, ALIGN(iter->elen, blksize));
fs/udf/directory.c
476
block = iinfo->i_lenExtents >> iter->dir->i_blkbits;
fs/udf/directory.c
477
bh = udf_bread(iter->dir, block, 1, &err);
fs/udf/directory.c
479
udf_fiiter_update_elen(iter, old_elen);
fs/udf/directory.c
48
if (iter->pos + len > iter->dir->i_size) {
fs/udf/directory.c
482
err = inode_bmap(iter->dir, block, &iter->epos, &iter->eloc, &iter->elen,
fs/udf/directory.c
483
&iter->loffset, &etype);
fs/udf/directory.c
485
udf_err(iter->dir->i_sb,
fs/udf/directory.c
487
(unsigned long long)block, iter->dir->i_ino);
fs/udf/directory.c
49
udf_err(iter->dir->i_sb,
fs/udf/directory.c
490
if (!(iter->pos & (blksize - 1))) {
fs/udf/directory.c
491
brelse(iter->bh[0]);
fs/udf/directory.c
492
iter->bh[0] = bh;
fs/udf/directory.c
494
iter->bh[1] = bh;
fs/udf/directory.c
51
iter->dir->i_ino, (unsigned long long)iter->pos);
fs/udf/directory.c
54
if (udf_dir_entry_len(&iter->fi) !=
fs/udf/directory.c
55
sizeof(struct tag) + le16_to_cpu(iter->fi.descTag.descCRCLength)) {
fs/udf/directory.c
56
udf_err(iter->dir->i_sb,
fs/udf/directory.c
58
iter->dir->i_ino,
fs/udf/directory.c
59
(unsigned)le16_to_cpu(iter->fi.descTag.descCRCLength),
fs/udf/directory.c
60
(unsigned)(udf_dir_entry_len(&iter->fi) -
fs/udf/directory.c
67
static int udf_copy_fi(struct udf_fileident_iter *iter)
fs/udf/directory.c
69
struct udf_inode_info *iinfo = UDF_I(iter->dir);
fs/udf/directory.c
70
u32 blksize = 1 << iter->dir->i_blkbits;
fs/udf/directory.c
75
if (iter->pos >= iter->dir->i_size) {
fs/udf/directory.c
76
iter->name = NULL;
fs/udf/directory.c
79
if (iter->dir->i_size < iter->pos + sizeof(struct fileIdentDesc)) {
fs/udf/directory.c
80
udf_err(iter->dir->i_sb,
fs/udf/directory.c
82
iter->dir->i_ino);
fs/udf/directory.c
86
memcpy(&iter->fi, iinfo->i_data + iinfo->i_lenEAttr + iter->pos,
fs/udf/directory.c
88
err = udf_verify_fi(iter);
fs/udf/directory.c
91
iter->name = iinfo->i_data + iinfo->i_lenEAttr + iter->pos +
fs/udf/directory.c
93
le16_to_cpu(iter->fi.lengthOfImpUse);
fs/udf/directory.c
97
off = iter->pos & (blksize - 1);
fs/udf/directory.c
99
memcpy(&iter->fi, iter->bh[0]->b_data + off, len);
fs/udf/inode.c
298
static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fs/udf/inode.c
303
size_t count = iov_iter_count(iter);
fs/udf/inode.c
309
ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
fs/udf/inode.c
310
if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
fs/udf/namei.c
100
udf_fiiter_release(iter);
fs/udf/namei.c
111
struct udf_fileident_iter iter;
fs/udf/namei.c
117
err = udf_fiiter_find_entry(dir, &dentry->d_name, &iter);
fs/udf/namei.c
124
loc = lelb_to_cpu(iter.fi.icb.extLocation);
fs/udf/namei.c
125
udf_fiiter_release(&iter);
fs/udf/namei.c
141
struct udf_fileident_iter iter;
fs/udf/namei.c
198
for (ret = udf_fiiter_init(&iter, inode, 0);
fs/udf/namei.c
199
!ret && iter.pos < inode->i_size;
fs/udf/namei.c
200
ret = udf_fiiter_advance(&iter)) {
fs/udf/namei.c
201
iter.fi.descTag.tagLocation = cpu_to_le32(*block);
fs/udf/namei.c
202
if (iter.fi.lengthOfImpUse != cpu_to_le16(0))
fs/udf/namei.c
203
impuse = dbh->b_data + iter.pos +
fs/udf/namei.c
207
udf_fiiter_write_fi(&iter, impuse);
fs/udf/namei.c
215
udf_fiiter_release(&iter);
fs/udf/namei.c
221
struct udf_fileident_iter *iter)
fs/udf/namei.c
239
for (ret = udf_fiiter_init(iter, dir, 0);
fs/udf/namei.c
240
!ret && iter->pos < dir->i_size;
fs/udf/namei.c
241
ret = udf_fiiter_advance(iter)) {
fs/udf/namei.c
242
if (iter->fi.fileCharacteristics & FID_FILE_CHAR_DELETED) {
fs/udf/namei.c
243
if (udf_dir_entry_len(&iter->fi) == nfidlen) {
fs/udf/namei.c
244
iter->fi.descTag.tagSerialNum = cpu_to_le16(1);
fs/udf/namei.c
245
iter->fi.fileVersionNum = cpu_to_le16(1);
fs/udf/namei.c
246
iter->fi.fileCharacteristics = 0;
fs/udf/namei.c
247
iter->fi.lengthFileIdent = namelen;
fs/udf/namei.c
248
iter->fi.lengthOfImpUse = cpu_to_le16(0);
fs/udf/namei.c
249
memcpy(iter->namebuf, name, namelen);
fs/udf/namei.c
250
iter->name = iter->namebuf;
fs/udf/namei.c
256
udf_fiiter_release(iter);
fs/udf/namei.c
260
blksize - udf_ext0_offset(dir) - iter->pos < nfidlen) {
fs/udf/namei.c
261
udf_fiiter_release(iter);
fs/udf/namei.c
265
ret = udf_fiiter_init(iter, dir, dir->i_size);
fs/udf/namei.c
274
block = iter->eloc.logicalBlockNum +
fs/udf/namei.c
275
((iter->elen - 1) >> dir->i_blkbits);
fs/udf/namei.c
277
off = iter->pos & (blksize - 1);
fs/udf/namei.c
284
ret = udf_fiiter_append_blk(iter);
fs/udf/namei.c
286
udf_fiiter_release(iter);
fs/udf/namei.c
291
if (!(iter->pos & (blksize - 1)))
fs/udf/namei.c
292
block = iter->eloc.logicalBlockNum +
fs/udf/namei.c
293
((iter->elen - 1) >> dir->i_blkbits);
fs/udf/namei.c
295
memset(&iter->fi, 0, sizeof(struct fileIdentDesc));
fs/udf/namei.c
297
udf_new_tag((char *)(&iter->fi), TAG_IDENT_FID, 3, 1, block,
fs/udf/namei.c
300
udf_new_tag((char *)(&iter->fi), TAG_IDENT_FID, 2, 1, block,
fs/udf/namei.c
302
iter->fi.fileVersionNum = cpu_to_le16(1);
fs/udf/namei.c
303
iter->fi.lengthFileIdent = namelen;
fs/udf/namei.c
304
iter->fi.lengthOfImpUse = cpu_to_le16(0);
fs/udf/namei.c
305
memcpy(iter->namebuf, name, namelen);
fs/udf/namei.c
306
iter->name = iter->namebuf;
fs/udf/namei.c
313
udf_fiiter_update_elen(iter, iter->elen -
fs/udf/namei.c
321
static void udf_fiiter_delete_entry(struct udf_fileident_iter *iter)
fs/udf/namei.c
323
iter->fi.fileCharacteristics |= FID_FILE_CHAR_DELETED;
fs/udf/namei.c
325
if (UDF_QUERY_FLAG(iter->dir->i_sb, UDF_FLAG_STRICT))
fs/udf/namei.c
326
memset(&iter->fi.icb, 0x00, sizeof(struct long_ad));
fs/udf/namei.c
328
udf_fiiter_write_fi(iter, NULL);
fs/udf/namei.c
350
struct udf_fileident_iter iter;
fs/udf/namei.c
353
err = udf_fiiter_add_entry(dir, dentry, &iter);
fs/udf/namei.c
359
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
fs/udf/namei.c
360
iter.fi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
fs/udf/namei.c
361
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
fs/udf/namei.c
363
udf_fiiter_write_fi(&iter, NULL);
fs/udf/namei.c
366
udf_fiiter_release(&iter);
fs/udf/namei.c
426
struct udf_fileident_iter iter;
fs/udf/namei.c
438
err = udf_fiiter_add_entry(inode, NULL, &iter);
fs/udf/namei.c
445
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
fs/udf/namei.c
446
iter.fi.icb.extLocation = cpu_to_lelb(dinfo->i_location);
fs/udf/namei.c
447
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
fs/udf/namei.c
449
iter.fi.fileCharacteristics =
fs/udf/namei.c
451
udf_fiiter_write_fi(&iter, NULL);
fs/udf/namei.c
452
udf_fiiter_release(&iter);
fs/udf/namei.c
455
err = udf_fiiter_add_entry(dir, dentry, &iter);
fs/udf/namei.c
461
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
fs/udf/namei.c
462
iter.fi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
fs/udf/namei.c
463
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
fs/udf/namei.c
465
iter.fi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY;
fs/udf/namei.c
466
udf_fiiter_write_fi(&iter, NULL);
fs/udf/namei.c
467
udf_fiiter_release(&iter);
fs/udf/namei.c
479
struct udf_fileident_iter iter;
fs/udf/namei.c
482
for (ret = udf_fiiter_init(&iter, dir, 0);
fs/udf/namei.c
483
!ret && iter.pos < dir->i_size;
fs/udf/namei.c
484
ret = udf_fiiter_advance(&iter)) {
fs/udf/namei.c
485
if (iter.fi.lengthFileIdent &&
fs/udf/namei.c
486
!(iter.fi.fileCharacteristics & FID_FILE_CHAR_DELETED)) {
fs/udf/namei.c
487
udf_fiiter_release(&iter);
fs/udf/namei.c
491
udf_fiiter_release(&iter);
fs/udf/namei.c
500
struct udf_fileident_iter iter;
fs/udf/namei.c
503
ret = udf_fiiter_find_entry(dir, &dentry->d_name, &iter);
fs/udf/namei.c
508
tloc = lelb_to_cpu(iter.fi.icb.extLocation);
fs/udf/namei.c
514
udf_fiiter_delete_entry(&iter);
fs/udf/namei.c
53
struct udf_fileident_iter *iter)
fs/udf/namei.c
531
udf_fiiter_release(&iter);
fs/udf/namei.c
540
struct udf_fileident_iter iter;
fs/udf/namei.c
543
ret = udf_fiiter_find_entry(dir, &dentry->d_name, &iter);
fs/udf/namei.c
548
tloc = lelb_to_cpu(iter.fi.icb.extLocation);
fs/udf/namei.c
557
udf_fiiter_delete_entry(&iter);
fs/udf/namei.c
565
udf_fiiter_release(&iter);
fs/udf/namei.c
66
for (ret = udf_fiiter_init(iter, dir, 0);
fs/udf/namei.c
67
!ret && iter->pos < dir->i_size;
fs/udf/namei.c
68
ret = udf_fiiter_advance(iter)) {
fs/udf/namei.c
69
if (iter->fi.fileCharacteristics & FID_FILE_CHAR_DELETED) {
fs/udf/namei.c
735
struct udf_fileident_iter iter;
fs/udf/namei.c
738
err = udf_fiiter_add_entry(dir, dentry, &iter);
fs/udf/namei.c
74
if (iter->fi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) {
fs/udf/namei.c
741
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
fs/udf/namei.c
742
iter.fi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
fs/udf/namei.c
744
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
fs/udf/namei.c
747
udf_fiiter_write_fi(&iter, NULL);
fs/udf/namei.c
748
udf_fiiter_release(&iter);
fs/udf/namei.c
79
if ((iter->fi.fileCharacteristics & FID_FILE_CHAR_PARENT) &&
fs/udf/namei.c
83
if (!iter->fi.lengthFileIdent)
fs/udf/namei.c
86
flen = udf_get_filename(sb, iter->name,
fs/udf/namei.c
87
iter->fi.lengthFileIdent, fname, UDF_NAME_LEN);
fs/udf/namei.c
918
struct udf_fileident_iter iter;
fs/udf/namei.c
921
err = udf_fiiter_find_entry(d_inode(child), &dotdot_name, &iter);
fs/udf/namei.c
925
tloc = lelb_to_cpu(iter.fi.icb.extLocation);
fs/udf/namei.c
926
udf_fiiter_release(&iter);
fs/udf/udfdecl.h
245
int udf_fiiter_init(struct udf_fileident_iter *iter, struct inode *dir,
fs/udf/udfdecl.h
247
int udf_fiiter_advance(struct udf_fileident_iter *iter);
fs/udf/udfdecl.h
248
void udf_fiiter_release(struct udf_fileident_iter *iter);
fs/udf/udfdecl.h
249
void udf_fiiter_write_fi(struct udf_fileident_iter *iter, uint8_t *impuse);
fs/udf/udfdecl.h
250
void udf_fiiter_update_elen(struct udf_fileident_iter *iter, uint32_t new_elen);
fs/udf/udfdecl.h
251
int udf_fiiter_append_blk(struct udf_fileident_iter *iter);
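The udfdecl.h prototypes above are the public surface of the iterator; a sketch of the lookup pattern from fs/udf/namei.c, assuming udf_fiiter_find_entry() (declared in the same header) as the entry point:

    struct udf_fileident_iter iter;
    struct kernel_lb_addr loc;
    int err;

    err = udf_fiiter_find_entry(dir, &dentry->d_name, &iter);
    if (err)
        return err;
    loc = lelb_to_cpu(iter.fi.icb.extLocation);    /* ICB of the named inode */
    udf_fiiter_release(&iter);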
fs/xfs/xfs_file.c
234
const struct iomap_iter *iter,
fs/xfs/xfs_file.c
238
iomap_init_ioend(iter->inode, bio, file_offset, IOMAP_IOEND_DIRECT);
fs/xfs/xfs_file.c
662
const struct iomap_iter *iter,
fs/xfs/xfs_file.c
666
struct xfs_mount *mp = XFS_I(iter->inode)->i_mount;
fs/xfs/xfs_file.c
667
struct xfs_zone_alloc_ctx *ac = iter->private;
fs/xfs/xfs_file.c
683
ioend = iomap_init_ioend(iter->inode, bio, file_offset,
fs/xfs/xfs_iomap.c
1591
struct iomap_iter *iter =
fs/xfs/xfs_iomap.c
1593
struct xfs_zone_alloc_ctx *ac = iter->private;
fs/xfs/xfs_iomap.c
1761
struct iomap_iter *iter = container_of(iomap, struct iomap_iter,
fs/xfs/xfs_iomap.c
1857
iomap_fill_dirty_folios(iter, &foffset, fend,
fs/xfs/xfs_iomap.c
2037
struct iomap_iter *iter =
fs/xfs/xfs_iomap.c
2043
offset, offset + length, iter->private);
fs/xfs/xfs_trace.h
1750
TP_PROTO(struct kiocb *iocb, struct iov_iter *iter),
fs/xfs/xfs_trace.h
1751
TP_ARGS(iocb, iter),
fs/xfs/xfs_trace.h
1764
__entry->count = iov_iter_count(iter);
fs/xfs/xfs_trace.h
1776
TP_PROTO(struct kiocb *iocb, struct iov_iter *iter), \
fs/xfs/xfs_trace.h
1777
TP_ARGS(iocb, iter))
fs/xfs/xfs_zone_gc.c
1011
if (!data->iter.victim_rtg) {
fs/xfs/xfs_zone_gc.c
1138
if (data->iter.victim_rtg)
fs/xfs/xfs_zone_gc.c
1139
xfs_rtgroup_rele(data->iter.victim_rtg);
fs/xfs/xfs_zone_gc.c
157
struct xfs_zone_gc_iter iter;
fs/xfs/xfs_zone_gc.c
204
data->iter.recs = kzalloc_objs(*data->iter.recs, XFS_ZONE_GC_RECS);
fs/xfs/xfs_zone_gc.c
205
if (!data->iter.recs)
fs/xfs/xfs_zone_gc.c
230
kfree(data->iter.recs);
fs/xfs/xfs_zone_gc.c
245
kfree(data->iter.recs);
fs/xfs/xfs_zone_gc.c
251
struct xfs_zone_gc_iter *iter,
fs/xfs/xfs_zone_gc.c
255
iter->next_startblock = 0;
fs/xfs/xfs_zone_gc.c
256
iter->rec_count = 0;
fs/xfs/xfs_zone_gc.c
257
iter->rec_idx = 0;
fs/xfs/xfs_zone_gc.c
258
iter->victim_rtg = victim_rtg;
fs/xfs/xfs_zone_gc.c
271
struct xfs_zone_gc_iter *iter = private;
fs/xfs/xfs_zone_gc.c
277
iter->recs[iter->rec_count] = *irec;
fs/xfs/xfs_zone_gc.c
278
if (++iter->rec_count == XFS_ZONE_GC_RECS) {
fs/xfs/xfs_zone_gc.c
279
iter->next_startblock =
fs/xfs/xfs_zone_gc.c
304
struct xfs_zone_gc_iter *iter)
fs/xfs/xfs_zone_gc.c
306
struct xfs_rtgroup *rtg = iter->victim_rtg;
fs/xfs/xfs_zone_gc.c
313
ASSERT(iter->next_startblock <= rtg_blocks(rtg));
fs/xfs/xfs_zone_gc.c
314
if (iter->next_startblock == rtg_blocks(rtg))
fs/xfs/xfs_zone_gc.c
317
ASSERT(iter->next_startblock < rtg_blocks(rtg));
fs/xfs/xfs_zone_gc.c
318
ri_low.rm_startblock = iter->next_startblock;
fs/xfs/xfs_zone_gc.c
321
iter->rec_idx = 0;
fs/xfs/xfs_zone_gc.c
322
iter->rec_count = 0;
fs/xfs/xfs_zone_gc.c
328
xfs_zone_gc_query_cb, iter);
fs/xfs/xfs_zone_gc.c
344
sort(iter->recs, iter->rec_count, sizeof(iter->recs[0]),
fs/xfs/xfs_zone_gc.c
351
iter->next_startblock = rtg_blocks(rtg);
fs/xfs/xfs_zone_gc.c
352
if (iter->rec_count == 0)
fs/xfs/xfs_zone_gc.c
358
atomic_dec(&iter->victim_rtg->rtg_gccount);
fs/xfs/xfs_zone_gc.c
359
xfs_rtgroup_rele(iter->victim_rtg);
fs/xfs/xfs_zone_gc.c
360
iter->victim_rtg = NULL;
fs/xfs/xfs_zone_gc.c
367
struct xfs_zone_gc_iter *iter,
fs/xfs/xfs_zone_gc.c
374
if (!iter->victim_rtg)
fs/xfs/xfs_zone_gc.c
378
if (iter->rec_idx == iter->rec_count) {
fs/xfs/xfs_zone_gc.c
379
error = xfs_zone_gc_query(mp, iter);
fs/xfs/xfs_zone_gc.c
382
if (!iter->victim_rtg)
fs/xfs/xfs_zone_gc.c
386
irec = &iter->recs[iter->rec_idx];
fs/xfs/xfs_zone_gc.c
394
iter->rec_idx++;
fs/xfs/xfs_zone_gc.c
401
iter->rec_idx++;
fs/xfs/xfs_zone_gc.c
416
struct xfs_zone_gc_iter *iter,
fs/xfs/xfs_zone_gc.c
419
struct xfs_rmap_irec *irec = &iter->recs[iter->rec_idx];
fs/xfs/xfs_zone_gc.c
425
iter->rec_idx++;
fs/xfs/xfs_zone_gc.c
496
struct xfs_zone_gc_iter *iter = &data->iter;
fs/xfs/xfs_zone_gc.c
514
xfs_zone_gc_iter_init(iter, victim_rtg);
fs/xfs/xfs_zone_gc.c
664
struct xfs_zone_gc_iter *iter = &data->iter;
fs/xfs/xfs_zone_gc.c
679
if (!xfs_zone_gc_iter_next(mp, iter, &irec, &ip))
fs/xfs/xfs_zone_gc.c
698
xfs_rgbno_to_rtb(iter->victim_rtg, irec.rm_startblock);
fs/xfs/xfs_zone_gc.c
703
chunk->victim_rtg = iter->victim_rtg;
fs/xfs/xfs_zone_gc.c
717
xfs_zone_gc_iter_advance(iter, irec.rm_blockcount);
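The xfs_zone_gc.c hits outline a batched rmap cursor: records are cached in iter->recs[] and refilled from the rmap btree when consumed. A condensed sketch of the consume-or-refill step in xfs_zone_gc_iter_next(), with locking and inode grabbing elided:

    if (iter->rec_idx == iter->rec_count) {
        error = xfs_zone_gc_query(mp, iter);    /* refill recs[] */
        if (error || !iter->victim_rtg)
            return false;    /* error, or victim fully scanned */
    }
    irec = &iter->recs[iter->rec_idx];
    /* ... use *irec, then bump iter->rec_idx to consume it ... */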
include/drm/drm_client.h
238
#define drm_client_for_each_connector_iter(connector, iter) \
include/drm/drm_client.h
239
drm_for_each_connector_iter(connector, iter) \
include/drm/drm_connector.h
2581
struct drm_connector_list_iter *iter);
include/drm/drm_connector.h
2583
drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
include/drm/drm_connector.h
2584
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
include/drm/drm_connector.h
2599
#define drm_for_each_connector_iter(connector, iter) \
include/drm/drm_connector.h
2600
while ((connector = drm_connector_list_iter_next(iter)))
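drm_connector_list_iter is the lock-safe way to walk connectors; a sketch of the begin/iterate/end pattern the macro above is built for:

    struct drm_connector_list_iter conn_iter;
    struct drm_connector *connector;

    drm_connector_list_iter_begin(dev, &conn_iter);
    drm_for_each_connector_iter(connector, &conn_iter) {
        /* connector is only guaranteed alive inside the walk */
    }
    drm_connector_list_iter_end(&conn_iter);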
include/drm/drm_damage_helper.h
46
#define drm_atomic_for_each_plane_damage(iter, rect) \
include/drm/drm_damage_helper.h
47
while (drm_atomic_helper_damage_iter_next(iter, rect))
include/drm/drm_damage_helper.h
74
drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
include/drm/drm_damage_helper.h
78
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
include/drm/drm_print.h
333
drm_coredump_printer(struct drm_print_iterator *iter)
include/drm/drm_print.h
338
.arg = iter,
include/drm/drm_print.h
342
iter->offset = 0;
include/drm/gpu_scheduler.h
715
struct drm_sched_pending_job_iter iter = {
include/drm/gpu_scheduler.h
720
return iter;
include/drm/gpu_scheduler.h
725
__drm_sched_pending_job_iter_end(const struct drm_sched_pending_job_iter iter)
include/drm/gpu_scheduler.h
727
WARN_ON(!drm_sched_is_stopped(iter.sched));
include/linux/backing-file.h
27
ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
include/linux/backing-file.h
30
ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
include/linux/bio-integrity.h
36
#define bip_for_each_vec(bvl, bip, iter) \
include/linux/bio-integrity.h
37
for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
include/linux/bio-integrity.h
78
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
include/linux/bio-integrity.h
93
static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
include/linux/bio.h
100
bvec_advance(&bio->bi_io_vec[iter->idx], iter);
include/linux/bio.h
108
#define bio_for_each_segment_all(bvl, bio, iter) \
include/linux/bio.h
109
for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
include/linux/bio.h
112
struct bvec_iter *iter, unsigned int bytes)
include/linux/bio.h
114
iter->bi_sector += bytes >> 9;
include/linux/bio.h
117
iter->bi_size -= bytes;
include/linux/bio.h
119
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
include/linux/bio.h
125
struct bvec_iter *iter,
include/linux/bio.h
128
iter->bi_sector += bytes >> 9;
include/linux/bio.h
131
iter->bi_size -= bytes;
include/linux/bio.h
133
bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
include/linux/bio.h
158
#define __bio_for_each_segment(bvl, bio, iter, start) \
include/linux/bio.h
159
for (iter = (start); \
include/linux/bio.h
160
(iter).bi_size && \
include/linux/bio.h
161
((bvl = bio_iter_iovec((bio), (iter))), 1); \
include/linux/bio.h
162
bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
include/linux/bio.h
164
#define bio_for_each_segment(bvl, bio, iter) \
include/linux/bio.h
165
__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
include/linux/bio.h
167
#define __bio_for_each_bvec(bvl, bio, iter, start) \
include/linux/bio.h
168
for (iter = (start); \
include/linux/bio.h
169
(iter).bi_size && \
include/linux/bio.h
170
((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
include/linux/bio.h
171
bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
include/linux/bio.h
174
#define bio_for_each_bvec(bvl, bio, iter) \
include/linux/bio.h
175
__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
include/linux/bio.h
185
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
include/linux/bio.h
191
struct bvec_iter iter;
include/linux/bio.h
207
bio_for_each_segment(bv, bio, iter)
include/linux/bio.h
23
#define bio_iter_iovec(bio, iter) \
include/linux/bio.h
24
bvec_iter_bvec((bio)->bi_io_vec, (iter))
include/linux/bio.h
26
#define bio_iter_page(bio, iter) \
include/linux/bio.h
27
bvec_iter_page((bio)->bi_io_vec, (iter))
include/linux/bio.h
28
#define bio_iter_len(bio, iter) \
include/linux/bio.h
29
bvec_iter_len((bio)->bi_io_vec, (iter))
include/linux/bio.h
30
#define bio_iter_offset(bio, iter) \
include/linux/bio.h
31
bvec_iter_offset((bio)->bi_io_vec, (iter))
include/linux/bio.h
37
#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
include/linux/bio.h
38
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
include/linux/bio.h
393
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
include/linux/bio.h
395
if (iov_iter_is_bvec(iter))
include/linux/bio.h
397
return iov_iter_npages(iter, max_segs);
include/linux/bio.h
409
bio_iov_bounce_nr_vecs(struct iov_iter *iter, blk_opf_t op)
include/linux/bio.h
419
return iov_iter_npages(iter, BIO_MAX_VECS);
include/linux/bio.h
420
return iov_iter_npages(iter, BIO_MAX_VECS - 1) + 1;
include/linux/bio.h
469
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
include/linux/bio.h
472
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
include/linux/bio.h
477
int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter);
include/linux/bio.h
485
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
include/linux/bio.h
95
struct bvec_iter_all *iter)
include/linux/bio.h
97
if (iter->idx >= bio->bi_vcnt)
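bio_for_each_segment() above yields single-page (or smaller) bio_vecs without mutating the bio; a sketch of a read-side walk, assuming the data needs a temporary kernel mapping:

    struct bio_vec bvec;
    struct bvec_iter iter;

    bio_for_each_segment(bvec, bio, iter) {
        void *p = bvec_kmap_local(&bvec);

        /* ... process bvec.bv_len bytes at p ... */
        kunmap_local(p);
    }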
include/linux/blk-integrity.h
132
struct blk_dma_iter *iter)
include/linux/blk-integrity.h
137
struct device *dma_dev, struct blk_dma_iter *iter)
include/linux/blk-integrity.h
43
struct blk_dma_iter *iter);
include/linux/blk-integrity.h
45
struct device *dma_dev, struct blk_dma_iter *iter);
include/linux/blk-mq-dma.h
25
struct blk_map_iter iter;
include/linux/blk-mq-dma.h
29
struct dma_iova_state *state, struct blk_dma_iter *iter);
include/linux/blk-mq-dma.h
31
struct blk_dma_iter *iter);
include/linux/blk-mq-dma.h
9
struct bvec_iter iter;
include/linux/blk-mq.h
1084
struct bvec_iter iter;
include/linux/blk-mq.h
1094
bio_for_each_segment(bvl, _iter.bio, _iter.iter)
include/linux/blk-mq.h
1098
bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
include/linux/blk-mq.h
1102
bio_iter_last(bvec, _iter.iter))
include/linux/bpf_types.h
143
BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
include/linux/bpf_verifier.h
454
#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
include/linux/bpf_verifier.h
455
for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
include/linux/bpf_verifier.h
456
iter < frame->allocated_stack / BPF_REG_SIZE; \
include/linux/bpf_verifier.h
457
iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
include/linux/bpf_verifier.h
94
} iter;
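bpf_for_each_spilled_reg() above hands back NULL for stack slots without a spilled register, so callers filter first; a sketch of the usual shape:

    struct bpf_reg_state *reg;
    int i;

    bpf_for_each_spilled_reg(i, frame, reg, mask) {
        if (!reg)
            continue;    /* slot holds no spilled register */
        /* ... inspect reg->type, reg->id ... */
    }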
include/linux/bvec.h
111
#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
include/linux/bvec.h
114
#define mp_bvec_iter_page(bvec, iter) \
include/linux/bvec.h
115
(__bvec_iter_bvec((bvec), (iter))->bv_page)
include/linux/bvec.h
117
#define mp_bvec_iter_len(bvec, iter) \
include/linux/bvec.h
118
min((iter).bi_size, \
include/linux/bvec.h
119
__bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
include/linux/bvec.h
121
#define mp_bvec_iter_offset(bvec, iter) \
include/linux/bvec.h
122
(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
include/linux/bvec.h
124
#define mp_bvec_iter_page_idx(bvec, iter) \
include/linux/bvec.h
125
(mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
include/linux/bvec.h
127
#define mp_bvec_iter_bvec(bvec, iter) \
include/linux/bvec.h
129
.bv_page = mp_bvec_iter_page((bvec), (iter)), \
include/linux/bvec.h
130
.bv_len = mp_bvec_iter_len((bvec), (iter)), \
include/linux/bvec.h
131
.bv_offset = mp_bvec_iter_offset((bvec), (iter)), \
include/linux/bvec.h
135
#define bvec_iter_offset(bvec, iter) \
include/linux/bvec.h
136
(mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
include/linux/bvec.h
138
#define bvec_iter_len(bvec, iter) \
include/linux/bvec.h
139
min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \
include/linux/bvec.h
140
PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
include/linux/bvec.h
142
#define bvec_iter_page(bvec, iter) \
include/linux/bvec.h
143
(mp_bvec_iter_page((bvec), (iter)) + \
include/linux/bvec.h
144
mp_bvec_iter_page_idx((bvec), (iter)))
include/linux/bvec.h
146
#define bvec_iter_bvec(bvec, iter) \
include/linux/bvec.h
148
.bv_page = bvec_iter_page((bvec), (iter)), \
include/linux/bvec.h
149
.bv_len = bvec_iter_len((bvec), (iter)), \
include/linux/bvec.h
150
.bv_offset = bvec_iter_offset((bvec), (iter)), \
include/linux/bvec.h
154
struct bvec_iter *iter, unsigned bytes)
include/linux/bvec.h
156
unsigned int idx = iter->bi_idx;
include/linux/bvec.h
158
if (WARN_ONCE(bytes > iter->bi_size,
include/linux/bvec.h
160
iter->bi_size = 0;
include/linux/bvec.h
164
iter->bi_size -= bytes;
include/linux/bvec.h
165
bytes += iter->bi_bvec_done;
include/linux/bvec.h
172
iter->bi_idx = idx;
include/linux/bvec.h
173
iter->bi_bvec_done = bytes;
include/linux/bvec.h
182
struct bvec_iter *iter, unsigned int bytes)
include/linux/bvec.h
184
unsigned int done = iter->bi_bvec_done + bytes;
include/linux/bvec.h
186
if (done == bv[iter->bi_idx].bv_len) {
include/linux/bvec.h
188
iter->bi_idx++;
include/linux/bvec.h
190
iter->bi_bvec_done = done;
include/linux/bvec.h
191
iter->bi_size -= bytes;
include/linux/bvec.h
194
#define for_each_bvec(bvl, bio_vec, iter, start) \
include/linux/bvec.h
195
for (iter = (start); \
include/linux/bvec.h
196
(iter).bi_size && \
include/linux/bvec.h
197
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
include/linux/bvec.h
198
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
include/linux/bvec.h
200
#define for_each_mp_bvec(bvl, bio_vec, iter, start) \
include/linux/bvec.h
201
for (iter = (start); \
include/linux/bvec.h
202
(iter).bi_size && \
include/linux/bvec.h
203
((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \
include/linux/bvec.h
204
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
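for_each_bvec() above walks an arbitrary bio_vec array from a caller-supplied starting iterator; a sketch, assuming a bvecs array and an initialized start iterator:

    struct bio_vec bv;
    struct bvec_iter bi;

    for_each_bvec(bv, bvecs, bi, start) {
        /* bv never spans more than one page per step */
    }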
include/linux/ceph/messenger.h
134
struct bvec_iter iter;
include/linux/ceph/messenger.h
141
BUG_ON(!(it)->iter.bi_size); \
include/linux/ceph/messenger.h
142
__cur_n = min((it)->iter.bi_size, __n); \
include/linux/ceph/messenger.h
144
bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \
include/linux/ceph/messenger.h
145
if (!(it)->iter.bi_size && (it)->bio->bi_next) { \
include/linux/ceph/messenger.h
148
(it)->iter = (it)->bio->bi_iter; \
include/linux/ceph/messenger.h
168
__cur_iter = (it)->iter; \
include/linux/ceph/messenger.h
178
struct bvec_iter iter;
include/linux/ceph/messenger.h
182
BUG_ON((n) > (it)->iter.bi_size); \
include/linux/ceph/messenger.h
184
bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \
include/linux/ceph/messenger.h
201
__cur_iter = (it)->iter; \
include/linux/ceph/messenger.h
208
BUG_ON((n) > (it)->iter.bi_size); \
include/linux/ceph/messenger.h
209
(it)->iter.bi_size = (n); \
include/linux/ceph/messenger.h
229
struct iov_iter iter;
include/linux/ceph/messenger.h
617
struct iov_iter *iter);
include/linux/ceph/osd_client.h
136
struct iov_iter iter;
include/linux/ceph/osd_client.h
507
unsigned int which, struct iov_iter *iter);
include/linux/codetag.h
80
struct codetag *codetag_next_ct(struct codetag_iterator *iter);
include/linux/crash_dump.h
182
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
include/linux/crash_dump.h
185
static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
include/linux/crash_dump.h
33
ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
include/linux/dax.h
257
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
include/linux/dax.h
37
void *addr, size_t bytes, struct iov_iter *iter);
include/linux/devcoredump.h
29
struct scatterlist *iter;
include/linux/devcoredump.h
33
iter = table;
include/linux/devcoredump.h
34
for_each_sg(table, iter, sg_nents(table), i) {
include/linux/devcoredump.h
35
page = sg_page(iter);
include/linux/devcoredump.h
41
iter = table;
include/linux/devcoredump.h
43
while (!sg_is_last(iter)) {
include/linux/devcoredump.h
44
iter++;
include/linux/devcoredump.h
45
if (sg_is_chain(iter)) {
include/linux/devcoredump.h
46
iter = sg_chain_ptr(iter);
include/linux/devcoredump.h
48
delete_iter = iter;
include/linux/device/class.h
88
void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
include/linux/device/class.h
90
struct device *class_dev_iter_next(struct class_dev_iter *iter);
include/linux/device/class.h
91
void class_dev_iter_exit(struct class_dev_iter *iter);
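The class_dev_iter trio above is used as init/next/exit around a while loop; a sketch (a NULL start device and NULL type filter visit every device in the class):

    struct class_dev_iter iter;
    struct device *dev;

    class_dev_iter_init(&iter, class, NULL, NULL);
    while ((dev = class_dev_iter_next(&iter))) {
        /* a reference to dev is held across the body */
    }
    class_dev_iter_exit(&iter);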
include/linux/dma-fence-chain.h
120
#define dma_fence_chain_for_each(iter, head) \
include/linux/dma-fence-chain.h
121
for (iter = dma_fence_get(head); iter; \
include/linux/dma-fence-chain.h
122
iter = dma_fence_chain_walk(iter))
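dma_fence_chain_for_each() above takes a reference per step and drops it when walking on; a sketch (a caller that breaks out early must dma_fence_put() the current fence itself):

    struct dma_fence *fence;

    dma_fence_chain_for_each(fence, chain) {
        /* fence is referenced here; the walk releases it on advance */
    }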
include/linux/fs.h
3050
ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
include/linux/fs.h
3053
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
include/linux/fs.h
3055
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
include/linux/fs.h
3058
struct iov_iter *iter);
include/linux/fs.h
3060
struct iov_iter *iter);
include/linux/fs.h
3104
struct block_device *bdev, struct iov_iter *iter,
include/linux/fs.h
3111
struct iov_iter *iter,
include/linux/fs.h
3114
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
include/linux/fs.h
3248
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
include/linux/fs.h
3632
int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
include/linux/fs.h
426
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
include/linux/fs.h
443
int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
include/linux/fscache.h
506
struct iov_iter *iter,
include/linux/fscache.h
512
return ops->read(cres, start_pos, iter, read_hole,
include/linux/fscache.h
565
struct iov_iter *iter,
include/linux/fscache.h
570
return ops->write(cres, start_pos, iter, term_func, term_func_priv);
include/linux/fsnotify_backend.h
505
static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type,
include/linux/fsnotify_backend.h
509
*markp = fsnotify_iter_mark(iter, type);
include/linux/fsnotify_backend.h
531
#define fsnotify_foreach_iter_mark_type(iter, mark, type) \
include/linux/fsnotify_backend.h
533
type = fsnotify_iter_step(iter, type, &mark), \
include/linux/ftrace.h
842
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
include/linux/ftrace.h
843
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
include/linux/ftrace.h
845
#define for_ftrace_rec_iter(iter) \
include/linux/ftrace.h
846
for (iter = ftrace_rec_iter_start(); \
include/linux/ftrace.h
847
iter; \
include/linux/ftrace.h
848
iter = ftrace_rec_iter_next(iter))
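for_ftrace_rec_iter() above is a plain opaque-cursor loop; a sketch of the record walk:

    struct ftrace_rec_iter *iter;

    for_ftrace_rec_iter(iter) {
        struct dyn_ftrace *rec = ftrace_rec_iter_record(iter);

        /* ... inspect or patch rec->ip ... */
    }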
include/linux/generic-radix-tree.h
312
static inline void __genradix_iter_advance(struct genradix_iter *iter,
include/linux/generic-radix-tree.h
315
if (iter->offset + obj_size < iter->offset) {
include/linux/generic-radix-tree.h
316
iter->offset = SIZE_MAX;
include/linux/generic-radix-tree.h
317
iter->pos = SIZE_MAX;
include/linux/generic-radix-tree.h
321
iter->offset += obj_size;
include/linux/generic-radix-tree.h
324
(iter->offset & (GENRADIX_NODE_SIZE - 1)) + obj_size > GENRADIX_NODE_SIZE)
include/linux/generic-radix-tree.h
325
iter->offset = round_up(iter->offset, GENRADIX_NODE_SIZE);
include/linux/generic-radix-tree.h
327
iter->pos++;
include/linux/generic-radix-tree.h
333
static inline void __genradix_iter_rewind(struct genradix_iter *iter,
include/linux/generic-radix-tree.h
336
if (iter->offset == 0 ||
include/linux/generic-radix-tree.h
337
iter->offset == SIZE_MAX) {
include/linux/generic-radix-tree.h
338
iter->offset = SIZE_MAX;
include/linux/generic-radix-tree.h
342
if ((iter->offset & (GENRADIX_NODE_SIZE - 1)) == 0)
include/linux/generic-radix-tree.h
343
iter->offset -= GENRADIX_NODE_SIZE % obj_size;
include/linux/generic-radix-tree.h
345
iter->offset -= obj_size;
include/linux/generic-radix-tree.h
346
iter->pos--;
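__genradix_iter_advance() above handles both offset overflow and node-boundary rounding; callers normally go through the genradix_for_each() wrapper from the same header (not captured by this symbol search). A hedged sketch, assuming that wrapper and a hypothetical element type:

    struct genradix_iter iter;
    struct foo *p;    /* hypothetical element type */

    genradix_for_each(&radix, iter, p) {
        /* iter.pos is the element index, p the element */
    }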
include/linux/interval_tree.h
75
void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
include/linux/io_uring/cmd.h
105
int ddir, struct iov_iter *iter,
include/linux/io_uring/cmd.h
46
struct iov_iter *iter,
include/linux/io_uring/cmd.h
52
int ddir, struct iov_iter *iter,
include/linux/io_uring/cmd.h
97
struct iov_iter *iter, struct io_uring_cmd *ioucmd,
include/linux/iomap.h
154
struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
include/linux/iomap.h
183
int (*read_folio_range)(const struct iomap_iter *iter,
include/linux/iomap.h
254
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
include/linux/iomap.h
255
int iomap_iter_advance(struct iomap_iter *iter, u64 count);
include/linux/iomap.h
266
static inline u64 iomap_length_trim(const struct iomap_iter *iter, loff_t pos,
include/linux/iomap.h
269
u64 end = iter->iomap.offset + iter->iomap.length;
include/linux/iomap.h
271
if (iter->srcmap.type != IOMAP_HOLE)
include/linux/iomap.h
272
end = min(end, iter->srcmap.offset + iter->srcmap.length);
include/linux/iomap.h
282
static inline u64 iomap_length(const struct iomap_iter *iter)
include/linux/iomap.h
284
return iomap_length_trim(iter, iter->pos, iter->len);
include/linux/iomap.h
290
static inline int iomap_iter_advance_full(struct iomap_iter *iter)
include/linux/iomap.h
292
return iomap_iter_advance(iter, iomap_length(iter));
include/linux/iomap.h
338
static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
include/linux/iomap.h
340
return (iter->iomap.flags & IOMAP_F_SHARED) &&
include/linux/iomap.h
341
iter->srcmap.type == IOMAP_MAPPED;
include/linux/iomap.h
352
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
include/linux/iomap.h
359
unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
include/linux/iomap.h
507
int (*read_folio_range)(const struct iomap_iter *iter,
include/linux/iomap.h
527
void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
include/linux/iomap.h
578
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
include/linux/iomap.h
581
struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
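iomap_iter() above is called in a loop: each successful pass publishes one mapping in iter.iomap, and the body records progress before the next pass. A hedged sketch, using iomap_iter_advance() with the signature listed above and eliding the actual per-extent work:

    struct iomap_iter iter = {
        .inode = inode,
        .pos   = pos,
        .len   = len,
    };
    int ret;

    while ((ret = iomap_iter(&iter, ops)) > 0) {
        u64 bytes = iomap_length(&iter);    /* bound of the current mapping */

        /* ... process up to bytes of the mapping at iter.iomap ... */
        ret = iomap_iter_advance(&iter, bytes);
        if (ret)
            break;
    }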
include/linux/iov_iter.h
100
iter->iov_offset = skip;
include/linux/iov_iter.h
101
iter->count -= progress;
include/linux/iov_iter.h
109
size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
include/linux/iov_iter.h
112
const struct bio_vec *p = iter->bvec;
include/linux/iov_iter.h
113
size_t progress = 0, skip = iter->iov_offset;
include/linux/iov_iter.h
137
iter->nr_segs -= p - iter->bvec;
include/linux/iov_iter.h
138
iter->bvec = p;
include/linux/iov_iter.h
139
iter->iov_offset = skip;
include/linux/iov_iter.h
140
iter->count -= progress;
include/linux/iov_iter.h
148
size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
include/linux/iov_iter.h
151
const struct folio_queue *folioq = iter->folioq;
include/linux/iov_iter.h
152
unsigned int slot = iter->folioq_slot;
include/linux/iov_iter.h
153
size_t progress = 0, skip = iter->iov_offset;
include/linux/iov_iter.h
193
iter->folioq_slot = slot;
include/linux/iov_iter.h
194
iter->folioq = folioq;
include/linux/iov_iter.h
195
iter->iov_offset = skip;
include/linux/iov_iter.h
196
iter->count -= progress;
include/linux/iov_iter.h
204
size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
include/linux/iov_iter.h
209
loff_t start = iter->xarray_start + iter->iov_offset;
include/linux/iov_iter.h
211
XA_STATE(xas, iter->xarray, index);
include/linux/iov_iter.h
24
size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
include/linux/iov_iter.h
248
iter->iov_offset += progress;
include/linux/iov_iter.h
249
iter->count -= progress;
include/linux/iov_iter.h
257
size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
include/linux/iov_iter.h
262
iter->count -= progress;
include/linux/iov_iter.h
27
void __user *base = iter->ubuf;
include/linux/iov_iter.h
293
size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
include/linux/iov_iter.h
296
if (unlikely(iter->count < len))
include/linux/iov_iter.h
297
len = iter->count;
include/linux/iov_iter.h
30
remain = step(base + iter->iov_offset, 0, len, priv, priv2);
include/linux/iov_iter.h
301
if (likely(iter_is_ubuf(iter)))
include/linux/iov_iter.h
302
return iterate_ubuf(iter, len, priv, priv2, ustep);
include/linux/iov_iter.h
303
if (likely(iter_is_iovec(iter)))
include/linux/iov_iter.h
304
return iterate_iovec(iter, len, priv, priv2, ustep);
include/linux/iov_iter.h
305
if (iov_iter_is_bvec(iter))
include/linux/iov_iter.h
306
return iterate_bvec(iter, len, priv, priv2, step);
include/linux/iov_iter.h
307
if (iov_iter_is_kvec(iter))
include/linux/iov_iter.h
308
return iterate_kvec(iter, len, priv, priv2, step);
include/linux/iov_iter.h
309
if (iov_iter_is_folioq(iter))
include/linux/iov_iter.h
310
return iterate_folioq(iter, len, priv, priv2, step);
include/linux/iov_iter.h
311
if (iov_iter_is_xarray(iter))
include/linux/iov_iter.h
312
return iterate_xarray(iter, len, priv, priv2, step);
include/linux/iov_iter.h
313
return iterate_discard(iter, len, priv, priv2, step);
include/linux/iov_iter.h
32
iter->iov_offset += progress;
include/linux/iov_iter.h
327
size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
include/linux/iov_iter.h
33
iter->count -= progress;
include/linux/iov_iter.h
330
return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
include/linux/iov_iter.h
362
size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
include/linux/iov_iter.h
365
if (unlikely(iter->count < len))
include/linux/iov_iter.h
366
len = iter->count;
include/linux/iov_iter.h
369
if (iov_iter_is_bvec(iter))
include/linux/iov_iter.h
370
return iterate_bvec(iter, len, priv, priv2, step);
include/linux/iov_iter.h
371
if (iov_iter_is_kvec(iter))
include/linux/iov_iter.h
372
return iterate_kvec(iter, len, priv, priv2, step);
include/linux/iov_iter.h
373
if (iov_iter_is_folioq(iter))
include/linux/iov_iter.h
374
return iterate_folioq(iter, len, priv, priv2, step);
include/linux/iov_iter.h
375
if (iov_iter_is_xarray(iter))
include/linux/iov_iter.h
376
return iterate_xarray(iter, len, priv, priv2, step);
include/linux/iov_iter.h
377
return iterate_discard(iter, len, priv, priv2, step);
include/linux/iov_iter.h
41
size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
include/linux/iov_iter.h
44
const struct iovec *p = iter->__iov;
include/linux/iov_iter.h
45
size_t progress = 0, skip = iter->iov_offset;
include/linux/iov_iter.h
64
iter->nr_segs -= p - iter->__iov;
include/linux/iov_iter.h
65
iter->__iov = p;
include/linux/iov_iter.h
66
iter->iov_offset = skip;
include/linux/iov_iter.h
67
iter->count -= progress;
include/linux/iov_iter.h
75
size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
include/linux/iov_iter.h
78
const struct kvec *p = iter->kvec;
include/linux/iov_iter.h
79
size_t progress = 0, skip = iter->iov_offset;
include/linux/iov_iter.h
98
iter->nr_segs -= p - iter->kvec;
include/linux/iov_iter.h
99
iter->kvec = p;
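The iterate_* helpers above are the internal engine; the everyday producer-side API is to build an iterator over kernel memory and copy through it. A minimal sketch, assuming caller-provided kbuf/buflen and src/n:

    struct kvec kv = { .iov_base = kbuf, .iov_len = buflen };
    struct iov_iter iter;
    size_t copied;

    iov_iter_kvec(&iter, ITER_DEST, &kv, 1, buflen);
    copied = copy_to_iter(src, n, &iter);    /* returns bytes actually copied */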
include/linux/kmsg_dump.h
101
static inline void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
include/linux/kmsg_dump.h
71
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
include/linux/kmsg_dump.h
74
bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
include/linux/kmsg_dump.h
77
void kmsg_dump_rewind(struct kmsg_dump_iter *iter);
include/linux/kmsg_dump.h
89
static inline bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
include/linux/kmsg_dump.h
95
static inline bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
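The kmsg_dump API above pairs a rewind with repeated line reads; a sketch of draining the log into a fixed buffer:

    struct kmsg_dump_iter iter;
    char line[256];
    size_t len;

    kmsg_dump_rewind(&iter);
    while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) {
        /* consume len bytes of line (syslog-prefixed) */
    }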
include/linux/kvm_host.h
1124
static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
include/linux/kvm_host.h
1126
iter->node = rb_next(iter->node);
include/linux/kvm_host.h
1127
if (!iter->node)
include/linux/kvm_host.h
1130
iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
include/linux/kvm_host.h
1133
static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
include/linux/kvm_host.h
1141
iter->slots = slots;
include/linux/kvm_host.h
1147
iter->node = NULL;
include/linux/kvm_host.h
1151
iter->node = tmp;
include/linux/kvm_host.h
1162
if (iter->node) {
include/linux/kvm_host.h
1168
tmp = rb_prev(iter->node);
include/linux/kvm_host.h
1170
iter->node = tmp;
include/linux/kvm_host.h
1173
iter->node = rb_last(&slots->gfn_tree);
include/linux/kvm_host.h
1176
if (iter->node) {
include/linux/kvm_host.h
1177
iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
include/linux/kvm_host.h
1188
if (iter->slot->base_gfn + iter->slot->npages <= start)
include/linux/kvm_host.h
1189
kvm_memslot_iter_next(iter);
include/linux/kvm_host.h
1193
static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
include/linux/kvm_host.h
1195
if (!iter->node)
include/linux/kvm_host.h
1202
return iter->slot->base_gfn < end;
include/linux/kvm_host.h
1206
#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
include/linux/kvm_host.h
1207
for (kvm_memslot_iter_start(iter, slots, start); \
include/linux/kvm_host.h
1208
kvm_memslot_iter_is_valid(iter, end); \
include/linux/kvm_host.h
1209
kvm_memslot_iter_next(iter))
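kvm_for_each_memslot_in_gfn_range() above visits every memslot overlapping a gfn range; a sketch:

    struct kvm_memslot_iter iter;

    kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
        struct kvm_memory_slot *slot = iter.slot;

        /* slot overlaps [start, end) */
    }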
include/linux/memcontrol.h
116
struct mem_cgroup_reclaim_iter iter;
include/linux/mmap_lock.h
484
struct vma_iterator *iter,
include/linux/mroute_base.h
302
struct mr_table *(*iter)(struct net *net,
include/linux/mroute_base.h
353
struct mr_table *(*iter)(struct net *net,
include/linux/mroute_base.h
399
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
include/linux/mroute_base.h
442
static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
include/linux/mtd/nand.h
1000
iter->dataleft);
include/linux/mtd/nand.h
1001
iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
include/linux/mtd/nand.h
1002
iter->oobleft);
include/linux/mtd/nand.h
1014
struct nand_io_iter *iter)
include/linux/mtd/nand.h
1016
nanddev_pos_next_eraseblock(nand, &iter->req.pos);
include/linux/mtd/nand.h
1017
iter->dataleft -= iter->req.datalen;
include/linux/mtd/nand.h
1018
iter->req.databuf.in += iter->req.datalen;
include/linux/mtd/nand.h
1019
iter->req.dataoffs = 0;
include/linux/mtd/nand.h
1020
iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
include/linux/mtd/nand.h
1021
iter->dataleft);
include/linux/mtd/nand.h
1036
const struct nand_io_iter *iter)
include/linux/mtd/nand.h
1038
if (iter->dataleft || iter->oobleft)
include/linux/mtd/nand.h
1054
#define nanddev_io_for_each_page(nand, type, start, req, iter) \
include/linux/mtd/nand.h
1055
for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
include/linux/mtd/nand.h
1056
!nanddev_io_iter_end(nand, iter); \
include/linux/mtd/nand.h
1057
nanddev_io_iter_next_page(nand, iter))
include/linux/mtd/nand.h
1069
#define nanddev_io_for_each_block(nand, type, start, req, iter) \
include/linux/mtd/nand.h
1070
for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
include/linux/mtd/nand.h
1071
!nanddev_io_iter_end(nand, iter); \
include/linux/mtd/nand.h
1072
nanddev_io_iter_next_block(nand, iter))
include/linux/mtd/nand.h
924
struct nand_io_iter *iter)
include/linux/mtd/nand.h
928
iter->req.type = reqtype;
include/linux/mtd/nand.h
929
iter->req.mode = req->mode;
include/linux/mtd/nand.h
930
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
include/linux/mtd/nand.h
931
iter->req.ooboffs = req->ooboffs;
include/linux/mtd/nand.h
932
iter->oobbytes_per_page = mtd_oobavail(mtd, req);
include/linux/mtd/nand.h
933
iter->dataleft = req->len;
include/linux/mtd/nand.h
934
iter->oobleft = req->ooblen;
include/linux/mtd/nand.h
935
iter->req.databuf.in = req->datbuf;
include/linux/mtd/nand.h
936
iter->req.datalen = min_t(unsigned int,
include/linux/mtd/nand.h
937
nand->memorg.pagesize - iter->req.dataoffs,
include/linux/mtd/nand.h
938
iter->dataleft);
include/linux/mtd/nand.h
939
iter->req.oobbuf.in = req->oobbuf;
include/linux/mtd/nand.h
940
iter->req.ooblen = min_t(unsigned int,
include/linux/mtd/nand.h
941
iter->oobbytes_per_page - iter->req.ooboffs,
include/linux/mtd/nand.h
942
iter->oobleft);
include/linux/mtd/nand.h
943
iter->req.continuous = false;
include/linux/mtd/nand.h
961
struct nand_io_iter *iter)
include/linux/mtd/nand.h
965
iter->req.type = reqtype;
include/linux/mtd/nand.h
966
iter->req.mode = req->mode;
include/linux/mtd/nand.h
967
iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
include/linux/mtd/nand.h
968
iter->req.ooboffs = 0;
include/linux/mtd/nand.h
969
iter->oobbytes_per_page = 0;
include/linux/mtd/nand.h
970
iter->dataleft = req->len;
include/linux/mtd/nand.h
971
iter->oobleft = 0;
include/linux/mtd/nand.h
972
iter->req.databuf.in = req->datbuf;
include/linux/mtd/nand.h
973
offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
include/linux/mtd/nand.h
974
iter->req.datalen = min_t(unsigned int,
include/linux/mtd/nand.h
976
iter->dataleft);
include/linux/mtd/nand.h
977
iter->req.oobbuf.in = NULL;
include/linux/mtd/nand.h
978
iter->req.ooblen = 0;
include/linux/mtd/nand.h
979
iter->req.continuous = true;
include/linux/mtd/nand.h
990
struct nand_io_iter *iter)
include/linux/mtd/nand.h
992
nanddev_pos_next_page(nand, &iter->req.pos);
include/linux/mtd/nand.h
993
iter->dataleft -= iter->req.datalen;
include/linux/mtd/nand.h
994
iter->req.databuf.in += iter->req.datalen;
include/linux/mtd/nand.h
995
iter->oobleft -= iter->req.ooblen;
include/linux/mtd/nand.h
996
iter->req.oobbuf.in += iter->req.ooblen;
include/linux/mtd/nand.h
997
iter->req.dataoffs = 0;
include/linux/mtd/nand.h
998
iter->req.ooboffs = 0;
include/linux/mtd/nand.h
999
iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
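nanddev_io_for_each_page() above splits one MTD request into per-page sub-requests; a hedged sketch, assuming a populated request req, a start offset offs, and a hypothetical per-page handler:

    struct nand_io_iter iter;
    int ret;

    nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, offs, req, &iter) {
        ret = write_one_page(nand, &iter.req);    /* hypothetical helper */
        if (ret)
            break;
    }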
include/linux/netdevice.h
5135
struct list_head **iter);
include/linux/netdevice.h
5138
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
include/linux/netdevice.h
5139
for (iter = &(dev)->adj_list.upper, \
include/linux/netdevice.h
5140
updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
include/linux/netdevice.h
5142
updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
include/linux/netdevice.h
5155
struct list_head **iter);
include/linux/netdevice.h
5157
struct list_head **iter);
include/linux/netdevice.h
5159
#define netdev_for_each_lower_private(dev, priv, iter) \
include/linux/netdevice.h
5160
for (iter = (dev)->adj_list.lower.next, \
include/linux/netdevice.h
5161
priv = netdev_lower_get_next_private(dev, &(iter)); \
include/linux/netdevice.h
5163
priv = netdev_lower_get_next_private(dev, &(iter)))
include/linux/netdevice.h
5165
#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
include/linux/netdevice.h
5166
for (iter = &(dev)->adj_list.lower, \
include/linux/netdevice.h
5167
priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
include/linux/netdevice.h
5169
priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
include/linux/netdevice.h
5172
struct list_head **iter);
include/linux/netdevice.h
5174
#define netdev_for_each_lower_dev(dev, ldev, iter) \
include/linux/netdevice.h
5175
for (iter = (dev)->adj_list.lower.next, \
include/linux/netdevice.h
5176
ldev = netdev_lower_get_next(dev, &(iter)); \
include/linux/netdevice.h
5178
ldev = netdev_lower_get_next(dev, &(iter)))
include/linux/netdevice.h
5181
struct list_head **iter);
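The netdev_for_each_*_dev macros above walk a device's adjacency lists, with *iter as the cursor. A minimal sketch counting lower devices; the non-RCU variant requires RTNL:

#include <linux/netdevice.h>

/* Sketch: count the lower devices of @dev under RTNL protection. */
static int count_lower_devs(struct net_device *dev)
{
        struct net_device *ldev;
        struct list_head *iter;
        int n = 0;

        ASSERT_RTNL();
        netdev_for_each_lower_dev(dev, ldev, iter)
                n++;

        return n;
}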
include/linux/netfs.h
333
struct iov_iter *iter,
include/linux/netfs.h
341
struct iov_iter *iter,
include/linux/netfs.h
391
ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter);
include/linux/netfs.h
392
ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
include/linux/netfs.h
393
ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
include/linux/netfs.h
394
ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
include/linux/netfs.h
397
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
include/linux/netfs.h
402
ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
include/linux/netfs.h
408
ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter);
include/linux/netfs.h
411
struct iov_iter *iter);
include/linux/netfs.h
441
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
include/linux/nfs_fs.h
566
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter);
include/linux/nfs_fs.h
568
struct iov_iter *iter, bool swap);
include/linux/nfs_fs.h
570
struct iov_iter *iter, bool swap);
include/linux/page_ext.h
128
static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
include/linux/page_ext.h
131
iter->index = 0;
include/linux/page_ext.h
132
iter->start_pfn = pfn;
include/linux/page_ext.h
133
iter->page_ext = page_ext_lookup(pfn);
include/linux/page_ext.h
135
return iter->page_ext;
include/linux/page_ext.h
146
static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
include/linux/page_ext.h
150
if (WARN_ON_ONCE(!iter->page_ext))
include/linux/page_ext.h
153
iter->index++;
include/linux/page_ext.h
154
pfn = iter->start_pfn + iter->index;
include/linux/page_ext.h
157
iter->page_ext = page_ext_next(iter->page_ext);
include/linux/page_ext.h
159
iter->page_ext = page_ext_lookup(pfn);
include/linux/page_ext.h
161
return iter->page_ext;
include/linux/page_ext.h
170
static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
include/linux/page_ext.h
172
return iter->page_ext;
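The page_ext_iter begin/next/get helpers above walk the page_ext entries of consecutive pfns. A minimal sketch, assuming a hypothetical mark_one() visitor and that RCU read-side protection is what page_ext_lookup() requires here:

#include <linux/page_ext.h>

/* Sketch: visit the page_ext of @nr_pages starting at @pfn.
 * mark_one() is hypothetical; RCU protection is assumed. */
static void mark_range(unsigned long pfn, unsigned long nr_pages)
{
        struct page_ext_iter iter;
        struct page_ext *page_ext;

        rcu_read_lock();
        for (page_ext = page_ext_iter_begin(&iter, pfn);
             page_ext && nr_pages--;
             page_ext = page_ext_iter_next(&iter))
                mark_one(page_ext);
        rcu_read_unlock();
}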
include/linux/radix-tree.h
233
struct radix_tree_iter *iter, void __rcu **slot);
include/linux/radix-tree.h
249
const struct radix_tree_iter *iter, unsigned int tag);
include/linux/radix-tree.h
264
struct radix_tree_iter *iter, gfp_t gfp,
include/linux/radix-tree.h
281
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
include/linux/radix-tree.h
291
iter->index = 0;
include/linux/radix-tree.h
292
iter->next_index = start;
include/linux/radix-tree.h
310
struct radix_tree_iter *iter, unsigned flags);
include/linux/radix-tree.h
324
struct radix_tree_iter *iter, unsigned long index)
include/linux/radix-tree.h
326
radix_tree_iter_init(iter, index);
include/linux/radix-tree.h
327
return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
include/linux/radix-tree.h
340
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
include/linux/radix-tree.h
342
iter->next_index = iter->index;
include/linux/radix-tree.h
343
iter->tags = 0;
include/linux/radix-tree.h
348
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
include/linux/radix-tree.h
350
return iter->index + slots;
include/linux/radix-tree.h
364
struct radix_tree_iter *iter);
include/linux/radix-tree.h
373
radix_tree_chunk_size(struct radix_tree_iter *iter)
include/linux/radix-tree.h
375
return iter->next_index - iter->index;
include/linux/radix-tree.h
398
struct radix_tree_iter *iter, unsigned flags)
include/linux/radix-tree.h
401
iter->tags >>= 1;
include/linux/radix-tree.h
402
if (unlikely(!iter->tags))
include/linux/radix-tree.h
404
if (likely(iter->tags & 1ul)) {
include/linux/radix-tree.h
405
iter->index = __radix_tree_iter_add(iter, 1);
include/linux/radix-tree.h
410
unsigned offset = __ffs(iter->tags);
include/linux/radix-tree.h
412
iter->tags >>= offset++;
include/linux/radix-tree.h
413
iter->index = __radix_tree_iter_add(iter, offset);
include/linux/radix-tree.h
418
long count = radix_tree_chunk_size(iter);
include/linux/radix-tree.h
422
iter->index = __radix_tree_iter_add(iter, 1);
include/linux/radix-tree.h
428
iter->next_index = 0;
include/linux/radix-tree.h
449
#define radix_tree_for_each_slot(slot, root, iter, start) \
include/linux/radix-tree.h
450
for (slot = radix_tree_iter_init(iter, start) ; \
include/linux/radix-tree.h
451
slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \
include/linux/radix-tree.h
452
slot = radix_tree_next_slot(slot, iter, 0))
include/linux/radix-tree.h
465
#define radix_tree_for_each_tagged(slot, root, iter, start, tag) \
include/linux/radix-tree.h
466
for (slot = radix_tree_iter_init(iter, start) ; \
include/linux/radix-tree.h
467
slot || (slot = radix_tree_next_chunk(root, iter, \
include/linux/radix-tree.h
469
slot = radix_tree_next_slot(slot, iter, \
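radix_tree_for_each_slot() above is the canonical traversal, with iter->index tracking the current key. A minimal sketch dumping all populated slots under RCU:

#include <linux/radix-tree.h>

/* Sketch: walk every populated slot; RCU keeps the slots stable. */
static void dump_tree(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, root, &iter, 0)
                pr_info("index %lu -> %p\n", iter.index,
                        rcu_dereference_raw(*slot));
        rcu_read_unlock();
}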
include/linux/rhashtable.h
1316
struct rhashtable_iter *iter)
include/linux/rhashtable.h
1318
rhashtable_walk_enter(&hlt->ht, iter);
include/linux/rhashtable.h
246
struct rhashtable_iter *iter);
include/linux/rhashtable.h
247
void rhashtable_walk_exit(struct rhashtable_iter *iter);
include/linux/rhashtable.h
248
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
include/linux/rhashtable.h
250
static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
include/linux/rhashtable.h
253
(void)rhashtable_walk_start_check(iter);
include/linux/rhashtable.h
256
void *rhashtable_walk_next(struct rhashtable_iter *iter);
include/linux/rhashtable.h
257
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
include/linux/rhashtable.h
258
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
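The walk API above pairs enter/exit around one or more start/stop sections; the kernel/trace/fprobe.c entries further down follow the same shape. A minimal sketch over a hypothetical table of struct my_obj:

#include <linux/rhashtable.h>

/* Sketch: 'struct my_obj' is hypothetical. -EAGAIN from the walker means
 * a resize raced with us; the walk restarts, possibly revisiting entries. */
static void walk_table(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);
        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;
                        break;
                }
                /* obj is only stable until rhashtable_walk_stop() */
        }
        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}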
include/linux/ring_buffer.h
159
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
include/linux/ring_buffer.h
162
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
include/linux/ring_buffer.h
163
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
include/linux/ring_buffer.h
164
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
include/linux/ring_buffer.h
165
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
include/linux/ring_buffer.h
166
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
include/linux/rolling_buffer.h
27
struct iov_iter iter; /* Iterator tracking what's left in the buffer */
include/linux/rolling_buffer.h
58
iov_iter_advance(&roll->iter, amount);
include/linux/seq_file.h
110
ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
include/linux/skbuff.h
4189
#define skb_walk_frags(skb, iter) \
include/linux/skbuff.h
4190
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
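skb_walk_frags() above iterates the frag_list chain of a fragmented/GSO skb. A minimal sketch summing the chained lengths:

#include <linux/skbuff.h>

/* Sketch: total payload held in @skb's frag_list chain. */
static unsigned int fraglist_len(struct sk_buff *skb)
{
        struct sk_buff *frag;
        unsigned int len = 0;

        skb_walk_frags(skb, frag)
                len += frag->len;

        return len;
}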
include/linux/skbuff.h
5437
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
include/linux/trace_events.h
44
const char *trace_print_bitmask_seq(struct trace_iterator *iter, void *bitmask_ptr,
include/linux/trace_events.h
60
int trace_raw_output_prep(struct trace_iterator *iter,
include/linux/trace_events.h
63
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
include/linux/uio.h
101
#define iter_iov_addr(iter) (iter_iov(iter)->iov_base + (iter)->iov_offset)
include/linux/uio.h
115
static inline void iov_iter_save_state(struct iov_iter *iter,
include/linux/uio.h
118
state->iov_offset = iter->iov_offset;
include/linux/uio.h
119
state->count = iter->count;
include/linux/uio.h
120
state->nr_segs = iter->nr_segs;
include/linux/uio.h
392
ssize_t iov_iter_extract_bvecs(struct iov_iter *iter, struct bio_vec *bv,
include/linux/uio.h
412
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
include/linux/uio.h
414
return user_backed_iter(iter);
include/linux/uio.h
418
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
include/linux/uio.h
91
struct iov_iter iter;
include/linux/uio.h
94
static inline const struct iovec *iter_iov(const struct iov_iter *iter)
include/linux/uio.h
96
if (iter->iter_type == ITER_UBUF)
include/linux/uio.h
97
return (const struct iovec *) &iter->__ubuf_iovec;
include/linux/uio.h
98
return iter->__iov;
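iov_iter_save_state() above snapshots an iterator so a partial consumption can be rolled back; the io_uring/rw.c entries further down pair it with iov_iter_restore() the same way. A minimal sketch:

#include <linux/uio.h>

/* Sketch: copy all of @buf or rewind the iterator so the caller can retry. */
static ssize_t copy_all_or_rewind(struct iov_iter *iter,
                                  const void *buf, size_t len)
{
        struct iov_iter_state state;
        size_t copied;

        iov_iter_save_state(iter, &state);
        copied = copy_to_iter(buf, len, iter);
        if (copied != len) {
                iov_iter_restore(iter, &state); /* undo the partial advance */
                return -EFAULT;
        }

        return copied;
}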
include/linux/vmalloc.h
275
long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
include/media/media-entity.h
1196
struct media_pipeline_pad_iter *iter,
include/media/media-entity.h
1209
#define media_pipeline_for_each_pad(pipe, iter, pad) \
include/media/media-entity.h
1210
for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \
include/media/media-entity.h
1212
pad = __media_pipeline_pad_iter_next((pipe), iter, pad))
include/media/media-entity.h
1230
struct media_pipeline_entity_iter *iter);
include/media/media-entity.h
1239
void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter);
include/media/media-entity.h
1243
struct media_pipeline_entity_iter *iter,
include/media/media-entity.h
1259
#define media_pipeline_for_each_entity(pipe, iter, entity) \
include/media/media-entity.h
1260
for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \
include/media/media-entity.h
1262
entity = __media_pipeline_entity_iter_next((pipe), iter, entity))
include/media/media-entity.h
383
#define media_entity_for_each_pad(entity, iter) \
include/media/media-entity.h
384
for (iter = (entity)->pads; \
include/media/media-entity.h
385
iter < &(entity)->pads[(entity)->num_pads]; \
include/media/media-entity.h
386
++iter)
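media_entity_for_each_pad() above walks the entity's pad array directly. A minimal sketch counting sink pads:

#include <media/media-entity.h>

/* Sketch: count the sink pads of @entity via the pad array iterator. */
static unsigned int count_sink_pads(struct media_entity *entity)
{
        struct media_pad *iter;
        unsigned int n = 0;

        media_entity_for_each_pad(entity, iter)
                if (iter->flags & MEDIA_PAD_FL_SINK)
                        n++;

        return n;
}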
include/net/bonding.h
406
struct list_head *iter;
include/net/bonding.h
409
bond_for_each_slave(bond, tmp, iter) {
include/net/bonding.h
419
struct list_head *iter;
include/net/bonding.h
422
bond_for_each_slave(bond, tmp, iter) {
include/net/bonding.h
644
struct list_head *iter;
include/net/bonding.h
647
bond_for_each_slave(bond, tmp, iter) {
include/net/bonding.h
745
struct list_head *iter;
include/net/bonding.h
748
bond_for_each_slave(bond, tmp, iter)
include/net/bonding.h
758
struct list_head *iter;
include/net/bonding.h
761
bond_for_each_slave_rcu(bond, tmp, iter)
include/net/bonding.h
83
#define bond_for_each_slave(bond, pos, iter) \
include/net/bonding.h
84
netdev_for_each_lower_private((bond)->dev, pos, iter)
include/net/bonding.h
87
#define bond_for_each_slave_rcu(bond, pos, iter) \
include/net/bonding.h
88
netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
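bond_for_each_slave() above layers on the netdevice lower-private walk, as the inline helpers earlier in this file show. A minimal sketch counting slaves that are up; the non-RCU variant assumes RTNL:

#include <net/bonding.h>

/* Sketch: count up slaves of @bond; caller is assumed to hold RTNL. */
static int bond_up_slaves(struct bonding *bond)
{
        struct list_head *iter;
        struct slave *slave;
        int n = 0;

        bond_for_each_slave(bond, slave, iter)
                if (bond_slave_is_up(slave))
                        n++;

        return n;
}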
include/net/cfg80211.h
7552
(*iter)(void *data, u8 type,
include/net/cfg80211.h
8054
void (*iter)(struct wiphy *wiphy,
include/net/cfg80211.h
9855
void (*iter)(const struct ieee80211_iface_combination *c,
include/net/ip.h
185
unsigned int hlen, struct ip_fraglist_iter *iter);
include/net/ip.h
186
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);
include/net/ip.h
188
static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
include/net/ip.h
190
struct sk_buff *skb = iter->frag;
include/net/ip.h
192
iter->frag = skb->next;
include/net/ipv6.h
166
struct ip6_fraglist_iter *iter);
include/net/ipv6.h
167
void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter);
include/net/ipv6.h
169
static inline struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter)
include/net/ipv6.h
171
struct sk_buff *skb = iter->frag;
include/net/ipv6.h
173
iter->frag = skb->next;
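The ip_fraglist/ip6_fraglist helpers above drive the fast-path fragmentation loop used by ip_do_fragment()-style callers. A minimal IPv4 sketch, with the output callback passed in (the okfn-style signature is an assumption of the sketch):

#include <net/ip.h>

/* Sketch: prepare each frag_list member, emit it, then advance. */
static int emit_fraglist(struct net *net, struct sock *sk,
                         struct sk_buff *skb, struct iphdr *iph,
                         unsigned int hlen,
                         int (*output)(struct net *, struct sock *,
                                       struct sk_buff *))
{
        struct ip_fraglist_iter iter;
        int err;

        ip_fraglist_init(skb, iph, hlen, &iter);
        for (;;) {
                if (iter.frag)
                        ip_fraglist_prepare(skb, &iter);
                err = output(net, sk, skb);
                if (err || !iter.frag)
                        break;
                skb = ip_fraglist_next(&iter);
        }

        return err;
}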
include/net/mac80211.h
6729
void (*iter)(struct ieee80211_hw *hw,
include/net/mac80211.h
6753
void (*iter)(struct ieee80211_hw *hw,
include/net/mac80211.h
6781
void (*iter)(struct ieee80211_hw *hw,
include/net/mac80211.h
6806
void (*iter)(struct ieee80211_hw *hw,
include/net/netfilter/nf_conntrack.h
242
void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
include/net/netfilter/nf_conntrack.h
246
void nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data),
include/net/netfilter/nf_conntrack_expect.h
132
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), void *data);
include/net/netfilter/nf_conntrack_expect.h
134
bool (*iter)(struct nf_conntrack_expect *e, void *data),
include/net/netfilter/nf_tables.h
1165
const struct nft_set_iter *iter,
include/net/netfilter/nf_tables.h
337
const struct nft_set_iter *iter,
include/net/netfilter/nf_tables.h
492
struct nft_set_iter *iter);
include/net/netlabel.h
333
struct netlbl_lsm_catmap *iter;
include/net/netlabel.h
336
iter = catmap;
include/net/netlabel.h
338
kfree(iter);
include/net/sctp/sctp.h
103
void sctp_transport_walk_start(struct rhashtable_iter *iter);
include/net/sctp/sctp.h
104
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
include/net/sctp/sctp.h
106
struct rhashtable_iter *iter);
include/net/sctp/sctp.h
108
struct rhashtable_iter *iter, int pos);
include/rdma/ib_verbs.h
3191
#define rdma_for_each_port(device, iter) \
include/rdma/ib_verbs.h
3192
for (iter = rdma_start_port(device + \
include/rdma/ib_verbs.h
3194
iter))); \
include/rdma/ib_verbs.h
3195
iter <= rdma_end_port(device); iter++)
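rdma_for_each_port() above iterates the 1-based port numbers of an ib_device, with the BUILD_BUG_ON_ZERO() check forcing iter to be a u32. A minimal sketch:

#include <rdma/ib_verbs.h>

/* Sketch: log each port; the macro requires a u32 iterator. */
static void log_ports(struct ib_device *device)
{
        u32 port;

        rdma_for_each_port(device, port)
                dev_info(&device->dev, "port %u\n", port);
}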
include/rdma/rdmavt_qp.h
1001
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
include/rdma/rw.h
66
struct bvec_iter iter, u64 remote_addr, u32 rkey,
include/sound/pcm.h
1536
struct iov_iter *iter) __must_check;
include/sound/pcm.h
1538
struct iov_iter *iter) __must_check;
include/sound/pcm.h
74
unsigned long pos, struct iov_iter *iter, unsigned long bytes);
include/sound/soc-component.h
142
unsigned long pos, struct iov_iter *iter,
include/sound/soc-component.h
419
struct iov_iter *iter, unsigned long bytes);
include/trace/events/afs.h
609
TP_PROTO(struct afs_call *call, struct iov_iter *iter,
include/trace/events/afs.h
612
TP_ARGS(call, iter, want_more, ret),
include/trace/events/afs.h
627
__entry->remain = iov_iter_count(iter);
include/trace/stages/stage3_trace_output.h
42
trace_print_bitmask_seq(iter, __bitmask, __bitmask_size); \
include/trace/stages/stage3_trace_output.h
54
trace_print_bitmask_seq(iter, __bitmask, __bitmask_size); \
include/trace/trace_custom_events.h
79
trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
include/trace/trace_custom_events.h
82
struct trace_seq *s = &iter->seq; \
include/trace/trace_custom_events.h
83
struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
include/trace/trace_custom_events.h
87
field = (typeof(field))iter->ent; \
include/trace/trace_custom_events.h
89
ret = trace_raw_output_prep(iter, trace_event); \
include/trace/trace_custom_events.h
93
trace_event_printf(iter, print); \
include/trace/trace_events.h
205
trace_raw_output_##call(struct trace_iterator *iter, int flags, \
include/trace/trace_events.h
208
struct trace_seq *s = &iter->seq; \
include/trace/trace_events.h
209
struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
include/trace/trace_events.h
213
field = (typeof(field))iter->ent; \
include/trace/trace_events.h
215
ret = trace_raw_output_prep(iter, trace_event); \
include/trace/trace_events.h
219
trace_event_printf(iter, print); \
include/trace/trace_events.h
233
trace_raw_output_##call(struct trace_iterator *iter, int flags, \
include/trace/trace_events.h
238
struct trace_seq *p = &iter->tmp_seq; \
include/trace/trace_events.h
240
entry = iter->ent; \
include/trace/trace_events.h
250
return trace_output_call(iter, #call, print); \
include/uapi/drm/etnaviv_drm.h
253
__u8 iter; /* in/out, select pm domain at index iter */
include/uapi/drm/etnaviv_drm.h
263
__u16 iter; /* in/out, select pm source at index iter */
include/uapi/linux/bpf.h
6776
} iter;
io_uring/mock_file.c
70
struct iov_iter iter;
io_uring/mock_file.c
85
ret = io_uring_cmd_import_fixed_vec(cmd, iovec, iovec_len, dir, &iter,
io_uring/mock_file.c
89
ret = io_copy_regbuf(&iter, ubuf);
io_uring/rsrc.c
1039
static int io_import_kbuf(int ddir, struct iov_iter *iter,
io_uring/rsrc.c
1044
iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, count);
io_uring/rsrc.c
1045
iov_iter_advance(iter, offset);
io_uring/rsrc.c
1049
static int io_import_fixed(int ddir, struct iov_iter *iter,
io_uring/rsrc.c
1065
iov_iter_bvec(iter, ddir, NULL, 0, 0);
io_uring/rsrc.c
1072
return io_import_kbuf(ddir, iter, imu, len, offset);
io_uring/rsrc.c
1096
iov_iter_bvec(iter, ddir, bvec, nr_segs, len);
io_uring/rsrc.c
1097
iter->iov_offset = offset;
io_uring/rsrc.c
1124
int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
io_uring/rsrc.c
1133
return io_import_fixed(ddir, iter, node->buf, buf_addr, len);
io_uring/rsrc.c
1326
static int io_vec_fill_bvec(int ddir, struct iov_iter *iter,
io_uring/rsrc.c
1376
iov_iter_bvec(iter, ddir, res_bvec, bvec_idx, total_len);
io_uring/rsrc.c
1395
static int io_vec_fill_kern_bvec(int ddir, struct iov_iter *iter,
io_uring/rsrc.c
1419
iov_iter_bvec(iter, ddir, res_bvec, res_idx, total_len);
io_uring/rsrc.c
1471
int io_import_reg_vec(int ddir, struct iov_iter *iter,
io_uring/rsrc.c
1530
return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
io_uring/rsrc.c
1532
return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
io_uring/rsrc.h
68
int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
io_uring/rsrc.h
71
int io_import_reg_vec(int ddir, struct iov_iter *iter,
io_uring/rw.c
1007
req->cqe.res = iov_iter_count(&io->iter);
io_uring/rw.c
1014
ret = io_iter_do_read(rw, &io->iter);
io_uring/rw.c
1019
iov_iter_restore(&io->iter, &io->iter_state);
io_uring/rw.c
105
io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
io_uring/rw.c
1144
req->cqe.res = iov_iter_count(&io->iter);
io_uring/rw.c
1174
ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
io_uring/rw.c
1176
ret2 = loop_rw_iter(WRITE, rw, &io->iter);
io_uring/rw.c
1203
iov_iter_save_state(&io->iter, &io->iter_state);
io_uring/rw.c
1214
iov_iter_restore(&io->iter, &io->iter_state);
io_uring/rw.c
129
return import_ubuf(ddir, sel->addr, sqe_len, &io->iter);
io_uring/rw.c
143
iov_iter_save_state(&io->iter, &io->iter_state);
io_uring/rw.c
221
iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
io_uring/rw.c
228
iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
io_uring/rw.c
251
pi_attr.len, &io->meta.iter);
io_uring/rw.c
383
ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
io_uring/rw.c
385
iov_iter_save_state(&io->iter, &io->iter_state);
io_uring/rw.c
407
ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
io_uring/rw.c
411
iov_iter_save_state(&io->iter, &io->iter_state);
io_uring/rw.c
518
iov_iter_restore(&io->iter, &io->iter_state);
io_uring/rw.c
687
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
io_uring/rw.c
711
while (iov_iter_count(iter)) {
io_uring/rw.c
716
if (iter_is_ubuf(iter)) {
io_uring/rw.c
717
addr = iter->ubuf + iter->iov_offset;
io_uring/rw.c
718
len = iov_iter_count(iter);
io_uring/rw.c
719
} else if (!iov_iter_is_bvec(iter)) {
io_uring/rw.c
720
addr = iter_iov_addr(iter);
io_uring/rw.c
721
len = iter_iov_len(iter);
io_uring/rw.c
738
if (!iov_iter_is_bvec(iter)) {
io_uring/rw.c
739
iov_iter_advance(iter, nr);
io_uring/rw.c
830
static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
io_uring/rw.c
835
return file->f_op->read_iter(&rw->kiocb, iter);
io_uring/rw.c
837
return loop_rw_iter(READ, rw, iter);
io_uring/rw.c
933
req->cqe.res = iov_iter_count(&io->iter);
io_uring/rw.c
951
ret = io_iter_do_read(rw, &io->iter);
io_uring/rw.c
986
iov_iter_restore(&io->iter, &io->iter_state);
io_uring/rw.c
99
ret = __import_iovec(ddir, uvec, uvec_segs, nr_segs, &iov, &io->iter,
io_uring/rw.c
995
iov_iter_advance(&io->iter, ret);
io_uring/rw.c
996
if (!iov_iter_count(&io->iter))
io_uring/rw.c
999
iov_iter_save_state(&io->iter, &io->iter_state);
io_uring/rw.h
16
struct iov_iter iter;
io_uring/uring_cmd.c
291
struct iov_iter *iter,
io_uring/uring_cmd.c
300
return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
io_uring/uring_cmd.c
307
int ddir, struct iov_iter *iter,
io_uring/uring_cmd.c
321
return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
ipc/util.c
778
struct ipc_proc_iter *iter = s->private;
ipc/util.c
779
return iter->pid_ns;
ipc/util.c
818
struct ipc_proc_iter *iter = s->private;
ipc/util.c
819
struct ipc_proc_iface *iface = iter->iface;
ipc/util.c
828
return sysvipc_find_ipc(&iter->ns->ids[iface->ids], pos);
ipc/util.c
837
struct ipc_proc_iter *iter = s->private;
ipc/util.c
838
struct ipc_proc_iface *iface = iter->iface;
ipc/util.c
841
ids = &iter->ns->ids[iface->ids];
ipc/util.c
864
struct ipc_proc_iter *iter = s->private;
ipc/util.c
865
struct ipc_proc_iface *iface = iter->iface;
ipc/util.c
872
ids = &iter->ns->ids[iface->ids];
ipc/util.c
879
struct ipc_proc_iter *iter = s->private;
ipc/util.c
880
struct ipc_proc_iface *iface = iter->iface;
ipc/util.c
899
struct ipc_proc_iter *iter;
ipc/util.c
901
iter = __seq_open_private(file, &sysvipc_proc_seqops, sizeof(*iter));
ipc/util.c
902
if (!iter)
ipc/util.c
905
iter->iface = pde_data(inode);
ipc/util.c
906
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
ipc/util.c
907
iter->pid_ns = get_pid_ns(task_active_pid_ns(current));
ipc/util.c
915
struct ipc_proc_iter *iter = seq->private;
ipc/util.c
916
put_ipc_ns(iter->ns);
ipc/util.c
917
put_pid_ns(iter->pid_ns);
kernel/auditsc.c
1131
unsigned int iter;
kernel/auditsc.c
1164
iter = 0;
kernel/auditsc.c
1237
if (require_data || (iter > 0) ||
kernel/auditsc.c
1239
if (iter == 0) {
kernel/auditsc.c
1247
" a%d[%d]=", arg, iter++);
kernel/auditsc.c
1281
iter = 0;
kernel/bpf/bpf_iter.c
340
struct bpf_iter_target_info *tinfo = NULL, *iter;
kernel/bpf/bpf_iter.c
349
list_for_each_entry(iter, &targets, list) {
kernel/bpf/bpf_iter.c
350
if (iter->btf_id && iter->btf_id == prog_btf_id) {
kernel/bpf/bpf_iter.c
351
tinfo = iter;
kernel/bpf/bpf_iter.c
354
if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) {
kernel/bpf/bpf_iter.c
355
cache_btf_id(iter, prog);
kernel/bpf/bpf_iter.c
356
tinfo = iter;
kernel/bpf/bpf_iter.c
456
char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
kernel/bpf/bpf_iter.c
458
u32 ulen = info->iter.target_name_len;
kernel/bpf/bpf_iter.c
467
info->iter.target_name_len = target_len + 1;
kernel/bpf/bpf_iter.c
507
struct bpf_iter_target_info *tinfo = NULL, *iter;
kernel/bpf/bpf_iter.c
537
list_for_each_entry(iter, &targets, list) {
kernel/bpf/bpf_iter.c
538
if (iter->btf_id == prog_btf_id) {
kernel/bpf/bpf_iter.c
539
tinfo = iter;
kernel/bpf/cgroup_iter.c
279
info->iter.cgroup.order = aux->cgroup.order;
kernel/bpf/cgroup_iter.c
280
info->iter.cgroup.cgroup_id = cgroup_id(aux->cgroup.start);
kernel/bpf/devmap.c
610
struct list_head *iter;
kernel/bpf/devmap.c
613
netdev_for_each_upper_dev_rcu(dev, upper, iter) {
kernel/bpf/inode.c
186
static void map_iter_free(struct map_iter *iter)
kernel/bpf/inode.c
188
if (iter) {
kernel/bpf/inode.c
189
kfree(iter->key);
kernel/bpf/inode.c
190
kfree(iter);
kernel/bpf/inode.c
196
struct map_iter *iter;
kernel/bpf/inode.c
198
iter = kzalloc_obj(*iter, GFP_KERNEL | __GFP_NOWARN);
kernel/bpf/inode.c
199
if (!iter)
kernel/bpf/inode.c
202
iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
kernel/bpf/inode.c
203
if (!iter->key)
kernel/bpf/inode.c
206
return iter;
kernel/bpf/inode.c
209
map_iter_free(iter);
kernel/bpf/inode.c
274
struct map_iter *iter;
kernel/bpf/inode.c
278
iter = map_iter_alloc(map);
kernel/bpf/inode.c
279
if (!iter)
kernel/bpf/inode.c
284
map_iter_free(iter);
kernel/bpf/inode.c
289
m->private = iter;
kernel/bpf/log.c
822
iter_type_str(reg->iter.btf, reg->iter.btf_id),
kernel/bpf/log.c
823
reg->ref_obj_id, iter_state_str(reg->iter.state),
kernel/bpf/log.c
824
reg->iter.depth);
kernel/bpf/map_iter.c
160
info->iter.map.map_id = aux->map->id;
kernel/bpf/task_iter.c
671
info->iter.task.tid = aux->task.pid;
kernel/bpf/task_iter.c
674
info->iter.task.pid = aux->task.pid;
kernel/bpf/verifier.c
1078
st->iter.btf = btf;
kernel/bpf/verifier.c
1079
st->iter.btf_id = btf_id;
kernel/bpf/verifier.c
1080
st->iter.state = BPF_ITER_STATE_ACTIVE;
kernel/bpf/verifier.c
1081
st->iter.depth = 0;
kernel/bpf/verifier.c
1170
if (st->iter.btf != btf || st->iter.btf_id != btf_id)
kernel/bpf/verifier.c
20109
if (old_reg->iter.btf != cur_reg->iter.btf ||
kernel/bpf/verifier.c
20110
old_reg->iter.btf_id != cur_reg->iter.btf_id ||
kernel/bpf/verifier.c
20111
old_reg->iter.state != cur_reg->iter.state ||
kernel/bpf/verifier.c
20465
if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
kernel/bpf/verifier.c
20469
if (cur_slot->iter.depth != slot->iter.depth)
kernel/bpf/verifier.c
20583
if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) {
kernel/bpf/verifier.c
357
} iter;
kernel/bpf/verifier.c
9040
meta->iter.spi = spi;
kernel/bpf/verifier.c
9041
meta->iter.frameno = reg->frameno;
kernel/bpf/verifier.c
9142
int iter_frameno = meta->iter.frameno;
kernel/bpf/verifier.c
9143
int iter_spi = meta->iter.spi;
kernel/bpf/verifier.c
9237
if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
kernel/bpf/verifier.c
9238
cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
kernel/bpf/verifier.c
9240
cur_iter->iter.state, iter_state_str(cur_iter->iter.state));
kernel/bpf/verifier.c
9244
if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
kernel/bpf/verifier.c
9264
queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
kernel/bpf/verifier.c
9265
queued_iter->iter.depth++;
kernel/bpf/verifier.c
9275
cur_iter->iter.state = BPF_ITER_STATE_DRAINED;
kernel/cgroup/cgroup-internal.h
78
struct css_task_iter iter;
kernel/cgroup/cgroup-v1.c
411
int *iter, ret;
kernel/cgroup/cgroup-v1.c
453
iter = l->list + index;
kernel/cgroup/cgroup-v1.c
454
*pos = *iter;
kernel/cgroup/cgroup-v1.c
455
return iter;
kernel/cgroup/cgroup.c
5235
css_task_iter_end(&ctx->procs.iter);
kernel/cgroup/cgroup.c
5246
return css_task_iter_next(&ctx->procs.iter);
kernel/cgroup/cgroup.c
5255
struct css_task_iter *it = &ctx->procs.iter;
kernel/crash_dump_dm_crypt.c
57
struct iov_iter iter;
kernel/crash_dump_dm_crypt.c
59
iov_iter_kvec(&iter, READ, &kvec, 1, count);
kernel/crash_dump_dm_crypt.c
60
return read_from_oldmem(&iter, count, ppos, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
kernel/debug/kdb/kdb_main.c
2008
struct kmsg_dump_iter iter;
kernel/debug/kdb/kdb_main.c
2028
kmsg_dump_rewind(&iter);
kernel/debug/kdb/kdb_main.c
2029
while (kmsg_dump_get_line(&iter, 1, NULL, 0, NULL))
kernel/debug/kdb/kdb_main.c
2061
kmsg_dump_rewind(&iter);
kernel/debug/kdb/kdb_main.c
2062
while (kmsg_dump_get_line(&iter, 1, buf, sizeof(buf), &len)) {
kernel/events/core.c
11759
struct perf_addr_filter *filter, *iter;
kernel/events/core.c
11761
list_for_each_entry_safe(filter, iter, filters, entry) {
kernel/events/core.c
2259
struct perf_event *iter;
kernel/events/core.c
2265
iter = event->aux_event;
kernel/events/core.c
2267
put_event(iter);
kernel/events/core.c
2275
for_each_sibling_event(iter, event) {
kernel/events/core.c
2276
if (iter->aux_event != event)
kernel/events/core.c
2279
iter->aux_event = NULL;
kernel/events/core.c
2287
__event_disable(iter, ctx, PERF_EVENT_STATE_ERROR);
kernel/events/core.c
9065
struct perf_event *iter;
kernel/events/core.c
9070
list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
kernel/events/core.c
9077
cpu = iter->cpu;
kernel/events/core.c
9079
cpu = READ_ONCE(iter->oncpu);
kernel/events/hw_breakpoint.c
326
struct perf_event *iter;
kernel/events/hw_breakpoint.c
339
rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
kernel/events/hw_breakpoint.c
340
if (find_slot_idx(iter->attr.bp_type) != type)
kernel/events/hw_breakpoint.c
343
if (iter->cpu >= 0) {
kernel/events/hw_breakpoint.c
347
} else if (cpu != iter->cpu)
kernel/events/hw_breakpoint.c
351
count += hw_breakpoint_weight(iter);
kernel/events/uprobes.c
2527
return_consumer_find(struct return_instance *ri, int *iter, int id)
kernel/events/uprobes.c
2532
for (idx = *iter; idx < ri->cons_cnt; idx++)
kernel/events/uprobes.c
2536
*iter = idx + 1;
kernel/gcov/fs.c
113
struct gcov_iterator *iter;
kernel/gcov/fs.c
119
iter = kvmalloc_flex(*iter, buffer, size);
kernel/gcov/fs.c
120
if (!iter)
kernel/gcov/fs.c
123
iter->info = info;
kernel/gcov/fs.c
124
iter->size = size;
kernel/gcov/fs.c
125
convert_to_gcda(iter->buffer, info);
kernel/gcov/fs.c
127
return iter;
kernel/gcov/fs.c
135
static void gcov_iter_free(struct gcov_iterator *iter)
kernel/gcov/fs.c
137
kvfree(iter);
kernel/gcov/fs.c
144
static struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
kernel/gcov/fs.c
146
return iter->info;
kernel/gcov/fs.c
153
static void gcov_iter_start(struct gcov_iterator *iter)
kernel/gcov/fs.c
155
iter->pos = 0;
kernel/gcov/fs.c
164
static int gcov_iter_next(struct gcov_iterator *iter)
kernel/gcov/fs.c
166
if (iter->pos < iter->size)
kernel/gcov/fs.c
167
iter->pos += ITER_STRIDE;
kernel/gcov/fs.c
169
if (iter->pos >= iter->size)
kernel/gcov/fs.c
182
static int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
kernel/gcov/fs.c
186
if (iter->pos >= iter->size)
kernel/gcov/fs.c
190
if (iter->pos + len > iter->size)
kernel/gcov/fs.c
191
len = iter->size - iter->pos;
kernel/gcov/fs.c
193
seq_write(seq, iter->buffer + iter->pos, len);
kernel/gcov/fs.c
219
struct gcov_iterator *iter = data;
kernel/gcov/fs.c
222
if (gcov_iter_next(iter))
kernel/gcov/fs.c
225
return iter;
kernel/gcov/fs.c
231
struct gcov_iterator *iter = data;
kernel/gcov/fs.c
233
if (gcov_iter_write(iter, seq))
kernel/gcov/fs.c
291
struct gcov_iterator *iter;
kernel/gcov/fs.c
305
iter = gcov_iter_new(info);
kernel/gcov/fs.c
306
if (!iter)
kernel/gcov/fs.c
312
seq->private = iter;
kernel/gcov/fs.c
318
gcov_iter_free(iter);
kernel/gcov/fs.c
330
struct gcov_iterator *iter;
kernel/gcov/fs.c
335
iter = seq->private;
kernel/gcov/fs.c
336
info = gcov_iter_get_info(iter);
kernel/gcov/fs.c
337
gcov_iter_free(iter);
kernel/jump_label.c
387
struct jump_entry *iter;
kernel/jump_label.c
389
iter = iter_start;
kernel/jump_label.c
390
while (iter < iter_stop) {
kernel/jump_label.c
391
if (init || !jump_entry_is_init(iter)) {
kernel/jump_label.c
392
if (addr_conflict(iter, start, end))
kernel/jump_label.c
395
iter++;
kernel/jump_label.c
530
struct jump_entry *iter;
kernel/jump_label.c
548
for (iter = iter_start; iter < iter_stop; iter++) {
kernel/jump_label.c
553
if (jump_label_type(iter) == JUMP_LABEL_NOP)
kernel/jump_label.c
554
arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
kernel/jump_label.c
556
in_init = init_section_contains((void *)jump_entry_code(iter), 1);
kernel/jump_label.c
557
jump_entry_set_init(iter, in_init);
kernel/jump_label.c
559
iterk = jump_entry_key(iter);
kernel/jump_label.c
564
static_key_set_entries(key, iter);
kernel/jump_label.c
586
struct jump_entry *iter;
kernel/jump_label.c
594
for (iter = iter_start; iter < iter_stop; iter++) {
kernel/jump_label.c
595
struct static_key *iterk = jump_entry_key(iter);
kernel/jump_label.c
703
struct jump_entry *iter;
kernel/jump_label.c
713
for (iter = iter_start; iter < iter_stop; iter++) {
kernel/jump_label.c
717
in_init = within_module_init(jump_entry_code(iter), mod);
kernel/jump_label.c
718
jump_entry_set_init(iter, in_init);
kernel/jump_label.c
720
iterk = jump_entry_key(iter);
kernel/jump_label.c
726
static_key_set_entries(key, iter);
kernel/jump_label.c
757
jlm->entries = iter;
kernel/jump_label.c
764
if (jump_label_type(iter) != jump_label_init_type(iter))
kernel/jump_label.c
765
__jump_label_update(key, iter, iter_stop, true);
kernel/jump_label.c
775
struct jump_entry *iter;
kernel/jump_label.c
779
for (iter = iter_start; iter < iter_stop; iter++) {
kernel/jump_label.c
780
if (jump_entry_key(iter) == key)
kernel/jump_label.c
783
key = jump_entry_key(iter);
kernel/kallsyms.c
608
static int get_ksymbol_mod(struct kallsym_iter *iter)
kernel/kallsyms.c
610
int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
kernel/kallsyms.c
611
&iter->value, &iter->type,
kernel/kallsyms.c
612
iter->name, iter->module_name,
kernel/kallsyms.c
613
&iter->exported);
kernel/kallsyms.c
615
iter->pos_mod_end = iter->pos;
kernel/kallsyms.c
627
static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
kernel/kallsyms.c
629
int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
kernel/kallsyms.c
630
&iter->value, &iter->type,
kernel/kallsyms.c
631
iter->name, iter->module_name,
kernel/kallsyms.c
632
&iter->exported);
kernel/kallsyms.c
634
iter->pos_ftrace_mod_end = iter->pos;
kernel/kallsyms.c
641
static int get_ksymbol_bpf(struct kallsym_iter *iter)
kernel/kallsyms.c
645
strscpy(iter->module_name, "bpf", MODULE_NAME_LEN);
kernel/kallsyms.c
646
iter->exported = 0;
kernel/kallsyms.c
647
ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
kernel/kallsyms.c
648
&iter->value, &iter->type,
kernel/kallsyms.c
649
iter->name);
kernel/kallsyms.c
651
iter->pos_bpf_end = iter->pos;
kernel/kallsyms.c
663
static int get_ksymbol_kprobe(struct kallsym_iter *iter)
kernel/kallsyms.c
665
strscpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
kernel/kallsyms.c
666
iter->exported = 0;
kernel/kallsyms.c
667
return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
kernel/kallsyms.c
668
&iter->value, &iter->type,
kernel/kallsyms.c
669
iter->name) < 0 ? 0 : 1;
kernel/kallsyms.c
673
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
kernel/kallsyms.c
675
unsigned off = iter->nameoff;
kernel/kallsyms.c
677
iter->module_name[0] = '\0';
kernel/kallsyms.c
678
iter->value = kallsyms_sym_address(iter->pos);
kernel/kallsyms.c
680
iter->type = kallsyms_get_symbol_type(off);
kernel/kallsyms.c
682
off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));
kernel/kallsyms.c
684
return off - iter->nameoff;
kernel/kallsyms.c
687
static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
kernel/kallsyms.c
689
iter->name[0] = '\0';
kernel/kallsyms.c
690
iter->nameoff = get_symbol_offset(new_pos);
kernel/kallsyms.c
691
iter->pos = new_pos;
kernel/kallsyms.c
693
iter->pos_mod_end = 0;
kernel/kallsyms.c
694
iter->pos_ftrace_mod_end = 0;
kernel/kallsyms.c
695
iter->pos_bpf_end = 0;
kernel/kallsyms.c
704
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
kernel/kallsyms.c
706
iter->pos = pos;
kernel/kallsyms.c
708
if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
kernel/kallsyms.c
709
get_ksymbol_mod(iter))
kernel/kallsyms.c
712
if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
kernel/kallsyms.c
713
get_ksymbol_ftrace_mod(iter))
kernel/kallsyms.c
716
if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) &&
kernel/kallsyms.c
717
get_ksymbol_bpf(iter))
kernel/kallsyms.c
720
return get_ksymbol_kprobe(iter);
kernel/kallsyms.c
724
static int update_iter(struct kallsym_iter *iter, loff_t pos)
kernel/kallsyms.c
728
return update_iter_mod(iter, pos);
kernel/kallsyms.c
731
if (pos != iter->pos)
kernel/kallsyms.c
732
reset_iter(iter, pos);
kernel/kallsyms.c
734
iter->nameoff += get_ksymbol_core(iter);
kernel/kallsyms.c
735
iter->pos++;
kernel/kallsyms.c
763
struct kallsym_iter *iter = m->private;
kernel/kallsyms.c
766
if (!iter->name[0])
kernel/kallsyms.c
769
value = iter->show_value ? (void *)iter->value : NULL;
kernel/kallsyms.c
771
if (iter->module_name[0]) {
kernel/kallsyms.c
778
type = iter->exported ? toupper(iter->type) :
kernel/kallsyms.c
779
tolower(iter->type);
kernel/kallsyms.c
781
type, iter->name, iter->module_name);
kernel/kallsyms.c
784
iter->type, iter->name);
kernel/kallsyms.c
840
struct kallsym_iter *iter = priv_data;
kernel/kallsyms.c
842
reset_iter(iter, 0);
kernel/kallsyms.c
847
iter->show_value = kallsyms_show_value(current_cred());
kernel/kallsyms.c
891
struct kallsym_iter *iter;
kernel/kallsyms.c
892
iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
kernel/kallsyms.c
893
if (!iter)
kernel/kallsyms.c
895
reset_iter(iter, 0);
kernel/kallsyms.c
901
iter->show_value = kallsyms_show_value(file->f_cred);
kernel/kcsan/kcsan_test.c
323
static noinline void test_delay(int iter)
kernel/kcsan/kcsan_test.c
325
while (iter--)
kernel/kprobes.c
2572
unsigned long *iter;
kernel/kprobes.c
2575
for (iter = start; iter < end; iter++) {
kernel/kprobes.c
2576
entry = (unsigned long)dereference_symbol_descriptor((void *)*iter);
kernel/liveupdate/luo_file.c
208
struct luo_file *iter;
kernel/liveupdate/luo_file.c
210
list_for_each_entry(iter, &file_set->files_list, list) {
kernel/liveupdate/luo_file.c
211
if (iter->token == token)
kernel/liveupdate/luo_flb.c
240
struct luo_flb_link *iter;
kernel/liveupdate/luo_flb.c
243
list_for_each_entry(iter, flb_list, list) {
kernel/liveupdate/luo_flb.c
244
err = luo_flb_file_preserve_one(iter->flb);
kernel/liveupdate/luo_flb.c
252
list_for_each_entry_continue_reverse(iter, flb_list, list)
kernel/liveupdate/luo_flb.c
253
luo_flb_file_unpreserve_one(iter->flb);
kernel/liveupdate/luo_flb.c
273
struct luo_flb_link *iter;
kernel/liveupdate/luo_flb.c
275
list_for_each_entry_reverse(iter, flb_list, list)
kernel/liveupdate/luo_flb.c
276
luo_flb_file_unpreserve_one(iter->flb);
kernel/liveupdate/luo_flb.c
293
struct luo_flb_link *iter;
kernel/liveupdate/luo_flb.c
295
list_for_each_entry_reverse(iter, flb_list, list)
kernel/liveupdate/luo_flb.c
296
luo_flb_file_finish_one(iter->flb);
kernel/liveupdate/luo_flb.c
328
struct luo_flb_link *iter;
kernel/liveupdate/luo_flb.c
360
list_for_each_entry(iter, flb_list, list) {
kernel/liveupdate/luo_flb.c
361
if (iter->flb == flb)
kernel/liveupdate/luo_flb.c
434
struct luo_flb_link *iter;
kernel/liveupdate/luo_flb.c
448
list_for_each_entry(iter, flb_list, list) {
kernel/liveupdate/luo_flb.c
449
if (iter->flb == flb) {
kernel/liveupdate/luo_flb.c
450
list_del(&iter->list);
kernel/liveupdate/luo_flb.c
451
kfree(iter);
kernel/locking/lockdep_proc.c
602
struct lock_stat_data *iter;
kernel/locking/lockdep_proc.c
607
iter = data->stats + (*pos - 1);
kernel/locking/lockdep_proc.c
608
if (iter >= data->iter_end)
kernel/locking/lockdep_proc.c
609
iter = NULL;
kernel/locking/lockdep_proc.c
611
return iter;
kernel/locking/lockdep_proc.c
652
struct lock_stat_data *iter = data->stats;
kernel/locking/lockdep_proc.c
659
iter->class = class;
kernel/locking/lockdep_proc.c
660
lock_stats(class, &iter->stats);
kernel/locking/lockdep_proc.c
661
iter++;
kernel/locking/lockdep_proc.c
664
data->iter_end = iter;
kernel/printk/printk.c
4898
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
kernel/printk/printk.c
4908
if (iter->cur_seq < min_seq)
kernel/printk/printk.c
4909
iter->cur_seq = min_seq;
kernel/printk/printk.c
4915
if (!prb_read_valid(prb, iter->cur_seq, &r))
kernel/printk/printk.c
4919
if (!prb_read_valid_info(prb, iter->cur_seq,
kernel/printk/printk.c
4928
iter->cur_seq = r.info->seq + 1;
kernel/printk/printk.c
4956
bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
kernel/printk/printk.c
4971
if (iter->cur_seq < min_seq)
kernel/printk/printk.c
4972
iter->cur_seq = min_seq;
kernel/printk/printk.c
4974
if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
kernel/printk/printk.c
4975
if (info.seq != iter->cur_seq) {
kernel/printk/printk.c
4977
iter->cur_seq = info.seq;
kernel/printk/printk.c
4982
if (iter->cur_seq >= iter->next_seq)
kernel/printk/printk.c
4991
seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
kernel/printk/printk.c
5003
if (r.info->seq >= iter->next_seq)
kernel/printk/printk.c
5012
iter->next_seq = next_seq;
kernel/printk/printk.c
5029
void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
kernel/printk/printk.c
5031
iter->cur_seq = latched_seq_read_nolock(&clear_seq);
kernel/printk/printk.c
5032
iter->next_seq = prb_next_seq(prb);
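kmsg_dump_rewind() above resets the iterator, after which kmsg_dump_get_line() pulls one record at a time, exactly as the kdb_main.c entries earlier show. A minimal sketch, assuming a hypothetical emit() sink:

#include <linux/kmsg_dump.h>

/* Sketch: replay the kernel log; emit() is hypothetical. */
static void dump_log(void)
{
        struct kmsg_dump_iter iter;
        static char line[1024];
        size_t len;

        kmsg_dump_rewind(&iter);
        while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
                emit(line, len);
}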
kernel/rcu/rcutorture.c
3231
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
kernel/rcu/rcutorture.c
3235
if (need_resched() || (iter & 0xfff))
kernel/sched/ext.c
535
static void scx_task_iter_start(struct scx_task_iter *iter)
kernel/sched/ext.c
537
memset(iter, 0, sizeof(*iter));
kernel/sched/ext.c
541
iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
kernel/sched/ext.c
542
list_add(&iter->cursor.tasks_node, &scx_tasks);
kernel/sched/ext.c
543
iter->list_locked = true;
kernel/sched/ext.c
546
static void __scx_task_iter_rq_unlock(struct scx_task_iter *iter)
kernel/sched/ext.c
548
if (iter->locked_task) {
kernel/sched/ext.c
549
__balance_callbacks(iter->rq, &iter->rf);
kernel/sched/ext.c
550
task_rq_unlock(iter->rq, iter->locked_task, &iter->rf);
kernel/sched/ext.c
551
iter->locked_task = NULL;
kernel/sched/ext.c
564
static void scx_task_iter_unlock(struct scx_task_iter *iter)
kernel/sched/ext.c
566
__scx_task_iter_rq_unlock(iter);
kernel/sched/ext.c
567
if (iter->list_locked) {
kernel/sched/ext.c
568
iter->list_locked = false;
kernel/sched/ext.c
573
static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
kernel/sched/ext.c
575
if (!iter->list_locked) {
kernel/sched/ext.c
577
iter->list_locked = true;
kernel/sched/ext.c
589
static void scx_task_iter_stop(struct scx_task_iter *iter)
kernel/sched/ext.c
591
__scx_task_iter_maybe_relock(iter);
kernel/sched/ext.c
592
list_del_init(&iter->cursor.tasks_node);
kernel/sched/ext.c
593
scx_task_iter_unlock(iter);
kernel/sched/ext.c
604
static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter)
kernel/sched/ext.c
606
struct list_head *cursor = &iter->cursor.tasks_node;
kernel/sched/ext.c
609
if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
kernel/sched/ext.c
610
scx_task_iter_unlock(iter);
kernel/sched/ext.c
614
__scx_task_iter_maybe_relock(iter);
kernel/sched/ext.c
637
static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
kernel/sched/ext.c
641
__scx_task_iter_rq_unlock(iter);
kernel/sched/ext.c
643
while ((p = scx_task_iter_next(iter))) {
kernel/sched/ext.c
675
iter->rq = task_rq_lock(p, &iter->rf);
kernel/sched/ext.c
676
iter->locked_task = p;
kernel/sched/psi.c
793
#define for_each_group(iter, group) \
kernel/sched/psi.c
794
for (typeof(group) iter = group; iter; iter = iter->parent)
kernel/sched/rt.c
2944
rt_rq_iter_t iter;
kernel/sched/rt.c
2948
for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
kernel/sched/rt.c
511
#define for_each_rt_rq(rt_rq, iter, rq) \
kernel/sched/rt.c
512
for (iter = &root_task_group; \
kernel/sched/rt.c
513
iter && (rt_rq = iter->rt_rq[cpu_of(rq)]); \
kernel/sched/rt.c
514
iter = next_task_group(iter))
kernel/sched/rt.c
620
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
kernel/sched/rt.c
623
if (iter == rt_rq)
kernel/sched/rt.c
626
raw_spin_lock(&iter->rt_runtime_lock);
kernel/sched/rt.c
632
if (iter->rt_runtime == RUNTIME_INF)
kernel/sched/rt.c
639
diff = iter->rt_runtime - iter->rt_time;
kernel/sched/rt.c
644
iter->rt_runtime -= diff;
kernel/sched/rt.c
647
raw_spin_unlock(&iter->rt_runtime_lock);
kernel/sched/rt.c
652
raw_spin_unlock(&iter->rt_runtime_lock);
kernel/sched/rt.c
663
rt_rq_iter_t iter;
kernel/sched/rt.c
669
for_each_rt_rq(rt_rq, iter, rq) {
kernel/sched/rt.c
697
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
kernel/sched/rt.c
703
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
kernel/sched/rt.c
706
raw_spin_lock(&iter->rt_runtime_lock);
kernel/sched/rt.c
708
diff = min_t(s64, iter->rt_runtime, want);
kernel/sched/rt.c
709
iter->rt_runtime -= diff;
kernel/sched/rt.c
712
iter->rt_runtime -= want;
kernel/sched/rt.c
715
raw_spin_unlock(&iter->rt_runtime_lock);
kernel/sched/rt.c
744
rt_rq_iter_t iter;
kernel/sched/rt.c
753
for_each_rt_rq(rt_rq, iter, rq) {
kernel/sched/rt.c
910
#define for_each_rt_rq(rt_rq, iter, rq) \
kernel/sched/rt.c
911
for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
kernel/static_call_inline.c
308
struct static_call_site *iter = iter_start;
kernel/static_call_inline.c
310
while (iter < iter_stop) {
kernel/static_call_inline.c
311
if (init || !static_call_is_init(iter)) {
kernel/static_call_inline.c
312
if (addr_conflict(iter, start, end))
kernel/static_call_inline.c
315
iter++;
kernel/time/clockevents.c
693
struct clock_event_device *ce = NULL, *iter;
kernel/time/clockevents.c
701
list_for_each_entry(iter, &clockevent_devices, list) {
kernel/time/clockevents.c
702
if (!strcmp(iter->name, name)) {
kernel/time/clockevents.c
703
ret = __clockevents_try_unbind(iter, dev->id);
kernel/time/clockevents.c
704
ce = iter;
kernel/time/timer_list.c
288
struct timer_list_iter *iter = v;
kernel/time/timer_list.c
290
if (iter->cpu == -1 && !iter->second_pass)
kernel/time/timer_list.c
291
timer_list_header(m, iter->now);
kernel/time/timer_list.c
292
else if (!iter->second_pass)
kernel/time/timer_list.c
293
print_cpu(m, iter->cpu, iter->now);
kernel/time/timer_list.c
295
else if (iter->cpu == -1 && iter->second_pass)
kernel/time/timer_list.c
298
print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
kernel/time/timer_list.c
303
static void *move_iter(struct timer_list_iter *iter, loff_t offset)
kernel/time/timer_list.c
306
iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
kernel/time/timer_list.c
307
if (iter->cpu >= nr_cpu_ids) {
kernel/time/timer_list.c
309
if (!iter->second_pass) {
kernel/time/timer_list.c
310
iter->cpu = -1;
kernel/time/timer_list.c
311
iter->second_pass = true;
kernel/time/timer_list.c
319
return iter;
kernel/time/timer_list.c
324
struct timer_list_iter *iter = file->private;
kernel/time/timer_list.c
327
iter->now = ktime_to_ns(ktime_get());
kernel/time/timer_list.c
328
iter->cpu = -1;
kernel/time/timer_list.c
329
iter->second_pass = false;
kernel/time/timer_list.c
330
return move_iter(iter, *offset);
kernel/time/timer_list.c
335
struct timer_list_iter *iter = file->private;
kernel/time/timer_list.c
337
return move_iter(iter, 1);
kernel/trace/blktrace.c
1490
typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
kernel/trace/blktrace.c
1493
static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
kernel/trace/blktrace.c
1497
unsigned long long ts = iter->ts;
kernel/trace/blktrace.c
1500
const struct blk_io_trace2 *t = te_blk_io_trace(iter->ent);
kernel/trace/blktrace.c
1504
trace_seq_printf(&iter->seq,
kernel/trace/blktrace.c
1506
MAJOR(t->device), MINOR(t->device), iter->cpu,
kernel/trace/blktrace.c
1507
secs, nsec_rem, iter->ent->pid, act, rwbs);
kernel/trace/blktrace.c
1510
static void blk_log_action(struct trace_iterator *iter, const char *act,
kernel/trace/blktrace.c
1514
const struct blk_io_trace2 *t = te_blk_io_trace(iter->ent);
kernel/trace/blktrace.c
1518
u64 id = t_cgid(iter->ent);
kernel/trace/blktrace.c
1525
trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
kernel/trace/blktrace.c
1542
trace_seq_printf(&iter->seq,
kernel/trace/blktrace.c
1548
trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
kernel/trace/blktrace.c
1730
static enum print_line_t print_one_line(struct trace_iterator *iter,
kernel/trace/blktrace.c
1733
struct trace_array *tr = iter->tr;
kernel/trace/blktrace.c
1734
struct trace_seq *s = &iter->seq;
kernel/trace/blktrace.c
1741
t = te_blk_io_trace(iter->ent);
kernel/trace/blktrace.c
1748
log_action(iter, long_act ? "message" : "m", has_cg);
kernel/trace/blktrace.c
1749
blk_log_msg(s, iter->ent, has_cg);
kernel/trace/blktrace.c
1756
log_action(iter, what2act[what].act[long_act], has_cg);
kernel/trace/blktrace.c
1757
what2act[what].print(s, iter->ent, has_cg);
kernel/trace/blktrace.c
1763
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
kernel/trace/blktrace.c
1766
return print_one_line(iter, false);
kernel/trace/blktrace.c
1769
static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
kernel/trace/blktrace.c
1771
struct trace_seq *s = &iter->seq;
kernel/trace/blktrace.c
1772
struct blk_io_trace2 *t = (struct blk_io_trace2 *)iter->ent;
kernel/trace/blktrace.c
1776
.time = iter->ts,
kernel/trace/blktrace.c
1785
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
kernel/trace/blktrace.c
1788
blk_trace_synthesize_old_trace(iter);
kernel/trace/blktrace.c
1790
return trace_handle_return(&iter->seq);
kernel/trace/blktrace.c
1793
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
kernel/trace/blktrace.c
1795
if ((iter->ent->type != TRACE_BLK) ||
kernel/trace/blktrace.c
1799
return print_one_line(iter, true);
kernel/trace/fprobe.c
594
struct rhashtable_iter iter;
kernel/trace/fprobe.c
606
rhltable_walk_enter(&fprobe_ip_table, &iter);
kernel/trace/fprobe.c
608
rhashtable_walk_start(&iter);
kernel/trace/fprobe.c
610
while ((node = rhashtable_walk_next(&iter)) && !IS_ERR(node))
kernel/trace/fprobe.c
613
rhashtable_walk_stop(&iter);
kernel/trace/fprobe.c
615
rhashtable_walk_exit(&iter);
kernel/trace/ftrace.c
2814
struct ftrace_rec_iter *iter = &ftrace_rec_iter;
kernel/trace/ftrace.c
2816
iter->pg = ftrace_pages_start;
kernel/trace/ftrace.c
2817
iter->index = 0;
kernel/trace/ftrace.c
2820
while (iter->pg && !iter->pg->index)
kernel/trace/ftrace.c
2821
iter->pg = iter->pg->next;
kernel/trace/ftrace.c
2823
if (!iter->pg)
kernel/trace/ftrace.c
2826
return iter;
kernel/trace/ftrace.c
2835
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
kernel/trace/ftrace.c
2837
iter->index++;
kernel/trace/ftrace.c
2839
if (iter->index >= iter->pg->index) {
kernel/trace/ftrace.c
2840
iter->pg = iter->pg->next;
kernel/trace/ftrace.c
2841
iter->index = 0;
kernel/trace/ftrace.c
2844
while (iter->pg && !iter->pg->index)
kernel/trace/ftrace.c
2845
iter->pg = iter->pg->next;
kernel/trace/ftrace.c
2848
if (!iter->pg)
kernel/trace/ftrace.c
2851
return iter;
kernel/trace/ftrace.c
2860
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
kernel/trace/ftrace.c
2862
return &iter->pg->records[iter->index];
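ftrace_rec_iter_start/next/record above give arch code an opaque walk over every dyn_ftrace record; for_ftrace_rec_iter() in include/linux/ftrace.h wraps the start/next calls. A minimal sketch, assuming the caller holds whatever serialization the arch update path requires:

#include <linux/ftrace.h>

/* Sketch: visit every patched call site recorded by dyn_ftrace. */
static void scan_records(void)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
                pr_debug("site %pS\n", (void *)rec->ip);
        }
}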
kernel/trace/ftrace.c
3970
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
3971
struct trace_array *tr = iter->ops->private;
kernel/trace/ftrace.c
3980
iter->pos = *pos;
kernel/trace/ftrace.c
3989
if (!iter->probe) {
kernel/trace/ftrace.c
3991
iter->probe = list_entry(next, struct ftrace_func_probe, list);
kernel/trace/ftrace.c
3994
if (iter->probe_entry)
kernel/trace/ftrace.c
3995
hnd = &iter->probe_entry->hlist;
kernel/trace/ftrace.c
3997
hash = iter->probe->ops.func_hash->filter_hash;
kernel/trace/ftrace.c
4009
if (iter->pidx >= size) {
kernel/trace/ftrace.c
4010
if (iter->probe->list.next == func_probes)
kernel/trace/ftrace.c
4012
next = iter->probe->list.next;
kernel/trace/ftrace.c
4013
iter->probe = list_entry(next, struct ftrace_func_probe, list);
kernel/trace/ftrace.c
4014
hash = iter->probe->ops.func_hash->filter_hash;
kernel/trace/ftrace.c
4016
iter->pidx = 0;
kernel/trace/ftrace.c
4019
hhd = &hash->buckets[iter->pidx];
kernel/trace/ftrace.c
4022
iter->pidx++;
kernel/trace/ftrace.c
4032
iter->pidx++;
kernel/trace/ftrace.c
4040
iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
kernel/trace/ftrace.c
4042
return iter;
kernel/trace/ftrace.c
4047
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
4051
if (!(iter->flags & FTRACE_ITER_DO_PROBES))
kernel/trace/ftrace.c
4054
if (iter->mod_pos > *pos)
kernel/trace/ftrace.c
4057
iter->probe = NULL;
kernel/trace/ftrace.c
4058
iter->probe_entry = NULL;
kernel/trace/ftrace.c
4059
iter->pidx = 0;
kernel/trace/ftrace.c
4060
for (l = 0; l <= (*pos - iter->mod_pos); ) {
kernel/trace/ftrace.c
4069
iter->flags |= FTRACE_ITER_PROBE;
kernel/trace/ftrace.c
4071
return iter;
kernel/trace/ftrace.c
4075
t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
kernel/trace/ftrace.c
4081
probe = iter->probe;
kernel/trace/ftrace.c
4082
probe_entry = iter->probe_entry;
kernel/trace/ftrace.c
4101
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
4102
struct trace_array *tr = iter->tr;
kernel/trace/ftrace.c
4105
iter->pos = *pos;
kernel/trace/ftrace.c
4107
iter->mod_list = iter->mod_list->next;
kernel/trace/ftrace.c
4109
if (iter->mod_list == &tr->mod_trace ||
kernel/trace/ftrace.c
4110
iter->mod_list == &tr->mod_notrace) {
kernel/trace/ftrace.c
4111
iter->flags &= ~FTRACE_ITER_MOD;
kernel/trace/ftrace.c
4115
iter->mod_pos = *pos;
kernel/trace/ftrace.c
4117
return iter;
kernel/trace/ftrace.c
4122
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
4126
if (iter->func_pos > *pos)
kernel/trace/ftrace.c
4129
iter->mod_pos = iter->func_pos;
kernel/trace/ftrace.c
4132
if (!iter->tr)
kernel/trace/ftrace.c
4135
for (l = 0; l <= (*pos - iter->func_pos); ) {
kernel/trace/ftrace.c
4141
iter->flags &= ~FTRACE_ITER_MOD;
kernel/trace/ftrace.c
4146
iter->flags |= FTRACE_ITER_MOD;
kernel/trace/ftrace.c
4148
return iter;
kernel/trace/ftrace.c
4152
t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
kernel/trace/ftrace.c
4155
struct trace_array *tr = iter->tr;
kernel/trace/ftrace.c
4157
if (WARN_ON_ONCE(!iter->mod_list) ||
kernel/trace/ftrace.c
4158
iter->mod_list == &tr->mod_trace ||
kernel/trace/ftrace.c
4159
iter->mod_list == &tr->mod_notrace)
kernel/trace/ftrace.c
4162
ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
kernel/trace/ftrace.c
4177
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
4183
if (iter->idx >= iter->pg->index) {
kernel/trace/ftrace.c
4184
if (iter->pg->next) {
kernel/trace/ftrace.c
4185
iter->pg = iter->pg->next;
kernel/trace/ftrace.c
4186
iter->idx = 0;
kernel/trace/ftrace.c
4190
rec = &iter->pg->records[iter->idx++];
kernel/trace/ftrace.c
4191
if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
kernel/trace/ftrace.c
4192
!ftrace_lookup_ip(iter->hash, rec->ip)) ||
kernel/trace/ftrace.c
4194
((iter->flags & FTRACE_ITER_ENABLED) &&
kernel/trace/ftrace.c
4197
((iter->flags & FTRACE_ITER_TOUCHED) &&
kernel/trace/ftrace.c
4208
iter->pos = iter->func_pos = *pos;
kernel/trace/ftrace.c
4209
iter->func = rec;
kernel/trace/ftrace.c
4211
return iter;
kernel/trace/ftrace.c
4217
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
4224
if (iter->flags & FTRACE_ITER_PROBE)
kernel/trace/ftrace.c
4227
if (iter->flags & FTRACE_ITER_MOD)
kernel/trace/ftrace.c
4230
if (iter->flags & FTRACE_ITER_PRINTALL) {
kernel/trace/ftrace.c
4244
static void reset_iter_read(struct ftrace_iterator *iter)
kernel/trace/ftrace.c
4246
iter->pos = 0;
kernel/trace/ftrace.c
4247
iter->func_pos = 0;
kernel/trace/ftrace.c
4248
iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
kernel/trace/ftrace.c
4253
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
4265
if (*pos < iter->pos)
kernel/trace/ftrace.c
4266
reset_iter_read(iter);
kernel/trace/ftrace.c
4273
if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
kernel/trace/ftrace.c
4274
ftrace_hash_empty(iter->hash)) {
kernel/trace/ftrace.c
4275
iter->func_pos = 1; /* Account for the message */
kernel/trace/ftrace.c
4278
iter->flags |= FTRACE_ITER_PRINTALL;
kernel/trace/ftrace.c
4280
iter->flags &= ~FTRACE_ITER_PROBE;
kernel/trace/ftrace.c
4281
return iter;
kernel/trace/ftrace.c
4284
if (iter->flags & FTRACE_ITER_MOD)
kernel/trace/ftrace.c
4292
iter->pg = ftrace_pages_start;
kernel/trace/ftrace.c
4293
iter->idx = 0;
kernel/trace/ftrace.c
4303
return iter;
kernel/trace/ftrace.c
4464
struct ftrace_iterator *iter = m->private;
kernel/trace/ftrace.c
4467
if (iter->flags & FTRACE_ITER_PROBE)
kernel/trace/ftrace.c
4468
return t_probe_show(m, iter);
kernel/trace/ftrace.c
4470
if (iter->flags & FTRACE_ITER_MOD)
kernel/trace/ftrace.c
4471
return t_mod_show(m, iter);
kernel/trace/ftrace.c
4473
if (iter->flags & FTRACE_ITER_PRINTALL) {
kernel/trace/ftrace.c
4474
if (iter->flags & FTRACE_ITER_NOTRACE)
kernel/trace/ftrace.c
4481
rec = iter->func;
kernel/trace/ftrace.c
4486
if (iter->flags & FTRACE_ITER_ADDRS)
kernel/trace/ftrace.c
4496
if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
kernel/trace/ftrace.c
4559
struct ftrace_iterator *iter;
kernel/trace/ftrace.c
4569
iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
kernel/trace/ftrace.c
4570
if (!iter)
kernel/trace/ftrace.c
4573
iter->pg = ftrace_pages_start;
kernel/trace/ftrace.c
4574
iter->ops = &global_ops;
kernel/trace/ftrace.c
4582
struct ftrace_iterator *iter;
kernel/trace/ftrace.c
4593
iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
kernel/trace/ftrace.c
4594
if (!iter)
kernel/trace/ftrace.c
4597
iter->pg = ftrace_pages_start;
kernel/trace/ftrace.c
4598
iter->flags = FTRACE_ITER_ENABLED;
kernel/trace/ftrace.c
4599
iter->ops = &global_ops;
kernel/trace/ftrace.c
4607
struct ftrace_iterator *iter;
kernel/trace/ftrace.c
4618
iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
kernel/trace/ftrace.c
4619
if (!iter)
kernel/trace/ftrace.c
4622
iter->pg = ftrace_pages_start;
kernel/trace/ftrace.c
4623
iter->flags = FTRACE_ITER_TOUCHED;
kernel/trace/ftrace.c
4624
iter->ops = &global_ops;
kernel/trace/ftrace.c
4632
struct ftrace_iterator *iter;
kernel/trace/ftrace.c
4642
iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
kernel/trace/ftrace.c
4643
if (!iter)
kernel/trace/ftrace.c
4646
iter->pg = ftrace_pages_start;
kernel/trace/ftrace.c
4647
iter->flags = FTRACE_ITER_ADDRS;
kernel/trace/ftrace.c
4648
iter->ops = &global_ops;
kernel/trace/ftrace.c
4675
struct ftrace_iterator *iter;
kernel/trace/ftrace.c
4689
iter = kzalloc_obj(*iter);
kernel/trace/ftrace.c
4690
if (!iter)
kernel/trace/ftrace.c
4693
if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
kernel/trace/ftrace.c
4696
iter->ops = ops;
kernel/trace/ftrace.c
4697
iter->flags = flag;
kernel/trace/ftrace.c
4698
iter->tr = tr;
kernel/trace/ftrace.c
4710
iter->mod_list = mod_head;
kernel/trace/ftrace.c
4716
iter->hash = alloc_ftrace_hash(size_bits);
kernel/trace/ftrace.c
4719
iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
kernel/trace/ftrace.c
4723
iter->hash = alloc_and_copy_ftrace_hash(hash->size_bits, hash);
kernel/trace/ftrace.c
4725
iter->hash = EMPTY_HASH;
kernel/trace/ftrace.c
4728
if (!iter->hash) {
kernel/trace/ftrace.c
4729
trace_parser_put(&iter->parser);
kernel/trace/ftrace.c
4736
iter->pg = ftrace_pages_start;
kernel/trace/ftrace.c
4741
m->private = iter;
kernel/trace/ftrace.c
4744
free_ftrace_hash(iter->hash);
kernel/trace/ftrace.c
4745
trace_parser_put(&iter->parser);
kernel/trace/ftrace.c
4748
file->private_data = iter;
kernel/trace/ftrace.c
4755
kfree(iter);
kernel/trace/ftrace.c
5450
struct ftrace_func_probe *probe = NULL, *iter;
kernel/trace/ftrace.c
5470
list_for_each_entry(iter, &tr->func_probes, list) {
kernel/trace/ftrace.c
5471
if (iter->probe_ops == probe_ops) {
kernel/trace/ftrace.c
5472
probe = iter;
kernel/trace/ftrace.c
5594
struct ftrace_func_probe *probe = NULL, *iter;
kernel/trace/ftrace.c
5624
list_for_each_entry(iter, &tr->func_probes, list) {
kernel/trace/ftrace.c
5625
if (iter->probe_ops == probe_ops) {
kernel/trace/ftrace.c
5626
probe = iter;
kernel/trace/ftrace.c
5770
static int ftrace_process_regex(struct ftrace_iterator *iter,
kernel/trace/ftrace.c
5773
struct ftrace_hash *hash = iter->hash;
kernel/trace/ftrace.c
5774
struct trace_array *tr = iter->ops->private;
kernel/trace/ftrace.c
5808
struct ftrace_iterator *iter;
kernel/trace/ftrace.c
5817
iter = m->private;
kernel/trace/ftrace.c
5819
iter = file->private_data;
kernel/trace/ftrace.c
5826
parser = &iter->parser;
kernel/trace/ftrace.c
5831
ret = ftrace_process_regex(iter, parser->buffer,
kernel/trace/ftrace.c
6176
struct ftrace_func_entry *entry, *iter;
kernel/trace/ftrace.c
6212
hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
kernel/trace/ftrace.c
6213
entry = __ftrace_lookup_ip(direct_functions, iter->ip);
kernel/trace/ftrace.c
6948
struct ftrace_iterator *iter;
kernel/trace/ftrace.c
6954
iter = m->private;
kernel/trace/ftrace.c
6957
iter = file->private_data;
kernel/trace/ftrace.c
6959
parser = &iter->parser;
kernel/trace/ftrace.c
6961
int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
kernel/trace/ftrace.c
6963
ftrace_process_regex(iter, parser->buffer,
kernel/trace/ftrace.c
6969
mutex_lock(&iter->ops->func_hash->regex_lock);
kernel/trace/ftrace.c
6972
filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
kernel/trace/ftrace.c
6975
orig_hash = &iter->ops->func_hash->filter_hash;
kernel/trace/ftrace.c
6976
if (iter->tr) {
kernel/trace/ftrace.c
6977
if (list_empty(&iter->tr->mod_trace))
kernel/trace/ftrace.c
6978
iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
kernel/trace/ftrace.c
6980
iter->hash->flags |= FTRACE_HASH_FL_MOD;
kernel/trace/ftrace.c
6983
orig_hash = &iter->ops->func_hash->notrace_hash;
kernel/trace/ftrace.c
6986
ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
kernel/trace/ftrace.c
6987
iter->hash, filter_hash);
kernel/trace/ftrace.c
6991
mutex_unlock(&iter->ops->func_hash->regex_lock);
kernel/trace/ftrace.c
6992
free_ftrace_hash(iter->hash);
kernel/trace/ftrace.c
6993
if (iter->tr)
kernel/trace/ftrace.c
6994
trace_array_put(iter->tr);
kernel/trace/ftrace.c
6995
kfree(iter);
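
The ftrace.c entries above all revolve around a struct ftrace_iterator allocated by __seq_open_private() and threaded through seq_file start/next/show callbacks. For reference, here is a minimal, self-contained sketch of that generic seq_file pattern; the demo_* names and the fixed four-entry table are illustrative, not ftrace's own code.

#include <linux/fs.h>
#include <linux/seq_file.h>

struct demo_iter {
	int idx;				/* cursor into a notional 4-entry table */
};

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	struct demo_iter *iter = m->private;

	iter->idx = *pos;
	return iter->idx < 4 ? iter : NULL;	/* NULL terminates the walk */
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct demo_iter *iter = v;

	(*pos)++;
	iter->idx++;
	return iter->idx < 4 ? iter : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to release: the iterator lives in m->private */
}

static int demo_show(struct seq_file *m, void *v)
{
	struct demo_iter *iter = v;

	seq_printf(m, "entry %d\n", iter->idx);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	/* allocate the iterator together with the seq_file, as ftrace does */
	return __seq_open_private(file, &demo_seq_ops,
				  sizeof(struct demo_iter)) ? 0 : -ENOMEM;
}
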
kernel/trace/ring_buffer.c
3181
rb_iter_head_event(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
3184
struct buffer_page *iter_head_page = iter->head_page;
kernel/trace/ring_buffer.c
3188
if (iter->head != iter->next_event)
kernel/trace/ring_buffer.c
3189
return iter->event;
kernel/trace/ring_buffer.c
3200
if (iter->head > commit - 8)
kernel/trace/ring_buffer.c
3203
event = __rb_page_index(iter_head_page, iter->head);
kernel/trace/ring_buffer.c
3212
if ((iter->head + length) > commit || length > iter->event_size)
kernel/trace/ring_buffer.c
3216
memcpy(iter->event, event, length);
kernel/trace/ring_buffer.c
3224
if (iter->page_stamp != iter_head_page->page->time_stamp ||
kernel/trace/ring_buffer.c
3228
iter->next_event = iter->head + length;
kernel/trace/ring_buffer.c
3229
return iter->event;
kernel/trace/ring_buffer.c
3232
iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
kernel/trace/ring_buffer.c
3233
iter->head = 0;
kernel/trace/ring_buffer.c
3234
iter->next_event = 0;
kernel/trace/ring_buffer.c
3235
iter->missed_events = 1;
kernel/trace/ring_buffer.c
3261
static void rb_inc_iter(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
3263
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
3271
if (iter->head_page == cpu_buffer->reader_page)
kernel/trace/ring_buffer.c
3272
iter->head_page = rb_set_head_page(cpu_buffer);
kernel/trace/ring_buffer.c
3274
rb_inc_page(&iter->head_page);
kernel/trace/ring_buffer.c
3276
iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
kernel/trace/ring_buffer.c
3277
iter->head = 0;
kernel/trace/ring_buffer.c
3278
iter->next_event = 0;
kernel/trace/ring_buffer.c
5277
static void rb_iter_reset(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
5279
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
5282
iter->head_page = cpu_buffer->reader_page;
kernel/trace/ring_buffer.c
5283
iter->head = cpu_buffer->reader_page->read;
kernel/trace/ring_buffer.c
5284
iter->next_event = iter->head;
kernel/trace/ring_buffer.c
5286
iter->cache_reader_page = iter->head_page;
kernel/trace/ring_buffer.c
5287
iter->cache_read = cpu_buffer->read;
kernel/trace/ring_buffer.c
5288
iter->cache_pages_removed = cpu_buffer->pages_removed;
kernel/trace/ring_buffer.c
5290
if (iter->head) {
kernel/trace/ring_buffer.c
5291
iter->read_stamp = cpu_buffer->read_stamp;
kernel/trace/ring_buffer.c
5292
iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
kernel/trace/ring_buffer.c
5294
iter->read_stamp = iter->head_page->page->time_stamp;
kernel/trace/ring_buffer.c
5295
iter->page_stamp = iter->read_stamp;
kernel/trace/ring_buffer.c
5306
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
5311
if (!iter)
kernel/trace/ring_buffer.c
5314
cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
5317
rb_iter_reset(iter);
kernel/trace/ring_buffer.c
5326
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
5337
cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
5363
return ((iter->head_page == commit_page && iter->head >= commit) ||
kernel/trace/ring_buffer.c
5364
(iter->head_page == reader && commit_page == head_page &&
kernel/trace/ring_buffer.c
5366
iter->head == rb_page_size(cpu_buffer->reader_page)));
kernel/trace/ring_buffer.c
5401
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
kernel/trace/ring_buffer.c
5412
iter->read_stamp += delta;
kernel/trace/ring_buffer.c
5417
delta = rb_fix_abs_ts(delta, iter->read_stamp);
kernel/trace/ring_buffer.c
5418
iter->read_stamp = delta;
kernel/trace/ring_buffer.c
5422
iter->read_stamp += event->time_delta;
kernel/trace/ring_buffer.c
5426
RB_WARN_ON(iter->cpu_buffer, 1);
kernel/trace/ring_buffer.c
5625
static void rb_advance_iter(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
5629
cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
5632
if (iter->head == iter->next_event) {
kernel/trace/ring_buffer.c
5634
if (rb_iter_head_event(iter) == NULL)
kernel/trace/ring_buffer.c
5638
iter->head = iter->next_event;
kernel/trace/ring_buffer.c
5643
if (iter->next_event >= rb_page_size(iter->head_page)) {
kernel/trace/ring_buffer.c
5645
if (iter->head_page == cpu_buffer->commit_page)
kernel/trace/ring_buffer.c
5647
rb_inc_iter(iter);
kernel/trace/ring_buffer.c
5651
rb_update_iter_read_stamp(iter, iter->event);
kernel/trace/ring_buffer.c
5734
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
kernel/trace/ring_buffer.c
5744
cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
5752
if (unlikely(iter->cache_read != cpu_buffer->read ||
kernel/trace/ring_buffer.c
5753
iter->cache_reader_page != cpu_buffer->reader_page ||
kernel/trace/ring_buffer.c
5754
iter->cache_pages_removed != cpu_buffer->pages_removed))
kernel/trace/ring_buffer.c
5755
rb_iter_reset(iter);
kernel/trace/ring_buffer.c
5758
if (ring_buffer_iter_empty(iter))
kernel/trace/ring_buffer.c
5774
if (iter->head >= rb_page_size(iter->head_page)) {
kernel/trace/ring_buffer.c
5775
rb_inc_iter(iter);
kernel/trace/ring_buffer.c
5779
event = rb_iter_head_event(iter);
kernel/trace/ring_buffer.c
5786
rb_inc_iter(iter);
kernel/trace/ring_buffer.c
5789
rb_advance_iter(iter);
kernel/trace/ring_buffer.c
5794
rb_advance_iter(iter);
kernel/trace/ring_buffer.c
5800
*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
kernel/trace/ring_buffer.c
5805
rb_advance_iter(iter);
kernel/trace/ring_buffer.c
5810
*ts = iter->read_stamp + event->time_delta;
kernel/trace/ring_buffer.c
5897
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
5899
bool ret = iter->missed_events != 0;
kernel/trace/ring_buffer.c
5901
iter->missed_events = 0;
kernel/trace/ring_buffer.c
5915
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
kernel/trace/ring_buffer.c
5917
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
5923
event = rb_iter_peek(iter, ts);
kernel/trace/ring_buffer.c
5999
struct ring_buffer_iter *iter;
kernel/trace/ring_buffer.c
6004
iter = kzalloc_obj(*iter, flags);
kernel/trace/ring_buffer.c
6005
if (!iter)
kernel/trace/ring_buffer.c
6009
iter->event_size = buffer->subbuf_size;
kernel/trace/ring_buffer.c
6010
iter->event = kmalloc(iter->event_size, flags);
kernel/trace/ring_buffer.c
6011
if (!iter->event) {
kernel/trace/ring_buffer.c
6012
kfree(iter);
kernel/trace/ring_buffer.c
6018
iter->cpu_buffer = cpu_buffer;
kernel/trace/ring_buffer.c
6024
rb_iter_reset(iter);
kernel/trace/ring_buffer.c
6027
return iter;
kernel/trace/ring_buffer.c
6038
ring_buffer_read_finish(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
6040
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
6046
kfree(iter->event);
kernel/trace/ring_buffer.c
6047
kfree(iter);
kernel/trace/ring_buffer.c
6058
void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
kernel/trace/ring_buffer.c
6060
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
kernel/trace/ring_buffer.c
6065
rb_advance_iter(iter);
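
The ring_buffer.c entries cover the non-consuming reader iterator. A hedged sketch of a reader driving that API end to end, assuming ring_buffer_read_start() allocates and returns the iterator (as the kzalloc_obj() call around line 6004 above suggests) and takes a GFP flags argument; demo_dump_cpu() itself is hypothetical.

#include <linux/ring_buffer.h>
#include <linux/printk.h>
#include <linux/gfp.h>

/* Walk every event currently on one CPU's buffer without consuming it. */
static void demo_dump_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return;

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		pr_info("cpu %d ts %llu len %u\n", cpu,
			(unsigned long long)ts,
			ring_buffer_event_length(event));
		ring_buffer_iter_advance(iter);		/* step, do not consume */
	}

	ring_buffer_read_finish(iter);			/* frees iter->event and iter */
}
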
kernel/trace/trace.c
10307
static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
kernel/trace/trace.c
10309
iter->tr = tr;
kernel/trace/trace.c
10310
iter->trace = iter->tr->current_trace;
kernel/trace/trace.c
10311
iter->cpu_file = RING_BUFFER_ALL_CPUS;
kernel/trace/trace.c
10312
iter->array_buffer = &tr->array_buffer;
kernel/trace/trace.c
10314
if (iter->trace && iter->trace->open)
kernel/trace/trace.c
10315
iter->trace->open(iter);
kernel/trace/trace.c
10318
if (ring_buffer_overruns(iter->array_buffer->buffer))
kernel/trace/trace.c
10319
iter->iter_flags |= TRACE_FILE_ANNOTATE;
kernel/trace/trace.c
10322
if (trace_clocks[iter->tr->clock_id].in_ns)
kernel/trace/trace.c
10323
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
kernel/trace/trace.c
10326
iter->temp = static_temp_buf;
kernel/trace/trace.c
10327
iter->temp_size = STATIC_TEMP_BUF_SIZE;
kernel/trace/trace.c
10328
iter->fmt = static_fmt_buf;
kernel/trace/trace.c
10329
iter->fmt_size = STATIC_FMT_BUF_SIZE;
kernel/trace/trace.c
10332
void trace_init_global_iter(struct trace_iterator *iter)
kernel/trace/trace.c
10334
trace_init_iter(iter, &global_trace);
kernel/trace/trace.c
10340
static struct trace_iterator iter;
kernel/trace/trace.c
10358
trace_init_iter(&iter, tr);
kernel/trace/trace.c
10369
iter.cpu_file = raw_smp_processor_id();
kernel/trace/trace.c
10371
iter.cpu_file = RING_BUFFER_ALL_CPUS;
kernel/trace/trace.c
10391
while (!trace_empty(&iter)) {
kernel/trace/trace.c
10398
trace_iterator_reset(&iter);
kernel/trace/trace.c
10399
iter.iter_flags |= TRACE_FILE_LAT_FMT;
kernel/trace/trace.c
10401
if (trace_find_next_entry_inc(&iter) != NULL) {
kernel/trace/trace.c
10404
ret = print_trace_line(&iter);
kernel/trace/trace.c
10406
trace_consume(&iter);
kernel/trace/trace.c
10408
trace_printk_seq(&iter.seq);
kernel/trace/trace.c
1691
struct trace_iterator *iter;
kernel/trace/trace.c
1698
struct trace_iterator *iter = pwait->iter;
kernel/trace/trace.c
1700
if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
kernel/trace/trace.c
1703
return iter->closed;
kernel/trace/trace.c
1706
static int wait_on_pipe(struct trace_iterator *iter, int full)
kernel/trace/trace.c
1712
if (trace_buffer_iter(iter, iter->cpu_file))
kernel/trace/trace.c
1715
pwait.wait_index = atomic_read_acquire(&iter->wait_index);
kernel/trace/trace.c
1716
pwait.iter = iter;
kernel/trace/trace.c
1718
ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
kernel/trace/trace.c
1726
if (iter->snapshot)
kernel/trace/trace.c
1727
iter->array_buffer = &iter->tr->snapshot_buffer;
kernel/trace/trace.c
2418
struct trace_iterator *iter = tracepoint_print_iter;
kernel/trace/trace.c
2421
if (WARN_ON_ONCE(!iter))
kernel/trace/trace.c
2438
trace_seq_init(&iter->seq);
kernel/trace/trace.c
2439
iter->ent = fbuffer->entry;
kernel/trace/trace.c
2440
event_call->event.funcs->trace(iter, 0, event);
kernel/trace/trace.c
2441
trace_seq_putc(&iter->seq, 0);
kernel/trace/trace.c
2442
printk("%s", iter->seq.buffer);
kernel/trace/trace.c
2809
static void trace_iterator_increment(struct trace_iterator *iter)
kernel/trace/trace.c
2811
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
kernel/trace/trace.c
2813
iter->idx++;
kernel/trace/trace.c
2819
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
kernel/trace/trace.c
2823
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
2831
event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
kernel/trace/trace.c
2836
iter->ent_size = ring_buffer_event_length(event);
kernel/trace/trace.c
2839
iter->ent_size = 0;
kernel/trace/trace.c
2844
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
kernel/trace/trace.c
2847
struct trace_buffer *buffer = iter->array_buffer->buffer;
kernel/trace/trace.c
2850
int cpu_file = iter->cpu_file;
kernel/trace/trace.c
2863
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
kernel/trace/trace.c
2875
ent = peek_next_entry(iter, cpu, &ts, &lost_events);
kernel/trace/trace.c
2885
next_size = iter->ent_size;
kernel/trace/trace.c
2889
iter->ent_size = next_size;
kernel/trace/trace.c
2906
char *trace_iter_expand_format(struct trace_iterator *iter)
kernel/trace/trace.c
2914
if (!iter->tr || iter->fmt == static_fmt_buf)
kernel/trace/trace.c
2917
tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
kernel/trace/trace.c
2920
iter->fmt_size += STATIC_FMT_BUF_SIZE;
kernel/trace/trace.c
2921
iter->fmt = tmp;
kernel/trace/trace.c
2928
static bool trace_safe_str(struct trace_iterator *iter, const char *str)
kernel/trace/trace.c
2935
if ((addr >= (unsigned long)iter->ent) &&
kernel/trace/trace.c
2936
(addr < (unsigned long)iter->ent + iter->ent_size))
kernel/trace/trace.c
2940
if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
kernel/trace/trace.c
2941
(addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
kernel/trace/trace.c
2955
if (!iter->ent)
kernel/trace/trace.c
2958
trace_event = ftrace_find_event(iter->ent->type);
kernel/trace/trace.c
2994
bool ignore_event(struct trace_iterator *iter)
kernel/trace/trace.c
3003
trace_event = ftrace_find_event(iter->ent->type);
kernel/trace/trace.c
3005
seq = &iter->seq;
kernel/trace/trace.c
3008
trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
kernel/trace/trace.c
3024
ptr = iter->ent;
kernel/trace/trace.c
3035
good = trace_safe_str(iter, str);
kernel/trace/trace.c
3056
const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
kernel/trace/trace.c
3064
if (!iter->tr || iter->tr->trace_flags & TRACE_ITER(HASH_PTR))
kernel/trace/trace.c
3068
new_fmt = q = iter->fmt;
kernel/trace/trace.c
3070
if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
kernel/trace/trace.c
3071
if (!trace_iter_expand_format(iter))
kernel/trace/trace.c
3074
q += iter->fmt - new_fmt;
kernel/trace/trace.c
3075
new_fmt = iter->fmt;
kernel/trace/trace.c
3099
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
kernel/trace/trace.c
3103
int ent_size = iter->ent_size;
kernel/trace/trace.c
3114
if (iter->temp == static_temp_buf &&
kernel/trace/trace.c
3123
if (iter->ent && iter->ent != iter->temp) {
kernel/trace/trace.c
3124
if ((!iter->temp || iter->temp_size < iter->ent_size) &&
kernel/trace/trace.c
3125
!WARN_ON_ONCE(iter->temp == static_temp_buf)) {
kernel/trace/trace.c
3127
temp = kmalloc(iter->ent_size, GFP_KERNEL);
kernel/trace/trace.c
3130
kfree(iter->temp);
kernel/trace/trace.c
3131
iter->temp = temp;
kernel/trace/trace.c
3132
iter->temp_size = iter->ent_size;
kernel/trace/trace.c
3134
memcpy(iter->temp, iter->ent, iter->ent_size);
kernel/trace/trace.c
3135
iter->ent = iter->temp;
kernel/trace/trace.c
3137
entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
kernel/trace/trace.c
3139
iter->ent_size = ent_size;
kernel/trace/trace.c
3145
void *trace_find_next_entry_inc(struct trace_iterator *iter)
kernel/trace/trace.c
3147
iter->ent = __find_next_entry(iter, &iter->cpu,
kernel/trace/trace.c
3148
&iter->lost_events, &iter->ts);
kernel/trace/trace.c
3150
if (iter->ent)
kernel/trace/trace.c
3151
trace_iterator_increment(iter);
kernel/trace/trace.c
3153
return iter->ent ? iter : NULL;
kernel/trace/trace.c
3156
static void trace_consume(struct trace_iterator *iter)
kernel/trace/trace.c
3158
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
kernel/trace/trace.c
3159
&iter->lost_events);
kernel/trace/trace.c
3164
struct trace_iterator *iter = m->private;
kernel/trace/trace.c
3168
WARN_ON_ONCE(iter->leftover);
kernel/trace/trace.c
3173
if (iter->idx > i)
kernel/trace/trace.c
3176
if (iter->idx < 0)
kernel/trace/trace.c
3177
ent = trace_find_next_entry_inc(iter);
kernel/trace/trace.c
3179
ent = iter;
kernel/trace/trace.c
3181
while (ent && iter->idx < i)
kernel/trace/trace.c
3182
ent = trace_find_next_entry_inc(iter);
kernel/trace/trace.c
3184
iter->pos = *pos;
kernel/trace/trace.c
3189
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
kernel/trace/trace.c
3195
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
kernel/trace/trace.c
3197
buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
3209
if (ts >= iter->array_buffer->time_start)
kernel/trace/trace.c
3217
per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
kernel/trace/trace.c
3226
struct trace_iterator *iter = m->private;
kernel/trace/trace.c
3227
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3228
int cpu_file = iter->cpu_file;
kernel/trace/trace.c
3234
if (unlikely(tr->current_trace != iter->trace)) {
kernel/trace/trace.c
3236
if (iter->trace->close)
kernel/trace/trace.c
3237
iter->trace->close(iter);
kernel/trace/trace.c
3238
iter->trace = tr->current_trace;
kernel/trace/trace.c
3240
if (iter->trace->open)
kernel/trace/trace.c
3241
iter->trace->open(iter);
kernel/trace/trace.c
3245
if (iter->snapshot && tracer_uses_snapshot(iter->trace))
kernel/trace/trace.c
3248
if (*pos != iter->pos) {
kernel/trace/trace.c
3249
iter->ent = NULL;
kernel/trace/trace.c
3250
iter->cpu = 0;
kernel/trace/trace.c
3251
iter->idx = -1;
kernel/trace/trace.c
3255
tracing_iter_reset(iter, cpu);
kernel/trace/trace.c
3257
tracing_iter_reset(iter, cpu_file);
kernel/trace/trace.c
3259
iter->leftover = 0;
kernel/trace/trace.c
3260
for (p = iter; p && l < *pos; p = s_next(m, p, &l))
kernel/trace/trace.c
3268
if (iter->leftover)
kernel/trace/trace.c
3269
p = iter;
kernel/trace/trace.c
3283
struct trace_iterator *iter = m->private;
kernel/trace/trace.c
3285
if (iter->snapshot && tracer_uses_snapshot(iter->trace))
kernel/trace/trace.c
3288
trace_access_unlock(iter->cpu_file);
kernel/trace/trace.c
3410
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
kernel/trace/trace.c
3413
struct array_buffer *buf = iter->array_buffer;
kernel/trace/trace.c
3415
struct tracer *type = iter->trace;
kernel/trace/trace.c
3450
seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
kernel/trace/trace.c
3451
trace_print_seq(m, &iter->seq);
kernel/trace/trace.c
3453
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
kernel/trace/trace.c
3454
trace_print_seq(m, &iter->seq);
kernel/trace/trace.c
3461
static void test_cpu_buff_start(struct trace_iterator *iter)
kernel/trace/trace.c
3463
struct trace_seq *s = &iter->seq;
kernel/trace/trace.c
3464
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3469
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
kernel/trace/trace.c
3472
if (cpumask_available(iter->started) &&
kernel/trace/trace.c
3473
cpumask_test_cpu(iter->cpu, iter->started))
kernel/trace/trace.c
3476
if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
kernel/trace/trace.c
3479
if (cpumask_available(iter->started))
kernel/trace/trace.c
3480
cpumask_set_cpu(iter->cpu, iter->started);
kernel/trace/trace.c
3483
if (iter->idx > 1)
kernel/trace/trace.c
3485
iter->cpu);
kernel/trace/trace.c
3504
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
kernel/trace/trace.c
3506
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3507
struct trace_seq *s = &iter->seq;
kernel/trace/trace.c
3512
entry = iter->ent;
kernel/trace/trace.c
3514
test_cpu_buff_start(iter);
kernel/trace/trace.c
3519
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
kernel/trace/trace.c
3520
trace_print_lat_context(iter);
kernel/trace/trace.c
3522
trace_print_context(iter);
kernel/trace/trace.c
3530
return print_event_fields(iter, event);
kernel/trace/trace.c
3540
return print_event_fields(iter, event);
kernel/trace/trace.c
3542
return event->funcs->trace(iter, sym_flags, event);
kernel/trace/trace.c
3550
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
kernel/trace/trace.c
3552
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3553
struct trace_seq *s = &iter->seq;
kernel/trace/trace.c
3557
entry = iter->ent;
kernel/trace/trace.c
3561
entry->pid, iter->cpu, iter->ts);
kernel/trace/trace.c
3568
return event->funcs->raw(iter, 0, event);
kernel/trace/trace.c
3575
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
kernel/trace/trace.c
3577
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3578
struct trace_seq *s = &iter->seq;
kernel/trace/trace.c
3583
entry = iter->ent;
kernel/trace/trace.c
3587
SEQ_PUT_HEX_FIELD(s, iter->cpu);
kernel/trace/trace.c
3588
SEQ_PUT_HEX_FIELD(s, iter->ts);
kernel/trace/trace.c
3595
enum print_line_t ret = event->funcs->hex(iter, 0, event);
kernel/trace/trace.c
3605
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
kernel/trace/trace.c
3607
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3608
struct trace_seq *s = &iter->seq;
kernel/trace/trace.c
3612
entry = iter->ent;
kernel/trace/trace.c
3616
SEQ_PUT_FIELD(s, iter->cpu);
kernel/trace/trace.c
3617
SEQ_PUT_FIELD(s, iter->ts);
kernel/trace/trace.c
3623
return event ? event->funcs->binary(iter, 0, event) :
kernel/trace/trace.c
3627
int trace_empty(struct trace_iterator *iter)
kernel/trace/trace.c
3633
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
3634
cpu = iter->cpu_file;
kernel/trace/trace.c
3635
buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
3640
if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
kernel/trace/trace.c
3647
buf_iter = trace_buffer_iter(iter, cpu);
kernel/trace/trace.c
3652
if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
kernel/trace/trace.c
3661
enum print_line_t print_trace_line(struct trace_iterator *iter)
kernel/trace/trace.c
3663
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3667
if (iter->lost_events) {
kernel/trace/trace.c
3668
if (iter->lost_events == (unsigned long)-1)
kernel/trace/trace.c
3669
trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
kernel/trace/trace.c
3670
iter->cpu);
kernel/trace/trace.c
3672
trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
kernel/trace/trace.c
3673
iter->cpu, iter->lost_events);
kernel/trace/trace.c
3674
if (trace_seq_has_overflowed(&iter->seq))
kernel/trace/trace.c
3678
if (iter->trace && iter->trace->print_line) {
kernel/trace/trace.c
3679
ret = iter->trace->print_line(iter);
kernel/trace/trace.c
3684
if (iter->ent->type == TRACE_BPUTS &&
kernel/trace/trace.c
3687
return trace_print_bputs_msg_only(iter);
kernel/trace/trace.c
3689
if (iter->ent->type == TRACE_BPRINT &&
kernel/trace/trace.c
3692
return trace_print_bprintk_msg_only(iter);
kernel/trace/trace.c
3694
if (iter->ent->type == TRACE_PRINT &&
kernel/trace/trace.c
3697
return trace_print_printk_msg_only(iter);
kernel/trace/trace.c
3700
return print_bin_fmt(iter);
kernel/trace/trace.c
3703
return print_hex_fmt(iter);
kernel/trace/trace.c
3706
return print_raw_fmt(iter);
kernel/trace/trace.c
3708
return print_trace_fmt(iter);
kernel/trace/trace.c
3713
struct trace_iterator *iter = m->private;
kernel/trace/trace.c
3714
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3717
if (trace_empty(iter))
kernel/trace/trace.c
3720
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
kernel/trace/trace.c
3721
print_trace_header(m, iter);
kernel/trace/trace.c
3729
struct trace_iterator *iter = m->private;
kernel/trace/trace.c
3730
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
3736
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
kernel/trace/trace.c
3738
if (trace_empty(iter))
kernel/trace/trace.c
3740
print_trace_header(m, iter);
kernel/trace/trace.c
3746
print_func_help_header_irq(iter->array_buffer,
kernel/trace/trace.c
3749
print_func_help_header(iter->array_buffer, m,
kernel/trace/trace.c
3789
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
kernel/trace/trace.c
3791
if (iter->tr->allocated_snapshot)
kernel/trace/trace.c
3797
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
kernel/trace/trace.c
3804
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
kernel/trace/trace.c
3809
struct trace_iterator *iter = v;
kernel/trace/trace.c
3812
if (iter->ent == NULL) {
kernel/trace/trace.c
3813
if (iter->tr) {
kernel/trace/trace.c
3814
seq_printf(m, "# tracer: %s\n", iter->trace->name);
kernel/trace/trace.c
3818
if (iter->snapshot && trace_empty(iter))
kernel/trace/trace.c
3819
print_snapshot_help(m, iter);
kernel/trace/trace.c
3820
else if (iter->trace && iter->trace->print_header)
kernel/trace/trace.c
3821
iter->trace->print_header(m);
kernel/trace/trace.c
3825
} else if (iter->leftover) {
kernel/trace/trace.c
3830
ret = trace_print_seq(m, &iter->seq);
kernel/trace/trace.c
3833
iter->leftover = ret;
kernel/trace/trace.c
3836
ret = print_trace_line(iter);
kernel/trace/trace.c
3838
iter->seq.full = 0;
kernel/trace/trace.c
3839
trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
kernel/trace/trace.c
3841
ret = trace_print_seq(m, &iter->seq);
kernel/trace/trace.c
3849
iter->leftover = ret;
kernel/trace/trace.c
3880
static void free_trace_iter_content(struct trace_iterator *iter)
kernel/trace/trace.c
3883
if (iter->fmt != static_fmt_buf)
kernel/trace/trace.c
3884
kfree(iter->fmt);
kernel/trace/trace.c
3886
kfree(iter->temp);
kernel/trace/trace.c
3887
kfree(iter->buffer_iter);
kernel/trace/trace.c
3888
mutex_destroy(&iter->mutex);
kernel/trace/trace.c
3889
free_cpumask_var(iter->started);
kernel/trace/trace.c
3896
struct trace_iterator *iter;
kernel/trace/trace.c
3902
iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
kernel/trace/trace.c
3903
if (!iter)
kernel/trace/trace.c
3906
iter->buffer_iter = kzalloc_objs(*iter->buffer_iter, nr_cpu_ids);
kernel/trace/trace.c
3907
if (!iter->buffer_iter)
kernel/trace/trace.c
3918
iter->temp = kmalloc(128, GFP_KERNEL);
kernel/trace/trace.c
3919
if (iter->temp)
kernel/trace/trace.c
3920
iter->temp_size = 128;
kernel/trace/trace.c
3929
iter->fmt = NULL;
kernel/trace/trace.c
3930
iter->fmt_size = 0;
kernel/trace/trace.c
3933
iter->trace = tr->current_trace;
kernel/trace/trace.c
3935
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
kernel/trace/trace.c
3938
iter->tr = tr;
kernel/trace/trace.c
3943
iter->array_buffer = &tr->snapshot_buffer;
kernel/trace/trace.c
3946
iter->array_buffer = &tr->array_buffer;
kernel/trace/trace.c
3947
iter->snapshot = snapshot;
kernel/trace/trace.c
3948
iter->pos = -1;
kernel/trace/trace.c
3949
iter->cpu_file = tracing_get_cpu(inode);
kernel/trace/trace.c
3950
mutex_init(&iter->mutex);
kernel/trace/trace.c
3953
if (iter->trace->open)
kernel/trace/trace.c
3954
iter->trace->open(iter);
kernel/trace/trace.c
3957
if (ring_buffer_overruns(iter->array_buffer->buffer))
kernel/trace/trace.c
3958
iter->iter_flags |= TRACE_FILE_ANNOTATE;
kernel/trace/trace.c
3962
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
kernel/trace/trace.c
3968
if (!iter->snapshot && (tr->trace_flags & TRACE_ITER(PAUSE_ON_TRACE))) {
kernel/trace/trace.c
3969
iter->iter_flags |= TRACE_FILE_PAUSE;
kernel/trace/trace.c
3973
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
3975
iter->buffer_iter[cpu] =
kernel/trace/trace.c
3976
ring_buffer_read_start(iter->array_buffer->buffer,
kernel/trace/trace.c
3978
tracing_iter_reset(iter, cpu);
kernel/trace/trace.c
3981
cpu = iter->cpu_file;
kernel/trace/trace.c
3982
iter->buffer_iter[cpu] =
kernel/trace/trace.c
3983
ring_buffer_read_start(iter->array_buffer->buffer,
kernel/trace/trace.c
3985
tracing_iter_reset(iter, cpu);
kernel/trace/trace.c
3990
return iter;
kernel/trace/trace.c
3994
free_trace_iter_content(iter);
kernel/trace/trace.c
4078
struct trace_iterator *iter;
kernel/trace/trace.c
4087
iter = m->private;
kernel/trace/trace.c
4091
if (iter->buffer_iter[cpu])
kernel/trace/trace.c
4092
ring_buffer_read_finish(iter->buffer_iter[cpu]);
kernel/trace/trace.c
4095
if (iter->trace && iter->trace->close)
kernel/trace/trace.c
4096
iter->trace->close(iter);
kernel/trace/trace.c
4098
if (iter->iter_flags & TRACE_FILE_PAUSE)
kernel/trace/trace.c
4106
free_trace_iter_content(iter);
kernel/trace/trace.c
4134
struct trace_iterator *iter;
kernel/trace/trace.c
4160
iter = __tracing_open(inode, file, false);
kernel/trace/trace.c
4161
if (IS_ERR(iter))
kernel/trace/trace.c
4162
ret = PTR_ERR(iter);
kernel/trace/trace.c
4164
iter->iter_flags |= TRACE_FILE_LAT_FMT;
kernel/trace/trace.c
5796
struct trace_iterator *iter;
kernel/trace/trace.c
5811
iter = kzalloc_obj(*iter);
kernel/trace/trace.c
5812
if (!iter) {
kernel/trace/trace.c
5817
trace_seq_init(&iter->seq);
kernel/trace/trace.c
5818
iter->trace = tr->current_trace;
kernel/trace/trace.c
5820
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
kernel/trace/trace.c
5826
cpumask_setall(iter->started);
kernel/trace/trace.c
5829
iter->iter_flags |= TRACE_FILE_LAT_FMT;
kernel/trace/trace.c
5833
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
kernel/trace/trace.c
5835
iter->tr = tr;
kernel/trace/trace.c
5836
iter->array_buffer = &tr->array_buffer;
kernel/trace/trace.c
5837
iter->cpu_file = cpu;
kernel/trace/trace.c
5838
mutex_init(&iter->mutex);
kernel/trace/trace.c
5839
filp->private_data = iter;
kernel/trace/trace.c
5841
if (iter->trace->pipe_open)
kernel/trace/trace.c
5842
iter->trace->pipe_open(iter);
kernel/trace/trace.c
5851
kfree(iter);
kernel/trace/trace.c
5861
struct trace_iterator *iter = file->private_data;
kernel/trace/trace.c
5867
if (iter->trace->pipe_close)
kernel/trace/trace.c
5868
iter->trace->pipe_close(iter);
kernel/trace/trace.c
5869
close_pipe_on_cpu(tr, iter->cpu_file);
kernel/trace/trace.c
5872
free_trace_iter_content(iter);
kernel/trace/trace.c
5873
kfree(iter);
kernel/trace/trace.c
5881
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
kernel/trace/trace.c
5883
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
5886
if (trace_buffer_iter(iter, iter->cpu_file))
kernel/trace/trace.c
5895
return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
kernel/trace/trace.c
5896
filp, poll_table, iter->tr->buffer_percent);
kernel/trace/trace.c
5902
struct trace_iterator *iter = filp->private_data;
kernel/trace/trace.c
5904
return trace_poll(iter, filp, poll_table);
kernel/trace/trace.c
5910
struct trace_iterator *iter = filp->private_data;
kernel/trace/trace.c
5913
while (trace_empty(iter)) {
kernel/trace/trace.c
5928
if (!tracer_tracing_is_on(iter->tr) && iter->pos)
kernel/trace/trace.c
5931
mutex_unlock(&iter->mutex);
kernel/trace/trace.c
5933
ret = wait_on_pipe(iter, 0);
kernel/trace/trace.c
5935
mutex_lock(&iter->mutex);
kernel/trace/trace.c
5967
struct trace_iterator *iter = filp->private_data;
kernel/trace/trace.c
5975
guard(mutex)(&iter->mutex);
kernel/trace/trace.c
5978
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
kernel/trace/trace.c
5982
trace_seq_init(&iter->seq);
kernel/trace/trace.c
5984
if (iter->trace->read) {
kernel/trace/trace.c
5985
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
kernel/trace/trace.c
5991
if (update_last_data_if_empty(iter->tr))
kernel/trace/trace.c
5999
if (trace_empty(iter))
kernel/trace/trace.c
6006
trace_iterator_reset(iter);
kernel/trace/trace.c
6007
cpumask_clear(iter->started);
kernel/trace/trace.c
6008
trace_seq_init(&iter->seq);
kernel/trace/trace.c
6011
trace_access_lock(iter->cpu_file);
kernel/trace/trace.c
6012
while (trace_find_next_entry_inc(iter) != NULL) {
kernel/trace/trace.c
6014
int save_len = iter->seq.seq.len;
kernel/trace/trace.c
6016
ret = print_trace_line(iter);
kernel/trace/trace.c
6025
iter->seq.full = 0;
kernel/trace/trace.c
6026
trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
kernel/trace/trace.c
6027
trace_consume(iter);
kernel/trace/trace.c
6032
iter->seq.seq.len = save_len;
kernel/trace/trace.c
6036
trace_consume(iter);
kernel/trace/trace.c
6038
if (trace_seq_used(&iter->seq) >= cnt)
kernel/trace/trace.c
6046
WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
kernel/trace/trace.c
6047
iter->ent->type);
kernel/trace/trace.c
6049
trace_access_unlock(iter->cpu_file);
kernel/trace/trace.c
6053
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
kernel/trace/trace.c
6054
if (iter->seq.readpos >= trace_seq_used(&iter->seq))
kernel/trace/trace.c
6055
trace_seq_init(&iter->seq);
kernel/trace/trace.c
6074
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
kernel/trace/trace.c
6082
save_len = iter->seq.seq.len;
kernel/trace/trace.c
6083
ret = print_trace_line(iter);
kernel/trace/trace.c
6085
if (trace_seq_has_overflowed(&iter->seq)) {
kernel/trace/trace.c
6086
iter->seq.seq.len = save_len;
kernel/trace/trace.c
6096
iter->seq.seq.len = save_len;
kernel/trace/trace.c
6100
count = trace_seq_used(&iter->seq) - save_len;
kernel/trace/trace.c
6103
iter->seq.seq.len = save_len;
kernel/trace/trace.c
6108
trace_consume(iter);
kernel/trace/trace.c
6110
if (!trace_find_next_entry_inc(iter)) {
kernel/trace/trace.c
6112
iter->ent = NULL;
kernel/trace/trace.c
6128
struct trace_iterator *iter = filp->private_data;
kernel/trace/trace.c
6144
mutex_lock(&iter->mutex);
kernel/trace/trace.c
6146
if (iter->trace->splice_read) {
kernel/trace/trace.c
6147
ret = iter->trace->splice_read(iter, filp,
kernel/trace/trace.c
6157
if (!iter->ent && !trace_find_next_entry_inc(iter)) {
kernel/trace/trace.c
6163
trace_access_lock(iter->cpu_file);
kernel/trace/trace.c
6171
rem = tracing_fill_pipe_page(rem, iter);
kernel/trace/trace.c
6174
ret = trace_seq_to_buffer(&iter->seq,
kernel/trace/trace.c
6176
min((size_t)trace_seq_used(&iter->seq),
kernel/trace/trace.c
6185
trace_seq_init(&iter->seq);
kernel/trace/trace.c
6188
trace_access_unlock(iter->cpu_file);
kernel/trace/trace.c
6190
mutex_unlock(&iter->mutex);
kernel/trace/trace.c
6203
mutex_unlock(&iter->mutex);
kernel/trace/trace.c
7146
struct trace_iterator iter;
kernel/trace/trace.c
7157
struct trace_iterator *iter;
kernel/trace/trace.c
7166
iter = __tracing_open(inode, file, true);
kernel/trace/trace.c
7167
if (IS_ERR(iter))
kernel/trace/trace.c
7168
ret = PTR_ERR(iter);
kernel/trace/trace.c
7175
iter = kzalloc_obj(*iter);
kernel/trace/trace.c
7176
if (!iter) {
kernel/trace/trace.c
7182
iter->tr = tr;
kernel/trace/trace.c
7183
iter->array_buffer = &tr->snapshot_buffer;
kernel/trace/trace.c
7184
iter->cpu_file = tracing_get_cpu(inode);
kernel/trace/trace.c
7185
m->private = iter;
kernel/trace/trace.c
7205
struct trace_iterator *iter = m->private;
kernel/trace/trace.c
7206
struct trace_array *tr = iter->tr;
kernel/trace/trace.c
7234
if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
kernel/trace/trace.c
7242
if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
kernel/trace/trace.c
7247
&tr->array_buffer, iter->cpu_file);
kernel/trace/trace.c
7254
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
kernel/trace/trace.c
7259
smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
kernel/trace/trace.c
7266
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
kernel/trace/trace.c
7269
tracing_reset_cpu(&tr->snapshot_buffer, iter->cpu_file);
kernel/trace/trace.c
7319
if (tracer_uses_snapshot(info->iter.trace)) {
kernel/trace/trace.c
7324
info->iter.snapshot = true;
kernel/trace/trace.c
7325
info->iter.array_buffer = &info->iter.tr->snapshot_buffer;
kernel/trace/trace.c
7831
info->iter.tr = tr;
kernel/trace/trace.c
7832
info->iter.cpu_file = tracing_get_cpu(inode);
kernel/trace/trace.c
7833
info->iter.trace = tr->current_trace;
kernel/trace/trace.c
7834
info->iter.array_buffer = &tr->array_buffer;
kernel/trace/trace.c
7856
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
7858
return trace_poll(iter, filp, poll_table);
kernel/trace/trace.c
7866
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
7875
if (iter->snapshot && tracer_uses_snapshot(iter->tr->current_trace))
kernel/trace/trace.c
7878
page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
kernel/trace/trace.c
7883
ring_buffer_free_read_page(iter->array_buffer->buffer,
kernel/trace/trace.c
7890
info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
kernel/trace/trace.c
7891
iter->cpu_file);
kernel/trace/trace.c
7896
info->spare_cpu = iter->cpu_file;
kernel/trace/trace.c
7908
trace_access_lock(iter->cpu_file);
kernel/trace/trace.c
7909
ret = ring_buffer_read_page(iter->array_buffer->buffer,
kernel/trace/trace.c
7912
iter->cpu_file, 0);
kernel/trace/trace.c
7913
trace_access_unlock(iter->cpu_file);
kernel/trace/trace.c
7916
if (trace_empty(iter) && !iter->closed) {
kernel/trace/trace.c
7917
if (update_last_data_if_empty(iter->tr))
kernel/trace/trace.c
7923
ret = wait_on_pipe(iter, 0);
kernel/trace/trace.c
7953
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
7955
iter->closed = true;
kernel/trace/trace.c
7957
(void)atomic_fetch_inc_release(&iter->wait_index);
kernel/trace/trace.c
7959
ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
kernel/trace/trace.c
7967
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
7971
iter->tr->trace_ref--;
kernel/trace/trace.c
7973
__trace_array_put(iter->tr);
kernel/trace/trace.c
7976
ring_buffer_free_read_page(iter->array_buffer->buffer,
kernel/trace/trace.c
8044
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
8060
if (iter->snapshot && tracer_uses_snapshot(iter->tr->current_trace))
kernel/trace/trace.c
8063
page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
kernel/trace/trace.c
8077
trace_access_lock(iter->cpu_file);
kernel/trace/trace.c
8078
entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
kernel/trace/trace.c
8091
ref->buffer = iter->array_buffer->buffer;
kernel/trace/trace.c
8092
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
kernel/trace/trace.c
8099
ref->cpu = iter->cpu_file;
kernel/trace/trace.c
8102
len, iter->cpu_file, 1);
kernel/trace/trace.c
8119
entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
kernel/trace/trace.c
8122
trace_access_unlock(iter->cpu_file);
kernel/trace/trace.c
8138
ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
kernel/trace/trace.c
8143
if (!tracer_tracing_is_on(iter->tr))
kernel/trace/trace.c
8162
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
8167
err = ring_buffer_wait(iter->array_buffer->buffer,
kernel/trace/trace.c
8168
iter->cpu_file,
kernel/trace/trace.c
8169
iter->tr->buffer_percent,
kernel/trace/trace.c
8175
return ring_buffer_map_get_reader(iter->array_buffer->buffer,
kernel/trace/trace.c
8176
iter->cpu_file);
kernel/trace/trace.c
8188
(void)atomic_fetch_inc_release(&iter->wait_index);
kernel/trace/trace.c
8190
ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
kernel/trace/trace.c
8240
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
8242
ring_buffer_map_dup(iter->array_buffer->buffer, iter->cpu_file);
kernel/trace/trace.c
8248
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
8250
WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
kernel/trace/trace.c
8251
put_snapshot_map(iter->tr);
kernel/trace/trace.c
8272
struct trace_iterator *iter = &info->iter;
kernel/trace/trace.c
8276
if (iter->tr->flags & (TRACE_ARRAY_FL_MEMMAP | TRACE_ARRAY_FL_VMALLOC))
kernel/trace/trace.c
8279
ret = get_snapshot_map(iter->tr);
kernel/trace/trace.c
8283
ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
kernel/trace/trace.c
8285
put_snapshot_map(iter->tr);
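
trace.c's dump path (around lines 10391-10408 above) drains a buffer with a trace_iterator by finding, printing, and consuming one entry at a time. Condensed into a single helper for illustration, with the caveat that trace_consume() and trace_printk_seq() are local to trace.c, so this is the shape of the loop rather than a standalone module; setup and teardown (disabling tracing, locking) are omitted.

/* Condensed shape of the ftrace-dump drain loop. */
static void demo_drain(struct trace_iterator *iter)
{
	while (!trace_empty(iter)) {
		if (trace_find_next_entry_inc(iter)) {
			print_trace_line(iter);		/* render into iter->seq */
			trace_consume(iter);		/* remove it from the buffer */
		}
		trace_printk_seq(&iter->seq);		/* flush and reinit the seq */
	}
}
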
kernel/trace/trace.h
1163
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
kernel/trace/trace.h
2265
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
kernel/trace/trace.h
2267
memset_startat(iter, 0, seq);
kernel/trace/trace.h
2268
iter->pos = -1;
kernel/trace/trace.h
625
void (*open)(struct trace_iterator *iter);
kernel/trace/trace.h
626
void (*pipe_open)(struct trace_iterator *iter);
kernel/trace/trace.h
627
void (*close)(struct trace_iterator *iter);
kernel/trace/trace.h
628
void (*pipe_close)(struct trace_iterator *iter);
kernel/trace/trace.h
629
ssize_t (*read)(struct trace_iterator *iter,
kernel/trace/trace.h
632
ssize_t (*splice_read)(struct trace_iterator *iter,
kernel/trace/trace.h
643
enum print_line_t (*print_line)(struct trace_iterator *iter);
kernel/trace/trace.h
664
trace_buffer_iter(struct trace_iterator *iter, int cpu)
kernel/trace/trace.h
666
return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
kernel/trace/trace.h
723
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
kernel/trace/trace.h
730
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
kernel/trace/trace.h
731
char *trace_iter_expand_format(struct trace_iterator *iter);
kernel/trace/trace.h
732
bool ignore_event(struct trace_iterator *iter);
kernel/trace/trace.h
734
int trace_empty(struct trace_iterator *iter);
kernel/trace/trace.h
736
void *trace_find_next_entry_inc(struct trace_iterator *iter);
kernel/trace/trace.h
738
void trace_init_global_iter(struct trace_iterator *iter);
kernel/trace/trace.h
740
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
kernel/trace/trace.h
756
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
kernel/trace/trace.h
917
enum print_line_t print_trace_line(struct trace_iterator *iter);
kernel/trace/trace.h
979
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
kernel/trace/trace.h
983
extern void graph_trace_open(struct trace_iterator *iter);
kernel/trace/trace.h
984
extern void graph_trace_close(struct trace_iterator *iter);
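
trace.h lines 625-643 above are the per-tracer hooks that receive the iterator. A minimal, hypothetical tracer filling in only the iterator-related callbacks; real tracers populate many more fields and are registered with register_tracer(), and the demo_* names and the 64-byte scratch buffer are assumptions for the sketch.

#include <linux/slab.h>

static void demo_trace_open(struct trace_iterator *iter)
{
	/* attach per-iterator scratch state, as graph_trace_open() does */
	iter->private = kzalloc(64, GFP_KERNEL);
}

static void demo_trace_close(struct trace_iterator *iter)
{
	kfree(iter->private);
	iter->private = NULL;
}

static enum print_line_t demo_print_line(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "type %d cpu %d\n",
			 iter->ent->type, iter->cpu);
	return trace_handle_return(&iter->seq);
}

static struct tracer demo_tracer = {
	.name		= "demo",
	.open		= demo_trace_open,
	.close		= demo_trace_close,
	.print_line	= demo_print_line,
};
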
kernel/trace/trace_branch.c
134
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
kernel/trace/trace_branch.c
139
trace_assign_type(field, iter->ent);
kernel/trace/trace_branch.c
141
trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
kernel/trace/trace_branch.c
147
return trace_handle_return(&iter->seq);
kernel/trace/trace_eprobe.c
255
print_eprobe_event(struct trace_iterator *iter, int flags,
kernel/trace/trace_eprobe.c
261
struct trace_seq *s = &iter->seq;
kernel/trace/trace_eprobe.c
266
field = (struct eprobe_trace_entry_head *)iter->ent;
kernel/trace/trace_eprobe.c
600
struct event_trigger_data *trigger = NULL, *iter;
kernel/trace/trace_eprobe.c
609
list_for_each_entry(iter, &file->triggers, list) {
kernel/trace/trace_eprobe.c
610
if (!(iter->flags & EVENT_TRIGGER_FL_PROBE))
kernel/trace/trace_eprobe.c
612
edata = iter->private_data;
kernel/trace/trace_eprobe.c
614
trigger = iter;
kernel/trace/trace_events.c
1612
struct set_event_iter *iter = v;
kernel/trace/trace_events.c
1618
if (iter->type == SET_EVENT_FILE) {
kernel/trace/trace_events.c
1619
file = iter->file;
kernel/trace/trace_events.c
1622
iter->file = file;
kernel/trace/trace_events.c
1623
return iter;
kernel/trace/trace_events.c
1627
iter->type = SET_EVENT_MOD;
kernel/trace/trace_events.c
1628
iter->event_mod = list_entry(&tr->mod_events, struct event_mod_load, list);
kernel/trace/trace_events.c
1633
list_for_each_entry_continue(iter->event_mod, &tr->mod_events, list)
kernel/trace/trace_events.c
1634
return iter;
kernel/trace/trace_events.c
1643
kfree(iter);
kernel/trace/trace_events.c
1650
struct set_event_iter *iter;
kernel/trace/trace_events.c
1653
iter = kzalloc_obj(*iter);
kernel/trace/trace_events.c
1655
if (!iter)
kernel/trace/trace_events.c
1658
iter->type = SET_EVENT_FILE;
kernel/trace/trace_events.c
1659
iter->file = list_entry(&tr->events, struct trace_event_file, list);
kernel/trace/trace_events.c
1662
iter = s_next(m, iter, &l);
kernel/trace/trace_events.c
1663
if (!iter)
kernel/trace/trace_events.c
1666
return iter;
kernel/trace/trace_events.c
1765
struct set_event_iter *iter = v;
kernel/trace/trace_events.c
1769
if (iter->type == SET_EVENT_FILE)
kernel/trace/trace_events.c
1770
return t_show(m, iter->file);
kernel/trace/trace_events.c
1773
if (iter->event_mod->match) {
kernel/trace/trace_events.c
1774
seq_printf(m, "%s:mod:%s\n", iter->event_mod->match,
kernel/trace/trace_events.c
1775
iter->event_mod->module);
kernel/trace/trace_events.c
1779
system = iter->event_mod->system ? : "*";
kernel/trace/trace_events.c
1780
event = iter->event_mod->event ? : "*";
kernel/trace/trace_events.c
1782
seq_printf(m, "%s:%s:mod:%s\n", system, event, iter->event_mod->module);
kernel/trace/trace_events.c
1789
struct set_event_iter *iter = v;
kernel/trace/trace_events.c
1791
return t_show(m, iter->file);
kernel/trace/trace_events.c
2931
struct event_subsystem *system, *iter;
kernel/trace/trace_events.c
2958
list_for_each_entry(iter, &event_subsystems, list) {
kernel/trace/trace_events.c
2959
if (strcmp(iter->name, name) == 0) {
kernel/trace/trace_events.c
2960
system = iter;
kernel/trace/trace_events.c
4730
struct trace_event_call **iter, *call;
kernel/trace/trace_events.c
4736
for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
kernel/trace/trace_events.c
4738
call = *iter;
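
The set_event iterator above (trace_events.c lines 1612-1791) walks two lists in a single seq_file pass, with iter->type recording whether it is still in the event-file phase or has moved on to the module entries. A self-contained sketch of that two-phase ->next() idea with made-up types and fixed-size tables standing in for the real lists:

#include <linux/seq_file.h>

enum demo_phase { DEMO_FILES, DEMO_MODS };

struct demo_set_iter {
	enum demo_phase	phase;
	int		file_idx;	/* cursor in a notional file table */
	int		mod_idx;	/* cursor in a notional module table */
};

#define DEMO_NR_FILES	3
#define DEMO_NR_MODS	2

static void *demo_set_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct demo_set_iter *iter = v;

	(*pos)++;
	if (iter->phase == DEMO_FILES) {
		if (++iter->file_idx < DEMO_NR_FILES)
			return iter;
		iter->phase = DEMO_MODS;	/* first list exhausted: switch */
		iter->mod_idx = -1;
	}
	return ++iter->mod_idx < DEMO_NR_MODS ? iter : NULL;
}
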
kernel/trace/trace_events_hist.c
6710
struct event_trigger_data *test = NULL, *iter, *named_data = NULL;
kernel/trace/trace_events_hist.c
6718
list_for_each_entry(iter, &file->triggers, list) {
kernel/trace/trace_events_hist.c
6719
if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) {
kernel/trace/trace_events_hist.c
6720
if (!hist_trigger_match(data, iter, named_data, false))
kernel/trace/trace_events_hist.c
6722
test = iter;
kernel/trace/trace_events_synth.c
344
static enum print_line_t print_synth_event(struct trace_iterator *iter,
kernel/trace/trace_events_synth.c
348
struct trace_array *tr = iter->tr;
kernel/trace/trace_events_synth.c
349
struct trace_seq *s = &iter->seq;
kernel/trace/trace_events_synth.c
356
entry = (struct synth_trace_event *)iter->ent;
kernel/trace/trace_events_trigger.c
1888
struct event_trigger_data *data = NULL, *iter;
kernel/trace/trace_events_trigger.c
1893
list_for_each_entry(iter, &file->triggers, list) {
kernel/trace/trace_events_trigger.c
1894
enable_data = iter->private_data;
kernel/trace/trace_events_trigger.c
1896
(iter->cmd_ops->trigger_type ==
kernel/trace/trace_events_trigger.c
1899
data = iter;
kernel/trace/trace_events_trigger.c
727
struct event_trigger_data *data = NULL, *iter;
kernel/trace/trace_events_trigger.c
731
list_for_each_entry(iter, &file->triggers, list) {
kernel/trace/trace_events_trigger.c
732
if (iter->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
kernel/trace/trace_events_trigger.c
733
data = iter;
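
In the eprobe, hist, and trigger lookups above, iter is simply a list cursor: after list_for_each_entry() the loop variable cannot be trusted when nothing matched, so the match is copied to a separate pointer. The generic shape, with an illustrative element type:

#include <linux/list.h>

struct demo_trigger {
	struct list_head	list;
	int			type;
};

static struct demo_trigger *demo_find_trigger(struct list_head *head, int type)
{
	struct demo_trigger *found = NULL, *iter;

	list_for_each_entry(iter, head, list) {
		if (iter->type == type) {
			found = iter;	/* remember the match, keep iter as cursor */
			break;
		}
	}
	return found;			/* NULL when nothing matched */
}
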
kernel/trace/trace_events_user.c
1439
static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
kernel/trace/trace_events_user.c
1443
return print_event_fields(iter, event);
kernel/trace/trace_fprobe.c
620
print_fentry_event(struct trace_iterator *iter, int flags,
kernel/trace/trace_fprobe.c
624
struct trace_seq *s = &iter->seq;
kernel/trace/trace_fprobe.c
627
field = (struct fentry_trace_entry_head *)iter->ent;
kernel/trace/trace_fprobe.c
650
print_fexit_event(struct trace_iterator *iter, int flags,
kernel/trace/trace_fprobe.c
654
struct trace_seq *s = &iter->seq;
kernel/trace/trace_fprobe.c
657
field = (struct fexit_trace_entry_head *)iter->ent;
kernel/trace/trace_functions_graph.c
1000
(void *)graph_ret->func + iter->tr->text_delta,
kernel/trace/trace_functions_graph.c
1013
print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
kernel/trace/trace_functions_graph.c
1014
cpu, iter->ent->pid, flags);
kernel/trace/trace_functions_graph.c
1020
print_graph_entry_nested(struct trace_iterator *iter,
kernel/trace/trace_functions_graph.c
1025
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1026
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
1033
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1051
func = call->func + iter->tr->text_delta;
kernel/trace/trace_functions_graph.c
1055
args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);
kernel/trace/trace_functions_graph.c
1081
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
kernel/trace/trace_functions_graph.c
1084
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1085
struct trace_entry *ent = iter->ent;
kernel/trace/trace_functions_graph.c
1086
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
1087
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1094
print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
kernel/trace/trace_functions_graph.c
1101
print_graph_abs_time(iter->ts, s);
kernel/trace/trace_functions_graph.c
1105
print_graph_rel_time(iter, s);
kernel/trace/trace_functions_graph.c
1136
check_irq_entry(struct trace_iterator *iter, u32 flags,
kernel/trace/trace_functions_graph.c
1139
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1141
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1143
addr += iter->tr->text_delta;
kernel/trace/trace_functions_graph.c
1185
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
kernel/trace/trace_functions_graph.c
1187
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1189
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1230
struct trace_iterator *iter, u32 flags)
kernel/trace/trace_functions_graph.c
1232
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1236
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1250
if (iter->ent_size > sizeof(save_buf))
kernel/trace/trace_functions_graph.c
1251
iter->ent_size = sizeof(save_buf);
kernel/trace/trace_functions_graph.c
1254
memcpy(entry, field, iter->ent_size);
kernel/trace/trace_functions_graph.c
1258
if (check_irq_entry(iter, flags, call->func, call->depth))
kernel/trace/trace_functions_graph.c
1261
print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
kernel/trace/trace_functions_graph.c
1263
leaf_ret = get_return_for_leaf(iter, entry);
kernel/trace/trace_functions_graph.c
1265
ret = print_graph_entry_leaf(iter, entry, leaf_ret, s, flags);
kernel/trace/trace_functions_graph.c
1267
ret = print_graph_entry_nested(iter, entry, s, cpu, flags);
kernel/trace/trace_functions_graph.c
1286
struct trace_entry *ent, struct trace_iterator *iter,
kernel/trace/trace_functions_graph.c
1293
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1294
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
1297
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1301
func = trace->func + iter->tr->text_delta;
kernel/trace/trace_functions_graph.c
1303
if (check_irq_return(iter, flags, trace->depth))
kernel/trace/trace_functions_graph.c
1308
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1327
print_graph_prologue(iter, s, 0, 0, flags);
kernel/trace/trace_functions_graph.c
1363
print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
kernel/trace/trace_functions_graph.c
1371
struct trace_iterator *iter, u32 flags)
kernel/trace/trace_functions_graph.c
1373
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
1375
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1382
depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
kernel/trace/trace_functions_graph.c
1384
print_graph_prologue(iter, s, 0, 0, flags);
kernel/trace/trace_functions_graph.c
1397
switch (iter->ent->type) {
kernel/trace/trace_functions_graph.c
1399
ret = trace_print_bputs_msg_only(iter);
kernel/trace/trace_functions_graph.c
1404
ret = trace_print_bprintk_msg_only(iter);
kernel/trace/trace_functions_graph.c
1409
ret = trace_print_printk_msg_only(iter);
kernel/trace/trace_functions_graph.c
1418
ret = event->funcs->trace(iter, sym_flags, event);
kernel/trace/trace_functions_graph.c
1439
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
kernel/trace/trace_functions_graph.c
1442
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1443
struct trace_entry *entry = iter->ent;
kernel/trace/trace_functions_graph.c
1444
struct trace_seq *s = &iter->seq;
kernel/trace/trace_functions_graph.c
1445
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
1459
iter->cpu = data->cpu;
kernel/trace/trace_functions_graph.c
1460
ret = print_graph_entry(field, s, iter, flags);
kernel/trace/trace_functions_graph.c
1461
if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
kernel/trace/trace_functions_graph.c
1462
per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
kernel/trace/trace_functions_graph.c
1465
iter->cpu = cpu;
kernel/trace/trace_functions_graph.c
1472
return print_graph_entry(field, s, iter, flags);
kernel/trace/trace_functions_graph.c
1486
s, iter, flags);
kernel/trace/trace_functions_graph.c
1492
return print_graph_return(field, s, entry, iter, flags);
kernel/trace/trace_functions_graph.c
1500
return print_graph_comment(s, entry, iter, flags);
kernel/trace/trace_functions_graph.c
1507
print_graph_function(struct trace_iterator *iter)
kernel/trace/trace_functions_graph.c
1509
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
1510
return print_graph_function_flags(iter, tr->current_trace_flags->val);
kernel/trace/trace_functions_graph.c
1514
print_graph_function_event(struct trace_iterator *iter, int flags,
kernel/trace/trace_functions_graph.c
1517
return print_graph_function(iter);
kernel/trace/trace_functions_graph.c
1586
struct trace_iterator *iter = s->private;
kernel/trace/trace_functions_graph.c
1587
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
1594
struct trace_iterator *iter = s->private;
kernel/trace/trace_functions_graph.c
1595
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
1602
if (trace_empty(iter))
kernel/trace/trace_functions_graph.c
1605
print_trace_header(s, iter);
kernel/trace/trace_functions_graph.c
1611
void graph_trace_open(struct trace_iterator *iter)
kernel/trace/trace_functions_graph.c
1618
iter->private = NULL;
kernel/trace/trace_functions_graph.c
1643
iter->private = data;
kernel/trace/trace_functions_graph.c
1653
void graph_trace_close(struct trace_iterator *iter)
kernel/trace/trace_functions_graph.c
1655
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
1660
iter->private = NULL;
kernel/trace/trace_functions_graph.c
637
get_return_for_leaf(struct trace_iterator *iter,
kernel/trace/trace_functions_graph.c
640
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
654
ring_iter = trace_buffer_iter(iter, iter->cpu);
kernel/trace/trace_functions_graph.c
664
ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
kernel/trace/trace_functions_graph.c
666
event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
kernel/trace/trace_functions_graph.c
680
int size = min_t(int, sizeof(data->rent), iter->ent_size);
kernel/trace/trace_functions_graph.c
721
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
kernel/trace/trace_functions_graph.c
725
usecs = iter->ts - iter->array_buffer->time_start;
kernel/trace/trace_functions_graph.c
732
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
kernel/trace/trace_functions_graph.c
735
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
736
struct trace_seq *s = &iter->seq;
kernel/trace/trace_functions_graph.c
737
struct trace_entry *ent = iter->ent;
kernel/trace/trace_functions_graph.c
739
addr += iter->tr->text_delta;
kernel/trace/trace_functions_graph.c
748
print_graph_abs_time(iter->ts, s);
kernel/trace/trace_functions_graph.c
752
print_graph_rel_time(iter, s);
kernel/trace/trace_functions_graph.c
947
print_graph_entry_leaf(struct trace_iterator *iter,
kernel/trace/trace_functions_graph.c
952
struct fgraph_data *data = iter->private;
kernel/trace/trace_functions_graph.c
953
struct trace_array *tr = iter->tr;
kernel/trace/trace_functions_graph.c
959
int cpu = iter->cpu;
kernel/trace/trace_functions_graph.c
962
args_size = iter->ent_size - offsetof(struct ftrace_graph_ent_entry, args);
kernel/trace/trace_functions_graph.c
993
ret_func = graph_ret->func + iter->tr->text_delta;
kernel/trace/trace_irqsoff.c
246
static void irqsoff_trace_open(struct trace_iterator *iter)
kernel/trace/trace_irqsoff.c
248
if (is_graph(iter->tr))
kernel/trace/trace_irqsoff.c
249
graph_trace_open(iter);
kernel/trace/trace_irqsoff.c
252
static void irqsoff_trace_close(struct trace_iterator *iter)
kernel/trace/trace_irqsoff.c
254
if (iter->private)
kernel/trace/trace_irqsoff.c
255
graph_trace_close(iter);
kernel/trace/trace_irqsoff.c
263
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
kernel/trace/trace_irqsoff.c
269
if (is_graph(iter->tr))
kernel/trace/trace_irqsoff.c
270
return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
kernel/trace/trace_irqsoff.c
305
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
kernel/trace/trace_irqsoff.c
310
static void irqsoff_trace_open(struct trace_iterator *iter) { }
kernel/trace/trace_irqsoff.c
311
static void irqsoff_trace_close(struct trace_iterator *iter) { }
kernel/trace/trace_kdb.c
117
trace_init_global_iter(&iter);
kernel/trace/trace_kdb.c
118
iter.buffer_iter = buffer_iter;
kernel/trace/trace_kdb.c
120
tracer_tracing_disable(iter.tr);
kernel/trace/trace_kdb.c
133
tracer_tracing_enable(iter.tr);
kernel/trace/trace_kdb.c
20
static struct trace_iterator iter;
kernel/trace/trace_kdb.c
29
tr = iter.tr;
kernel/trace/trace_kdb.c
40
trace_iterator_reset(&iter);
kernel/trace/trace_kdb.c
41
iter.iter_flags |= TRACE_FILE_LAT_FMT;
kernel/trace/trace_kdb.c
45
iter.buffer_iter[cpu] =
kernel/trace/trace_kdb.c
46
ring_buffer_read_start(iter.array_buffer->buffer,
kernel/trace/trace_kdb.c
48
tracing_iter_reset(&iter, cpu);
kernel/trace/trace_kdb.c
51
iter.cpu_file = cpu_file;
kernel/trace/trace_kdb.c
52
iter.buffer_iter[cpu_file] =
kernel/trace/trace_kdb.c
53
ring_buffer_read_start(iter.array_buffer->buffer,
kernel/trace/trace_kdb.c
55
tracing_iter_reset(&iter, cpu_file);
kernel/trace/trace_kdb.c
58
while (trace_find_next_entry_inc(&iter)) {
kernel/trace/trace_kdb.c
64
print_trace_line(&iter);
kernel/trace/trace_kdb.c
65
trace_printk_seq(&iter.seq);
kernel/trace/trace_kdb.c
83
if (iter.buffer_iter[cpu]) {
kernel/trace/trace_kdb.c
84
ring_buffer_read_finish(iter.buffer_iter[cpu]);
kernel/trace/trace_kdb.c
85
iter.buffer_iter[cpu] = NULL;
kernel/trace/trace_kprobe.c
1573
print_kprobe_event(struct trace_iterator *iter, int flags,
kernel/trace/trace_kprobe.c
1577
struct trace_seq *s = &iter->seq;
kernel/trace/trace_kprobe.c
1580
field = (struct kprobe_trace_entry_head *)iter->ent;
kernel/trace/trace_kprobe.c
1603
print_kretprobe_event(struct trace_iterator *iter, int flags,
kernel/trace/trace_kprobe.c
1607
struct trace_seq *s = &iter->seq;
kernel/trace/trace_kprobe.c
1610
field = (struct kretprobe_trace_entry_head *)iter->ent;
kernel/trace/trace_mmiotrace.c
100
struct trace_seq *s = &iter->seq;
kernel/trace/trace_mmiotrace.c
109
iter->private = hiter;
kernel/trace/trace_mmiotrace.c
113
static void mmio_close(struct trace_iterator *iter)
kernel/trace/trace_mmiotrace.c
115
struct header_iter *hiter = iter->private;
kernel/trace/trace_mmiotrace.c
117
iter->private = NULL;
kernel/trace/trace_mmiotrace.c
120
static unsigned long count_overruns(struct trace_iterator *iter)
kernel/trace/trace_mmiotrace.c
123
unsigned long over = ring_buffer_overruns(iter->array_buffer->buffer);
kernel/trace/trace_mmiotrace.c
131
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
kernel/trace/trace_mmiotrace.c
135
struct header_iter *hiter = iter->private;
kernel/trace/trace_mmiotrace.c
136
struct trace_seq *s = &iter->seq;
kernel/trace/trace_mmiotrace.c
139
n = count_overruns(iter);
kernel/trace/trace_mmiotrace.c
157
iter->private = NULL;
kernel/trace/trace_mmiotrace.c
165
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
kernel/trace/trace_mmiotrace.c
167
struct trace_entry *entry = iter->ent;
kernel/trace/trace_mmiotrace.c
170
struct trace_seq *s = &iter->seq;
kernel/trace/trace_mmiotrace.c
171
unsigned long long t = ns2usecs(iter->ts);
kernel/trace/trace_mmiotrace.c
210
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
kernel/trace/trace_mmiotrace.c
212
struct trace_entry *entry = iter->ent;
kernel/trace/trace_mmiotrace.c
215
struct trace_seq *s = &iter->seq;
kernel/trace/trace_mmiotrace.c
216
unsigned long long t = ns2usecs(iter->ts);
kernel/trace/trace_mmiotrace.c
244
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
kernel/trace/trace_mmiotrace.c
246
struct trace_entry *entry = iter->ent;
kernel/trace/trace_mmiotrace.c
249
struct trace_seq *s = &iter->seq;
kernel/trace/trace_mmiotrace.c
250
unsigned long long t = ns2usecs(iter->ts);
kernel/trace/trace_mmiotrace.c
260
static enum print_line_t mmio_print_line(struct trace_iterator *iter)
kernel/trace/trace_mmiotrace.c
262
switch (iter->ent->type) {
kernel/trace/trace_mmiotrace.c
264
return mmio_print_rw(iter);
kernel/trace/trace_mmiotrace.c
266
return mmio_print_map(iter);
kernel/trace/trace_mmiotrace.c
268
return mmio_print_mark(iter);
kernel/trace/trace_mmiotrace.c
97
static void mmio_pipe_open(struct trace_iterator *iter)
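
Illustrative sketch (not kernel code): the trace_mmiotrace.c entries above show mmio_print_line() dispatching on iter->ent->type to a per-record printer, the same shape used by the graph and output printers earlier. A minimal userspace C analogue of that dispatch, with hypothetical record types and function names:

#include <stdio.h>

/* Hypothetical record kinds, standing in for trace entry types. */
enum rec_type { REC_RW, REC_MAP, REC_MARK };

struct record { enum rec_type type; unsigned long long ts; const char *msg; };
struct iterator { const struct record *ent; };

static int print_rw(const struct iterator *it)   { return printf("rw   %llu %s\n", it->ent->ts, it->ent->msg); }
static int print_map(const struct iterator *it)  { return printf("map  %llu %s\n", it->ent->ts, it->ent->msg); }
static int print_mark(const struct iterator *it) { return printf("mark %llu %s\n", it->ent->ts, it->ent->msg); }

/* Dispatch on the current entry's type, as the mmio/graph printers above do. */
static int print_line(const struct iterator *it)
{
	switch (it->ent->type) {
	case REC_RW:   return print_rw(it);
	case REC_MAP:  return print_map(it);
	case REC_MARK: return print_mark(it);
	}
	return printf("unknown type %d\n", it->ent->type);
}

int main(void)
{
	struct record recs[] = {
		{ REC_RW,   10, "read 0xdead" },
		{ REC_MAP,  20, "map 0xbeef" },
		{ REC_MARK, 30, "marker" },
	};
	struct iterator it;

	for (unsigned int i = 0; i < sizeof(recs) / sizeof(recs[0]); i++) {
		it.ent = &recs[i];
		print_line(&it);
	}
	return 0;
}
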
kernel/trace/trace_output.c
1002
if (offset + len > iter->ent_size) {
kernel/trace/trace_output.c
1003
trace_seq_puts(&iter->seq, "<OVERFLOW>");
kernel/trace/trace_output.c
1006
str = (char *)iter->ent + offset;
kernel/trace/trace_output.c
1015
trace_seq_putc(&iter->seq, str[i]);
kernel/trace/trace_output.c
1017
trace_seq_putc(&iter->seq, '.');
kernel/trace/trace_output.c
1019
trace_seq_puts(&iter->seq, " (");
kernel/trace/trace_output.c
1022
trace_seq_putc(&iter->seq, ':');
kernel/trace/trace_output.c
1023
trace_seq_printf(&iter->seq, "%02x", str[i]);
kernel/trace/trace_output.c
1025
trace_seq_putc(&iter->seq, ')');
kernel/trace/trace_output.c
1027
trace_seq_printf(&iter->seq, "%.*s", len, str);
kernel/trace/trace_output.c
1031
if (!iter->fmt_size)
kernel/trace/trace_output.c
1032
trace_iter_expand_format(iter);
kernel/trace/trace_output.c
1034
ret = strncpy_from_kernel_nofault(iter->fmt, (void *)addr,
kernel/trace/trace_output.c
1035
iter->fmt_size);
kernel/trace/trace_output.c
1037
trace_seq_printf(&iter->seq, "(0x%px)", pos);
kernel/trace/trace_output.c
1039
trace_seq_printf(&iter->seq, "(0x%px:%s)",
kernel/trace/trace_output.c
1040
pos, iter->fmt);
kernel/trace/trace_output.c
1044
trace_seq_printf(&iter->seq, "%pS", (void *)addr);
kernel/trace/trace_output.c
1051
trace_seq_printf(&iter->seq, "'%c'",
kernel/trace/trace_output.c
1054
trace_seq_printf(&iter->seq, "(%d)",
kernel/trace/trace_output.c
1058
trace_seq_printf(&iter->seq, "0x%x (%d)",
kernel/trace/trace_output.c
1065
print_array(iter, pos, NULL);
kernel/trace/trace_output.c
1070
print_array(iter, pos, field);
kernel/trace/trace_output.c
1083
trace_seq_printf(&iter->seq, "%pS ", (void *)ip);
kernel/trace/trace_output.c
1088
trace_seq_printf(&iter->seq, "%pS (%d)",
kernel/trace/trace_output.c
1091
trace_seq_printf(&iter->seq, "0x%x (%d)",
kernel/trace/trace_output.c
1099
trace_seq_printf(&iter->seq, "%pS (%lld)",
kernel/trace/trace_output.c
1102
trace_seq_printf(&iter->seq, "0x%llx (%lld)", laddr, laddr);
kernel/trace/trace_output.c
1106
trace_seq_puts(&iter->seq, "<INVALID-SIZE>");
kernel/trace/trace_output.c
1111
trace_seq_puts(&iter->seq, "<INVALID-TYPE>");
kernel/trace/trace_output.c
1114
trace_seq_putc(&iter->seq, '\n');
kernel/trace/trace_output.c
1117
enum print_line_t print_event_fields(struct trace_iterator *iter,
kernel/trace/trace_output.c
1139
trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
kernel/trace/trace_output.c
1147
trace_seq_printf(&iter->seq, "%s:", trace_event_name(call));
kernel/trace/trace_output.c
1150
print_fields(iter, call, head);
kernel/trace/trace_output.c
1152
trace_seq_puts(&iter->seq, "No fields found\n");
kernel/trace/trace_output.c
1155
return trace_handle_return(&iter->seq);
kernel/trace/trace_output.c
1158
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1161
trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
kernel/trace/trace_output.c
1163
return trace_handle_return(&iter->seq);
kernel/trace/trace_output.c
1184
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1188
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1192
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1194
args_size = iter->ent_size - offsetof(struct ftrace_entry, args);
kernel/trace/trace_output.c
1200
print_fn_trace(s, field->ip, field->parent_ip, args, iter->tr, flags);
kernel/trace/trace_output.c
1206
static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1211
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1213
trace_seq_printf(&iter->seq, "%lx %lx\n",
kernel/trace/trace_output.c
1217
return trace_handle_return(&iter->seq);
kernel/trace/trace_output.c
1220
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1224
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1226
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1234
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1238
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1240
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1261
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
kernel/trace/trace_output.c
1269
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1274
trace_seq_printf(&iter->seq,
kernel/trace/trace_output.c
1284
return trace_handle_return(&iter->seq);
kernel/trace/trace_output.c
1287
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1290
return trace_ctxwake_print(iter, "==>");
kernel/trace/trace_output.c
1293
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
kernel/trace/trace_output.c
1296
return trace_ctxwake_print(iter, " +");
kernel/trace/trace_output.c
1299
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
kernel/trace/trace_output.c
1304
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1309
trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
kernel/trace/trace_output.c
1318
return trace_handle_return(&iter->seq);
kernel/trace/trace_output.c
1321
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1324
return trace_ctxwake_raw(iter, 0);
kernel/trace/trace_output.c
1327
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1330
return trace_ctxwake_raw(iter, '+');
kernel/trace/trace_output.c
1334
static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
kernel/trace/trace_output.c
1337
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1340
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1357
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1360
return trace_ctxwake_hex(iter, 0);
kernel/trace/trace_output.c
1363
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1366
return trace_ctxwake_hex(iter, '+');
kernel/trace/trace_output.c
1369
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
kernel/trace/trace_output.c
1373
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1375
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1414
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
kernel/trace/trace_output.c
1418
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1422
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1423
end = (unsigned long *)((long)iter->ent + iter->ent_size);
kernel/trace/trace_output.c
1437
seq_print_ip_sym(s, trace_adjust_address(iter->tr, *p), flags);
kernel/trace/trace_output.c
1454
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
kernel/trace/trace_output.c
1457
struct trace_array *tr = iter->tr;
kernel/trace/trace_output.c
1459
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1463
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1508
trace_hwlat_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1511
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
1512
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1542
trace_hwlat_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1546
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1548
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1572
trace_osnoise_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1575
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
1576
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1609
trace_osnoise_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1613
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1615
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1644
trace_timerlat_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1647
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
1648
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1662
trace_timerlat_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1666
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1668
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1690
trace_bputs_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1693
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
1694
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1708
trace_bputs_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1712
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1714
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1734
trace_bprint_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1737
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
1738
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1752
trace_bprint_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1756
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1758
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1777
static enum print_line_t trace_print_print(struct trace_iterator *iter,
kernel/trace/trace_output.c
1781
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1784
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1786
ip = trace_adjust_address(iter->tr, field->ip);
kernel/trace/trace_output.c
1794
static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1799
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1801
trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
kernel/trace/trace_output.c
1803
return trace_handle_return(&iter->seq);
kernel/trace/trace_output.c
1816
static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1822
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1824
trace_seq_printf(&iter->seq, "# %x buf:", field->id);
kernel/trace/trace_output.c
1826
for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
kernel/trace/trace_output.c
1827
trace_seq_printf(&iter->seq, " %02x",
kernel/trace/trace_output.c
1830
trace_seq_putc(&iter->seq, '\n');
kernel/trace/trace_output.c
1832
return trace_handle_return(&iter->seq);
kernel/trace/trace_output.c
1846
trace_func_repeats_raw(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1850
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1852
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1864
trace_func_repeats_print(struct trace_iterator *iter, int flags,
kernel/trace/trace_output.c
1868
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
1870
trace_assign_type(field, iter->ent);
kernel/trace/trace_output.c
1872
print_fn_trace(s, field->ip, field->parent_ip, NULL, iter->tr, flags);
kernel/trace/trace_output.c
1874
trace_print_time(s, iter,
kernel/trace/trace_output.c
1875
iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
kernel/trace/trace_output.c
213
trace_print_bitmask_seq(struct trace_iterator *iter, void *bitmask_ptr,
kernel/trace/trace_output.c
216
struct trace_seq *p = &iter->tmp_seq;
kernel/trace/trace_output.c
217
const struct trace_array *tr = iter->tr;
kernel/trace/trace_output.c
30
enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
kernel/trace/trace_output.c
32
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
324
int trace_raw_output_prep(struct trace_iterator *iter,
kernel/trace/trace_output.c
328
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
329
struct trace_seq *p = &iter->tmp_seq;
kernel/trace/trace_output.c
33
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
333
entry = iter->ent;
kernel/trace/trace_output.c
347
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
kernel/trace/trace_output.c
349
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
352
if (ignore_event(iter))
kernel/trace/trace_output.c
356
trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
kernel/trace/trace_output.c
362
int trace_output_raw(struct trace_iterator *iter, char *name,
kernel/trace/trace_output.c
365
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
368
trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
kernel/trace/trace_output.c
373
int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
kernel/trace/trace_output.c
379
ret = trace_output_raw(iter, name, fmt, ap);
kernel/trace/trace_output.c
43
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
kernel/trace/trace_output.c
45
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
46
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
56
enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
kernel/trace/trace_output.c
58
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
59
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
593
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
kernel/trace/trace_output.c
595
struct trace_array *tr = iter->tr;
kernel/trace/trace_output.c
597
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
kernel/trace/trace_output.c
598
unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
kernel/trace/trace_output.c
599
unsigned long long rel_ts = next_ts - iter->ts;
kernel/trace/trace_output.c
600
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
615
ns2usecs(iter->ts),
kernel/trace/trace_output.c
622
iter->ts, abs_ts, rel_ts);
kernel/trace/trace_output.c
637
static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
kernel/trace/trace_output.c
643
if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
kernel/trace/trace_output.c
652
int trace_print_context(struct trace_iterator *iter)
kernel/trace/trace_output.c
654
struct trace_array *tr = iter->tr;
kernel/trace/trace_output.c
655
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
656
struct trace_entry *entry = iter->ent;
kernel/trace/trace_output.c
672
trace_seq_printf(s, "[%03d] ", iter->cpu);
kernel/trace/trace_output.c
677
trace_print_time(s, iter, iter->ts);
kernel/trace/trace_output.c
683
int trace_print_lat_context(struct trace_iterator *iter)
kernel/trace/trace_output.c
686
struct trace_array *tr = iter->tr;
kernel/trace/trace_output.c
687
struct trace_seq *s = &iter->seq;
kernel/trace/trace_output.c
691
next_entry = trace_find_next_entry(iter, NULL, &next_ts);
kernel/trace/trace_output.c
693
next_ts = iter->ts;
kernel/trace/trace_output.c
696
entry = iter->ent;
kernel/trace/trace_output.c
705
comm, entry->pid, iter->cpu, entry->flags,
kernel/trace/trace_output.c
706
entry->preempt_count & 0xf, iter->idx);
kernel/trace/trace_output.c
708
lat_print_generic(s, entry, iter->cpu);
kernel/trace/trace_output.c
711
lat_print_timestamp(iter, next_ts);
kernel/trace/trace_output.c
940
static void print_array(struct trace_iterator *iter, void *pos,
kernel/trace/trace_output.c
953
if (offset + len > iter->ent_size) {
kernel/trace/trace_output.c
954
trace_seq_puts(&iter->seq, "<OVERFLOW>");
kernel/trace/trace_output.c
958
pos = (void *)iter->ent + offset;
kernel/trace/trace_output.c
962
trace_seq_putc(&iter->seq, ',');
kernel/trace/trace_output.c
963
trace_seq_printf(&iter->seq, "%02x", *(unsigned char *)pos);
kernel/trace/trace_output.c
967
static void print_fields(struct trace_iterator *iter, struct trace_event_call *call,
kernel/trace/trace_output.c
971
struct trace_array *tr = iter->tr;
kernel/trace/trace_output.c
982
trace_seq_printf(&iter->seq, " %s=", field->name);
kernel/trace/trace_output.c
983
if (field->offset + field->size > iter->ent_size) {
kernel/trace/trace_output.c
984
trace_seq_puts(&iter->seq, "<OVERFLOW>");
kernel/trace/trace_output.c
987
pos = (void *)iter->ent + field->offset;
kernel/trace/trace_output.c
992
trace_seq_printf(&iter->seq, "%.*s", field->size, (char *)pos);
kernel/trace/trace_output.h
11
trace_print_bprintk_msg_only(struct trace_iterator *iter);
kernel/trace/trace_output.h
13
trace_print_printk_msg_only(struct trace_iterator *iter);
kernel/trace/trace_output.h
31
extern int trace_print_context(struct trace_iterator *iter);
kernel/trace/trace_output.h
32
extern int trace_print_lat_context(struct trace_iterator *iter);
kernel/trace/trace_output.h
33
extern enum print_line_t print_event_fields(struct trace_iterator *iter,
kernel/trace/trace_output.h
40
extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
kernel/trace/trace_output.h
9
trace_print_bputs_msg_only(struct trace_iterator *iter);
kernel/trace/trace_printk.c
55
const char **iter;
kernel/trace/trace_printk.c
63
for (iter = start; iter < end; iter++) {
kernel/trace/trace_printk.c
64
struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
kernel/trace/trace_printk.c
67
*iter = tb_fmt->fmt;
kernel/trace/trace_printk.c
74
fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
kernel/trace/trace_printk.c
77
strcpy(fmt, *iter);
kernel/trace/trace_printk.c
82
*iter = fmt;
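
Illustrative sketch (not kernel code): the trace_printk.c lines above walk an array of format-string pointers and repoint each entry at a stable copy. A minimal userspace version of that walk, assuming a plain copy instead of the kernel's lookup/dedup of existing formats:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Walk a [start, end) range of format pointers and replace each entry with a
 * heap copy, so the originals can go away (hypothetical sketch; the kernel
 * version also deduplicates formats via a lookup table).
 */
static void hold_module_formats(const char **start, const char **end)
{
	const char **iter;

	for (iter = start; iter < end; iter++) {
		char *fmt = malloc(strlen(*iter) + 1);

		if (!fmt)
			continue;	/* keep the old pointer on allocation failure */
		strcpy(fmt, *iter);
		*iter = fmt;		/* repoint the table entry at the copy */
	}
}

int main(void)
{
	const char *fmts[] = { "cpu=%d\n", "pid=%d comm=%s\n" };

	hold_module_formats(fmts, fmts + 2);
	printf("%s%s", fmts[0], fmts[1]);
	return 0;
}
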
kernel/trace/trace_sched_wakeup.c
183
static void wakeup_trace_open(struct trace_iterator *iter)
kernel/trace/trace_sched_wakeup.c
185
if (is_graph(iter->tr))
kernel/trace/trace_sched_wakeup.c
186
graph_trace_open(iter);
kernel/trace/trace_sched_wakeup.c
189
static void wakeup_trace_close(struct trace_iterator *iter)
kernel/trace/trace_sched_wakeup.c
191
if (iter->private)
kernel/trace/trace_sched_wakeup.c
192
graph_trace_close(iter);
kernel/trace/trace_sched_wakeup.c
202
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
kernel/trace/trace_sched_wakeup.c
208
if (is_graph(iter->tr))
kernel/trace/trace_sched_wakeup.c
209
return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
kernel/trace/trace_sched_wakeup.c
302
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
kernel/trace/trace_sched_wakeup.c
307
static void wakeup_trace_open(struct trace_iterator *iter) { }
kernel/trace/trace_sched_wakeup.c
308
static void wakeup_trace_close(struct trace_iterator *iter) { }
kernel/trace/trace_syscalls.c
241
print_syscall_enter(struct trace_iterator *iter, int flags,
kernel/trace/trace_syscalls.c
244
struct trace_array *tr = iter->tr;
kernel/trace/trace_syscalls.c
245
struct trace_seq *s = &iter->seq;
kernel/trace/trace_syscalls.c
246
struct trace_entry *ent = iter->ent;
kernel/trace/trace_syscalls.c
349
print_syscall_exit(struct trace_iterator *iter, int flags,
kernel/trace/trace_syscalls.c
352
struct trace_seq *s = &iter->seq;
kernel/trace/trace_syscalls.c
353
struct trace_entry *ent = iter->ent;
kernel/trace/trace_uprobe.c
1066
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
kernel/trace/trace_uprobe.c
1069
struct trace_seq *s = &iter->seq;
kernel/trace/trace_uprobe.c
1073
entry = (struct uprobe_trace_entry_head *)iter->ent;
kernel/tracepoint.c
524
tracepoint_ptr_t *iter;
kernel/tracepoint.c
528
for (iter = begin; iter < end; iter++)
kernel/tracepoint.c
529
fct(tracepoint_ptr_deref(iter), priv);
kernel/tracepoint.c
712
tracepoint_ptr_t *begin, *end, *iter;
kernel/tracepoint.c
722
for (iter = begin; iter < end; iter++)
kernel/tracepoint.c
723
fct(tracepoint_ptr_deref(iter), mod, priv);
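
Illustrative sketch (not kernel code): kernel/tracepoint.c iterates a [begin, end) pointer range and hands each element to a callback together with an opaque priv argument. A tiny userspace analogue with hypothetical types:

#include <stdio.h>

struct tp { const char *name; };

/* Invoke fct on every entry in [begin, end), passing opaque priv through. */
static void for_each_in_range(struct tp *const *begin, struct tp *const *end,
			      void (*fct)(struct tp *tp, void *priv), void *priv)
{
	struct tp *const *iter;

	for (iter = begin; iter < end; iter++)
		fct(*iter, priv);
}

static void show(struct tp *tp, void *priv)
{
	printf("%s%s\n", (const char *)priv, tp->name);
}

int main(void)
{
	struct tp a = { "sched_switch" }, b = { "sched_wakeup" };
	struct tp *tps[] = { &a, &b };

	for_each_in_range(tps, tps + 2, show, (void *)"tracepoint: ");
	return 0;
}
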
kernel/ucount.c
217
struct ucounts *ucounts, *iter, *bad;
kernel/ucount.c
220
for (iter = ucounts; iter; iter = tns->ucounts) {
kernel/ucount.c
222
tns = iter->ns;
kernel/ucount.c
224
if (!atomic_long_inc_below(&iter->ucount[type], max))
kernel/ucount.c
229
bad = iter;
kernel/ucount.c
230
for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
kernel/ucount.c
231
atomic_long_dec(&iter->ucount[type]);
kernel/ucount.c
239
struct ucounts *iter;
kernel/ucount.c
240
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
kernel/ucount.c
241
long dec = atomic_long_dec_if_positive(&iter->ucount[type]);
kernel/ucount.c
249
struct ucounts *iter;
kernel/ucount.c
253
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
kernel/ucount.c
254
long new = atomic_long_add_return(v, &iter->rlimit[type]);
kernel/ucount.c
257
else if (iter == ucounts)
kernel/ucount.c
259
max = get_userns_rlimit_max(iter->ns, type);
kernel/ucount.c
266
struct ucounts *iter;
kernel/ucount.c
268
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
kernel/ucount.c
269
long dec = atomic_long_sub_return(v, &iter->rlimit[type]);
kernel/ucount.c
271
if (iter == ucounts)
kernel/ucount.c
280
struct ucounts *iter, *next;
kernel/ucount.c
281
for (iter = ucounts; iter != last; iter = next) {
kernel/ucount.c
282
long dec = atomic_long_sub_return(1, &iter->rlimit[type]);
kernel/ucount.c
284
next = iter->ns->ucounts;
kernel/ucount.c
286
put_ucounts(iter);
kernel/ucount.c
299
struct ucounts *iter;
kernel/ucount.c
303
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
kernel/ucount.c
304
long new = atomic_long_add_return(1, &iter->rlimit[type]);
kernel/ucount.c
307
if (iter == ucounts)
kernel/ucount.c
310
max = get_userns_rlimit_max(iter->ns, type);
kernel/ucount.c
317
if (!get_ucounts(iter))
kernel/ucount.c
322
dec = atomic_long_sub_return(1, &iter->rlimit[type]);
kernel/ucount.c
324
do_dec_rlimit_put_ucounts(ucounts, iter, type);
kernel/ucount.c
330
struct ucounts *iter;
kernel/ucount.c
334
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
kernel/ucount.c
335
long val = get_rlimit_value(iter, type);
kernel/ucount.c
338
max = get_userns_rlimit_max(iter->ns, type);
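
Illustrative sketch (not kernel code): the kernel/ucount.c lines above charge a counter at every level of a namespace chain and, if some level is over its limit, walk the chain again to undo the levels already charged. A self-contained userspace sketch of that charge-and-rollback walk, using plain longs rather than the kernel's atomics; all names are hypothetical:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* One level in the chain: its own count, its limit, and its parent. */
struct counter {
	long count, max;
	struct counter *parent;
};

/* Try to charge one unit at every level; on failure undo the levels already charged. */
static bool charge_one(struct counter *leaf)
{
	struct counter *iter, *bad = NULL;

	for (iter = leaf; iter; iter = iter->parent) {
		if (iter->count + 1 > iter->max) {
			bad = iter;
			break;
		}
		iter->count++;
	}
	if (!bad)
		return true;

	/* Roll back everything charged before the level that refused. */
	for (iter = leaf; iter != bad; iter = iter->parent)
		iter->count--;
	return false;
}

int main(void)
{
	struct counter root  = { 0, 1, NULL };
	struct counter child = { 0, 10, &root };

	printf("first:  %s\n", charge_one(&child) ? "ok" : "over limit");
	printf("second: %s\n", charge_one(&child) ? "ok" : "over limit");
	printf("child=%ld root=%ld\n", child.count, root.count);
	return 0;
}
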
lib/alloc_tag.c
114
alloc_tag_to_text(&buf, priv->iter.ct);
lib/alloc_tag.c
128
struct codetag_iterator iter;
lib/alloc_tag.c
141
iter = codetag_get_ct_iter(alloc_tag_cttype);
lib/alloc_tag.c
142
while ((ct = codetag_next_ct(&iter))) {
lib/alloc_tag.c
45
struct codetag_iterator iter;
lib/alloc_tag.c
58
priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
lib/alloc_tag.c
59
codetag_next_ct(&priv->iter);
lib/alloc_tag.c
61
return priv->iter.ct ? priv : NULL;
lib/alloc_tag.c
67
struct codetag *ct = codetag_next_ct(&priv->iter);
lib/codetag.c
102
if (!iter->cmod || iter->mod_seq != cmod->mod_seq) {
lib/codetag.c
103
iter->cmod = cmod;
lib/codetag.c
104
iter->mod_seq = cmod->mod_seq;
lib/codetag.c
107
ct = get_next_module_ct(iter);
lib/codetag.c
113
iter->mod_id++;
lib/codetag.c
116
iter->ct = ct;
lib/codetag.c
53
struct codetag_iterator iter = {
lib/codetag.c
61
return iter;
lib/codetag.c
70
struct codetag *get_next_module_ct(struct codetag_iterator *iter)
lib/codetag.c
73
((char *)iter->ct + iter->cttype->desc.tag_size);
lib/codetag.c
75
return res < iter->cmod->range.stop ? res : NULL;
lib/codetag.c
78
struct codetag *codetag_next_ct(struct codetag_iterator *iter)
lib/codetag.c
80
struct codetag_type *cttype = iter->cttype;
lib/codetag.c
91
cmod = idr_find(&cttype->mod_idr, iter->mod_id);
lib/codetag.c
96
&iter->mod_id);
lib/dynamic_debug.c
1036
static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
lib/dynamic_debug.c
1039
iter->table = NULL;
lib/dynamic_debug.c
1042
iter->table = list_entry(ddebug_tables.next,
lib/dynamic_debug.c
1044
iter->idx = iter->table->num_ddebugs;
lib/dynamic_debug.c
1045
return &iter->table->ddebugs[--iter->idx];
lib/dynamic_debug.c
1054
static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
lib/dynamic_debug.c
1056
if (iter->table == NULL)
lib/dynamic_debug.c
1058
if (--iter->idx < 0) {
lib/dynamic_debug.c
1060
if (list_is_last(&iter->table->link, &ddebug_tables)) {
lib/dynamic_debug.c
1061
iter->table = NULL;
lib/dynamic_debug.c
1064
iter->table = list_entry(iter->table->link.next,
lib/dynamic_debug.c
1066
iter->idx = iter->table->num_ddebugs;
lib/dynamic_debug.c
1067
--iter->idx;
lib/dynamic_debug.c
1069
return &iter->table->ddebugs[iter->idx];
lib/dynamic_debug.c
1079
struct ddebug_iter *iter = m->private;
lib/dynamic_debug.c
1089
dp = ddebug_iter_first(iter);
lib/dynamic_debug.c
1091
dp = ddebug_iter_next(iter);
lib/dynamic_debug.c
1102
struct ddebug_iter *iter = m->private;
lib/dynamic_debug.c
1106
dp = ddebug_iter_first(iter);
lib/dynamic_debug.c
1108
dp = ddebug_iter_next(iter);
lib/dynamic_debug.c
1116
static const char *ddebug_class_name(struct ddebug_iter *iter, struct _ddebug *dp)
lib/dynamic_debug.c
1120
list_for_each_entry(map, &iter->table->maps, link)
lib/dynamic_debug.c
1135
struct ddebug_iter *iter = m->private;
lib/dynamic_debug.c
1148
iter->table->mod_name, dp->function,
lib/dynamic_debug.c
1154
class = ddebug_class_name(iter, dp);
lib/dynamic_debug.c
1410
struct _ddebug *iter, *iter_mod_start;
lib/dynamic_debug.c
1440
iter = iter_mod_start = __start___dyndbg;
lib/dynamic_debug.c
1441
modname = iter->modname;
lib/dynamic_debug.c
1444
for (; iter < __stop___dyndbg; iter++, i++, mod_sites++) {
lib/dynamic_debug.c
1446
if (strcmp(modname, iter->modname)) {
lib/dynamic_debug.c
1455
modname = iter->modname;
lib/dynamic_debug.c
1456
iter_mod_start = iter;
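
Illustrative sketch (not kernel code): ddebug_iter_first()/ddebug_iter_next() above keep a two-level cursor, a current table plus an index within it. A minimal userspace analogue over an array of tables (the kernel walks a linked list and counts the index down); names and types are hypothetical, and the sketch assumes the first table is non-empty:

#include <stdio.h>
#include <stddef.h>

struct entry { const char *site; };

struct table {
	const char *mod_name;
	const struct entry *entries;
	int num;
};

/* Two-level cursor: which table we are in, and the index inside it. */
struct table_iter {
	const struct table *tables;
	int num_tables;
	int table_idx;
	int idx;
};

static const struct entry *iter_first(struct table_iter *it,
				      const struct table *tables, int num_tables)
{
	it->tables = tables;
	it->num_tables = num_tables;
	it->table_idx = 0;
	it->idx = 0;
	return num_tables ? &tables[0].entries[0] : NULL;
}

static const struct entry *iter_next(struct table_iter *it)
{
	/* Step within the current table, then fall through to the next one. */
	while (it->table_idx < it->num_tables) {
		if (++it->idx < it->tables[it->table_idx].num)
			return &it->tables[it->table_idx].entries[it->idx];
		it->table_idx++;
		it->idx = -1;	/* so the next ++ lands on index 0 */
	}
	return NULL;
}

int main(void)
{
	static const struct entry a[] = { { "a.c:10" }, { "a.c:20" } };
	static const struct entry b[] = { { "b.c:5" } };
	const struct table tables[] = { { "mod_a", a, 2 }, { "mod_b", b, 1 } };
	struct table_iter it;

	for (const struct entry *e = iter_first(&it, tables, 2); e; e = iter_next(&it))
		printf("%s %s\n", tables[it.table_idx].mod_name, e->site);
	return 0;
}
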
lib/error-inject.c
68
struct error_injection_entry *iter;
lib/error-inject.c
73
for (iter = start; iter < end; iter++) {
lib/error-inject.c
74
entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr);
lib/error-inject.c
88
ent->etype = iter->etype;
lib/generic-radix-tree.c
105
if (ilog2(iter->offset) >= genradix_depth_shift(level))
lib/generic-radix-tree.c
111
i = (iter->offset >> genradix_depth_shift(level)) &
lib/generic-radix-tree.c
117
if (iter->offset + objs_per_ptr < iter->offset) {
lib/generic-radix-tree.c
118
iter->offset = SIZE_MAX;
lib/generic-radix-tree.c
119
iter->pos = SIZE_MAX;
lib/generic-radix-tree.c
124
iter->offset = round_down(iter->offset + objs_per_ptr,
lib/generic-radix-tree.c
126
iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) *
lib/generic-radix-tree.c
135
return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
lib/generic-radix-tree.c
139
void *__genradix_iter_peek_prev(struct genradix_iter *iter,
lib/generic-radix-tree.c
148
if (iter->offset == SIZE_MAX)
lib/generic-radix-tree.c
159
if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
lib/generic-radix-tree.c
160
iter->offset = genradix_depth_size(level);
lib/generic-radix-tree.c
161
iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
lib/generic-radix-tree.c
163
iter->offset -= obj_size_plus_page_remainder;
lib/generic-radix-tree.c
164
iter->pos--;
lib/generic-radix-tree.c
170
i = (iter->offset >> genradix_depth_shift(level)) &
lib/generic-radix-tree.c
176
iter->offset = round_down(iter->offset, objs_per_ptr);
lib/generic-radix-tree.c
177
iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
lib/generic-radix-tree.c
179
if (!iter->offset)
lib/generic-radix-tree.c
182
iter->offset -= obj_size_plus_page_remainder;
lib/generic-radix-tree.c
183
iter->pos--;
lib/generic-radix-tree.c
193
return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
lib/generic-radix-tree.c
86
void *__genradix_iter_peek(struct genradix_iter *iter,
lib/generic-radix-tree.c
94
if (iter->offset == SIZE_MAX)
lib/idr.c
200
struct radix_tree_iter iter;
lib/idr.c
204
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
lib/idr.c
206
unsigned long id = iter.index + base;
lib/idr.c
231
struct radix_tree_iter iter;
lib/idr.c
238
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
lib/idr.c
246
slot = radix_tree_iter_retry(&iter);
lib/idr.c
251
*nextid = iter.index + base;
lib/idr.c
36
struct radix_tree_iter iter;
lib/idr.c
47
radix_tree_iter_init(&iter, id);
lib/idr.c
48
slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
lib/idr.c
52
*nextid = iter.index + base;
lib/idr.c
54
radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
lib/idr.c
55
radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
lib/interval_tree.c
100
iter->last_used = iter->last_index;
lib/interval_tree.c
101
iter->nodes[0] = NULL;
lib/interval_tree.c
102
iter->nodes[1] = NULL;
lib/interval_tree.c
104
iter->is_hole = 0;
lib/interval_tree.c
108
if (!iter->nodes[1]) {
lib/interval_tree.c
110
iter->start_hole = iter->nodes[0]->last + 1;
lib/interval_tree.c
111
iter->last_hole = iter->last_index;
lib/interval_tree.c
112
iter->nodes[0] = NULL;
lib/interval_tree.c
113
iter->is_hole = 1;
lib/interval_tree.c
118
iter->start_hole = iter->nodes[0]->last + 1;
lib/interval_tree.c
119
iter->last_hole = iter->nodes[1]->start - 1;
lib/interval_tree.c
120
iter->is_hole = 1;
lib/interval_tree.c
121
interval_tree_span_iter_next_gap(iter);
lib/interval_tree.c
131
void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
lib/interval_tree.c
135
if (iter->is_hole == -1)
lib/interval_tree.c
138
iter->first_index = new_index;
lib/interval_tree.c
139
if (new_index > iter->last_index) {
lib/interval_tree.c
140
iter->is_hole = -1;
lib/interval_tree.c
145
if (iter->start_hole <= new_index && new_index <= iter->last_hole) {
lib/interval_tree.c
146
iter->start_hole = new_index;
lib/interval_tree.c
149
if (new_index == iter->last_hole + 1)
lib/interval_tree.c
150
interval_tree_span_iter_next(iter);
lib/interval_tree.c
152
interval_tree_span_iter_first(iter, itree, new_index,
lib/interval_tree.c
153
iter->last_index);
lib/interval_tree.c
50
void interval_tree_span_iter_first(struct interval_tree_span_iter *iter,
lib/interval_tree.c
55
iter->first_index = first_index;
lib/interval_tree.c
56
iter->last_index = last_index;
lib/interval_tree.c
57
iter->nodes[0] = NULL;
lib/interval_tree.c
58
iter->nodes[1] =
lib/interval_tree.c
60
if (!iter->nodes[1]) {
lib/interval_tree.c
62
iter->start_hole = first_index;
lib/interval_tree.c
63
iter->last_hole = last_index;
lib/interval_tree.c
64
iter->is_hole = 1;
lib/interval_tree.c
67
if (iter->nodes[1]->start > first_index) {
lib/interval_tree.c
69
iter->start_hole = first_index;
lib/interval_tree.c
70
iter->last_hole = iter->nodes[1]->start - 1;
lib/interval_tree.c
71
iter->is_hole = 1;
lib/interval_tree.c
72
interval_tree_span_iter_next_gap(iter);
lib/interval_tree.c
77
iter->start_used = first_index;
lib/interval_tree.c
78
iter->is_hole = 0;
lib/interval_tree.c
79
interval_tree_span_iter_next_gap(iter);
lib/interval_tree.c
80
iter->last_used = iter->nodes[0]->last;
lib/interval_tree.c
81
if (iter->last_used >= last_index) {
lib/interval_tree.c
82
iter->last_used = last_index;
lib/interval_tree.c
83
iter->nodes[0] = NULL;
lib/interval_tree.c
84
iter->nodes[1] = NULL;
lib/interval_tree.c
89
void interval_tree_span_iter_next(struct interval_tree_span_iter *iter)
lib/interval_tree.c
91
if (!iter->nodes[0] && !iter->nodes[1]) {
lib/interval_tree.c
92
iter->is_hole = -1;
lib/interval_tree.c
96
if (iter->is_hole) {
lib/interval_tree.c
97
iter->start_used = iter->last_hole + 1;
lib/interval_tree.c
98
iter->last_used = iter->nodes[0]->last;
lib/interval_tree.c
99
if (iter->last_used >= iter->last_index) {
lib/iov_iter.c
1898
ssize_t iov_iter_extract_bvecs(struct iov_iter *iter, struct bio_vec *bv,
lib/iov_iter.c
1917
size = iov_iter_extract_pages(iter, &pages, max_size, entries_left,
lib/iov_iter.c
1938
iov_iter_revert(iter, left);
lib/iov_iter.c
1939
if (iov_iter_extract_will_pin(iter)) {
lib/iov_iter.c
913
static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
lib/iov_iter.c
917
const struct folio_queue *folioq = iter->folioq;
lib/iov_iter.c
919
unsigned int slot = iter->folioq_slot;
lib/iov_iter.c
920
size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;
lib/iov_iter.c
965
iter->count = count;
lib/iov_iter.c
966
iter->iov_offset = iov_offset;
lib/iov_iter.c
967
iter->folioq = folioq;
lib/iov_iter.c
968
iter->folioq_slot = slot;
lib/math/cordic.c
50
unsigned iter;
lib/math/cordic.c
71
for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
lib/math/cordic.c
73
valtmp = coord.i - (coord.q >> iter);
lib/math/cordic.c
74
coord.q += (coord.i >> iter);
lib/math/cordic.c
75
angle += arctan_table[iter];
lib/math/cordic.c
77
valtmp = coord.i + (coord.q >> iter);
lib/math/cordic.c
78
coord.q -= (coord.i >> iter);
lib/math/cordic.c
79
angle -= arctan_table[iter];
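
Illustrative sketch (not kernel code): lib/math/cordic.c runs the classic CORDIC shift-and-add rotation indexed by iter against an arctangent table. The same loop in userspace C, in floating point rather than the kernel's fixed point, with locally computed constants; compile with -lm:

#include <stdio.h>
#include <math.h>

#define NUM_ITER 16

/*
 * Rotate a unit vector towards the target angle one arctan(2^-iter) step at a
 * time; after enough iterations i ~ cos(theta) and q ~ sin(theta), once the
 * accumulated CORDIC gain is corrected for.
 */
static void cordic(double theta, double *cosv, double *sinv)
{
	double arctan_table[NUM_ITER];
	double gain = 1.0;
	double i = 1.0, q = 0.0, angle = 0.0;

	for (int k = 0; k < NUM_ITER; k++) {
		arctan_table[k] = atan(ldexp(1.0, -k));
		gain *= cos(arctan_table[k]);	/* accumulate the gain correction */
	}

	for (int iter = 0; iter < NUM_ITER; iter++) {
		double shift = ldexp(1.0, -iter);	/* plays the role of ">> iter" */
		double valtmp;

		if (angle < theta) {
			valtmp = i - q * shift;
			q += i * shift;
			angle += arctan_table[iter];
		} else {
			valtmp = i + q * shift;
			q -= i * shift;
			angle -= arctan_table[iter];
		}
		i = valtmp;
	}
	*cosv = i * gain;
	*sinv = q * gain;
}

int main(void)
{
	double c, s;

	cordic(0.52359878, &c, &s);	/* ~30 degrees */
	printf("cos ~ %.4f  sin ~ %.4f\n", c, s);
	return 0;
}
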
lib/plist.c
101
prev = iter;
lib/plist.c
102
iter = list_entry(iter->prio_list.next,
lib/plist.c
106
} while (iter != first);
lib/plist.c
109
list_add_tail(&node->prio_list, &iter->prio_list);
lib/plist.c
157
struct plist_node *iter;
lib/plist.c
167
iter = plist_next(node);
lib/plist.c
169
if (node->prio != iter->prio)
lib/plist.c
178
if (!list_empty(&iter->prio_list)) {
lib/plist.c
179
iter = list_entry(iter->prio_list.next, struct plist_node,
lib/plist.c
181
node_next = &iter->node_list;
lib/plist.c
185
plist_for_each_continue(iter, head) {
lib/plist.c
186
if (node->prio != iter->prio) {
lib/plist.c
187
node_next = &iter->node_list;
lib/plist.c
75
struct plist_node *first, *iter, *prev = NULL, *last, *reverse_iter;
lib/plist.c
85
first = iter = plist_first(head);
lib/plist.c
89
if (node->prio < iter->prio) {
lib/plist.c
90
node_next = &iter->node_list;
lib/plist.c
94
iter = list_entry(reverse_iter->prio_list.next,
lib/plist.c
97
node_next = &iter->node_list;
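
Illustrative sketch (not kernel code): plist_add() above scans the list with iter until it meets the first node of lower priority and splices the new node in before it, keeping equal priorities in FIFO order. A minimal userspace version using a plain singly linked list (the real plist also maintains a prio_list so whole priority groups can be skipped):

#include <stdio.h>

struct pnode {
	int prio;		/* smaller value = higher priority, as in plist */
	struct pnode *next;
};

/* Walk with iter until a node of strictly lower priority, insert before it. */
static void plist_like_add(struct pnode **head, struct pnode *node)
{
	struct pnode **link = head, *iter;

	for (iter = *head; iter; link = &iter->next, iter = iter->next)
		if (node->prio < iter->prio)
			break;
	node->next = iter;
	*link = node;
}

int main(void)
{
	int prios[] = { 3, 1, 2, 1 };
	struct pnode *head = NULL, nodes[4];

	for (int i = 0; i < 4; i++) {
		nodes[i].prio = prios[i];
		plist_like_add(&head, &nodes[i]);
	}
	for (struct pnode *iter = head; iter; iter = iter->next)
		printf("%d ", iter->prio);
	printf("\n");	/* prints: 1 1 2 3 */
	return 0;
}
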
lib/radix-tree.c
1061
const struct radix_tree_iter *iter, unsigned int tag)
lib/radix-tree.c
1063
node_tag_clear(root, iter->node, tag, iter_offset(iter));
lib/radix-tree.c
1111
static void set_iter_tags(struct radix_tree_iter *iter,
lib/radix-tree.c
1119
iter->tags = 1;
lib/radix-tree.c
1123
iter->tags = node->tags[tag][tag_long] >> tag_bit;
lib/radix-tree.c
1129
iter->tags |= node->tags[tag][tag_long + 1] <<
lib/radix-tree.c
1132
iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
lib/radix-tree.c
1137
struct radix_tree_iter *iter)
lib/radix-tree.c
1139
iter->index = __radix_tree_iter_add(iter, 1);
lib/radix-tree.c
1140
iter->next_index = iter->index;
lib/radix-tree.c
1141
iter->tags = 0;
lib/radix-tree.c
1155
struct radix_tree_iter *iter, unsigned flags)
lib/radix-tree.c
1173
index = iter->next_index;
lib/radix-tree.c
1174
if (!index && iter->index)
lib/radix-tree.c
1186
iter->index = index;
lib/radix-tree.c
1187
iter->next_index = maxindex + 1;
lib/radix-tree.c
1188
iter->tags = 1;
lib/radix-tree.c
1189
iter->node = NULL;
lib/radix-tree.c
1230
iter->index = (index &~ node_maxindex(node)) | offset;
lib/radix-tree.c
1231
iter->next_index = (index | node_maxindex(node)) + 1;
lib/radix-tree.c
1232
iter->node = node;
lib/radix-tree.c
1235
set_iter_tags(iter, node, offset, tag);
lib/radix-tree.c
1265
struct radix_tree_iter iter;
lib/radix-tree.c
1272
radix_tree_for_each_slot(slot, root, &iter, first_index) {
lib/radix-tree.c
1277
slot = radix_tree_iter_retry(&iter);
lib/radix-tree.c
1306
struct radix_tree_iter iter;
lib/radix-tree.c
1313
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
lib/radix-tree.c
1318
slot = radix_tree_iter_retry(&iter);
lib/radix-tree.c
1347
struct radix_tree_iter iter;
lib/radix-tree.c
1354
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
lib/radix-tree.c
1395
struct radix_tree_iter *iter, void __rcu **slot)
lib/radix-tree.c
1397
if (__radix_tree_delete(root, iter->node, slot))
lib/radix-tree.c
1398
iter->index = iter->next_index;
lib/radix-tree.c
1477
struct radix_tree_iter *iter, gfp_t gfp,
lib/radix-tree.c
1482
unsigned long maxindex, start = iter->next_index;
lib/radix-tree.c
1537
iter->index = start;
lib/radix-tree.c
1539
iter->next_index = 1 + min(max, (start | node_maxindex(node)));
lib/radix-tree.c
1541
iter->next_index = 1;
lib/radix-tree.c
1542
iter->node = node;
lib/radix-tree.c
1543
set_iter_tags(iter, node, offset, IDR_FREE);
lib/radix-tree.c
203
static unsigned int iter_offset(const struct radix_tree_iter *iter)
lib/radix-tree.c
205
return iter->index & RADIX_TREE_MAP_MASK;
lib/radix-tree.c
932
const struct radix_tree_iter *iter,
lib/radix-tree.c
935
__radix_tree_replace(root, iter->node, slot, item);
lib/rhashtable.c
679
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
lib/rhashtable.c
681
iter->ht = ht;
lib/rhashtable.c
682
iter->p = NULL;
lib/rhashtable.c
683
iter->slot = 0;
lib/rhashtable.c
684
iter->skip = 0;
lib/rhashtable.c
685
iter->end_of_table = 0;
lib/rhashtable.c
688
iter->walker.tbl =
lib/rhashtable.c
690
list_add(&iter->walker.list, &iter->walker.tbl->walkers);
lib/rhashtable.c
701
void rhashtable_walk_exit(struct rhashtable_iter *iter)
lib/rhashtable.c
703
spin_lock(&iter->ht->lock);
lib/rhashtable.c
704
if (iter->walker.tbl)
lib/rhashtable.c
705
list_del(&iter->walker.list);
lib/rhashtable.c
706
spin_unlock(&iter->ht->lock);
lib/rhashtable.c
728
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
lib/rhashtable.c
731
struct rhashtable *ht = iter->ht;
lib/rhashtable.c
737
if (iter->walker.tbl)
lib/rhashtable.c
738
list_del(&iter->walker.list);
lib/rhashtable.c
741
if (iter->end_of_table)
lib/rhashtable.c
743
if (!iter->walker.tbl) {
lib/rhashtable.c
744
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
lib/rhashtable.c
745
iter->slot = 0;
lib/rhashtable.c
746
iter->skip = 0;
lib/rhashtable.c
750
if (iter->p && !rhlist) {
lib/rhashtable.c
757
rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
lib/rhashtable.c
759
if (p == iter->p) {
lib/rhashtable.c
760
iter->skip = skip;
lib/rhashtable.c
764
iter->p = NULL;
lib/rhashtable.c
765
} else if (iter->p && rhlist) {
lib/rhashtable.c
772
rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
lib/rhashtable.c
777
if (list == iter->list) {
lib/rhashtable.c
778
iter->p = p;
lib/rhashtable.c
779
iter->skip = skip;
lib/rhashtable.c
784
iter->p = NULL;
lib/rhashtable.c
801
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
lib/rhashtable.c
803
struct bucket_table *tbl = iter->walker.tbl;
lib/rhashtable.c
804
struct rhlist_head *list = iter->list;
lib/rhashtable.c
805
struct rhashtable *ht = iter->ht;
lib/rhashtable.c
806
struct rhash_head *p = iter->p;
lib/rhashtable.c
812
for (; iter->slot < tbl->size; iter->slot++) {
lib/rhashtable.c
813
int skip = iter->skip;
lib/rhashtable.c
815
rht_for_each_rcu(p, tbl, iter->slot) {
lib/rhashtable.c
835
iter->skip++;
lib/rhashtable.c
836
iter->p = p;
lib/rhashtable.c
837
iter->list = list;
lib/rhashtable.c
841
iter->skip = 0;
lib/rhashtable.c
844
iter->p = NULL;
lib/rhashtable.c
849
iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
lib/rhashtable.c
850
if (iter->walker.tbl) {
lib/rhashtable.c
851
iter->slot = 0;
lib/rhashtable.c
852
iter->skip = 0;
lib/rhashtable.c
855
iter->end_of_table = true;
lib/rhashtable.c
873
void *rhashtable_walk_next(struct rhashtable_iter *iter)
lib/rhashtable.c
875
struct rhlist_head *list = iter->list;
lib/rhashtable.c
876
struct rhashtable *ht = iter->ht;
lib/rhashtable.c
877
struct rhash_head *p = iter->p;
lib/rhashtable.c
886
iter->skip++;
lib/rhashtable.c
887
iter->p = p;
lib/rhashtable.c
888
iter->list = list;
lib/rhashtable.c
895
iter->skip = 0;
lib/rhashtable.c
896
iter->slot++;
lib/rhashtable.c
899
return __rhashtable_walk_find_next(iter);
lib/rhashtable.c
912
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
lib/rhashtable.c
914
struct rhlist_head *list = iter->list;
lib/rhashtable.c
915
struct rhashtable *ht = iter->ht;
lib/rhashtable.c
916
struct rhash_head *p = iter->p;
lib/rhashtable.c
923
if (iter->skip) {
lib/rhashtable.c
930
iter->skip--;
lib/rhashtable.c
933
return __rhashtable_walk_find_next(iter);
lib/rhashtable.c
944
void rhashtable_walk_stop(struct rhashtable_iter *iter)
lib/rhashtable.c
947
struct bucket_table *tbl = iter->walker.tbl;
lib/rhashtable.c
952
ht = iter->ht;
lib/rhashtable.c
957
iter->walker.tbl = NULL;
lib/rhashtable.c
959
list_add(&iter->walker.list, &tbl->walkers);
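
Illustrative sketch (not kernel code): the rhashtable walker above remembers which bucket it is in (slot) and how many entries it has already produced there (skip), so a walk can be stopped and resumed. A self-contained userspace sketch of that slot/skip cursor over a fixed bucket array; it ignores the concurrent-resize handling (walker.tbl) the kernel needs:

#include <stdio.h>
#include <stddef.h>

struct obj { int key; struct obj *next; };

#define NR_SLOTS 4

struct table { struct obj *buckets[NR_SLOTS]; };

/* Cursor: current bucket and how many entries in it were already returned. */
struct walk_iter {
	struct table *tbl;
	unsigned int slot;
	unsigned int skip;
};

static void walk_enter(struct walk_iter *iter, struct table *tbl)
{
	iter->tbl = tbl;
	iter->slot = 0;
	iter->skip = 0;
}

static struct obj *walk_next(struct walk_iter *iter)
{
	for (; iter->slot < NR_SLOTS; iter->slot++, iter->skip = 0) {
		unsigned int skip = 0;
		struct obj *p;

		for (p = iter->tbl->buckets[iter->slot]; p; p = p->next) {
			if (skip++ < iter->skip)
				continue;	/* already handed out on a previous call */
			iter->skip++;
			return p;
		}
	}
	return NULL;
}

int main(void)
{
	struct obj a = { 1, NULL }, b = { 5, &a }, c = { 7, NULL };
	struct table tbl = { { &b, NULL, &c, NULL } };
	struct walk_iter iter;
	struct obj *p;

	walk_enter(&iter, &tbl);
	while ((p = walk_next(&iter)))
		printf("key %d (slot %u)\n", p->key, iter.slot);
	return 0;
}
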
lib/scatterlist.c
1113
static ssize_t extract_user_to_sg(struct iov_iter *iter,
lib/scatterlist.c
1131
res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
lib/scatterlist.c
1167
static ssize_t extract_bvec_to_sg(struct iov_iter *iter,
lib/scatterlist.c
1173
const struct bio_vec *bv = iter->bvec;
lib/scatterlist.c
1175
unsigned long start = iter->iov_offset;
lib/scatterlist.c
1179
for (i = 0; i < iter->nr_segs; i++) {
lib/scatterlist.c
1204
iov_iter_advance(iter, ret);
lib/scatterlist.c
1213
static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
lib/scatterlist.c
1219
const struct kvec *kv = iter->kvec;
lib/scatterlist.c
1221
unsigned long start = iter->iov_offset;
lib/scatterlist.c
1225
for (i = 0; i < iter->nr_segs; i++) {
lib/scatterlist.c
1266
iov_iter_advance(iter, ret);
lib/scatterlist.c
1274
static ssize_t extract_folioq_to_sg(struct iov_iter *iter,
lib/scatterlist.c
1280
const struct folio_queue *folioq = iter->folioq;
lib/scatterlist.c
1282
unsigned int slot = iter->folioq_slot;
lib/scatterlist.c
1284
size_t offset = iter->iov_offset;
lib/scatterlist.c
1315
WARN_ON_ONCE(ret < iter->count);
lib/scatterlist.c
1324
iter->folioq = folioq;
lib/scatterlist.c
1325
iter->folioq_slot = slot;
lib/scatterlist.c
1326
iter->iov_offset = offset;
lib/scatterlist.c
1327
iter->count -= ret;
lib/scatterlist.c
1335
static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
lib/scatterlist.c
1342
struct xarray *xa = iter->xarray;
lib/scatterlist.c
1344
loff_t start = iter->xarray_start + iter->iov_offset;
lib/scatterlist.c
1376
iov_iter_advance(iter, ret);
lib/scatterlist.c
1408
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
lib/scatterlist.c
1415
switch (iov_iter_type(iter)) {
lib/scatterlist.c
1418
return extract_user_to_sg(iter, maxsize, sgtable, sg_max,
lib/scatterlist.c
1421
return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
lib/scatterlist.c
1424
return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
lib/scatterlist.c
1427
return extract_folioq_to_sg(iter, maxsize, sgtable, sg_max,
lib/scatterlist.c
1430
return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
lib/scatterlist.c
1433
pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
lib/test_lockup.c
352
unsigned int iter = 0;
lib/test_lockup.c
361
while (iter++ < iterations && !signal_pending(main_task)) {
lib/test_meminit.c
221
int iter;
lib/test_meminit.c
229
for (iter = 0; iter < 10; iter++) {
lib/test_meminit.c
297
int i, iter, maxiter = 1024;
lib/test_meminit.c
324
for (iter = 0; iter < maxiter; iter++) {
lib/test_meminit.c
326
used_objects[iter] = buf;
lib/test_meminit.c
329
for (i = 0; i <= iter; i++)
lib/test_meminit.c
335
for (iter = 0; iter < maxiter; iter++)
lib/test_meminit.c
336
kmem_cache_free(c, used_objects[iter]);
lib/test_meminit.c
350
int i, iter, maxiter = 1024;
lib/test_meminit.c
356
for (iter = 0; (iter < maxiter) && !fail; iter++) {
lib/tests/cpumask_kunit.c
20
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
22
iter++; \
lib/tests/cpumask_kunit.c
23
KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
lib/tests/cpumask_kunit.c
31
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
35
iter++; \
lib/tests/cpumask_kunit.c
36
KUNIT_EXPECT_EQ((test), weight, iter); \
lib/tests/cpumask_kunit.c
43
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
45
iter++; \
lib/tests/cpumask_kunit.c
46
KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
lib/tests/cpumask_kunit.c
52
int cpu, iter = 0; \
lib/tests/cpumask_kunit.c
54
iter++; \
lib/tests/cpumask_kunit.c
55
KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask)); \
lib/tests/kunit_iov_iter.c
1001
} while (iov_iter_count(&iter) > 0);
lib/tests/kunit_iov_iter.c
1004
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
1005
KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
lib/tests/kunit_iov_iter.c
102
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
119
iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
lib/tests/kunit_iov_iter.c
121
size = iter.count;
lib/tests/kunit_iov_iter.c
123
copied = copy_to_iter(scratch, size, &iter);
lib/tests/kunit_iov_iter.c
126
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
127
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
lib/tests/kunit_iov_iter.c
152
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
169
iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
lib/tests/kunit_iov_iter.c
171
size = min(iter.count, bufsize);
lib/tests/kunit_iov_iter.c
173
copied = copy_from_iter(scratch, size, &iter);
lib/tests/kunit_iov_iter.c
176
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
177
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
lib/tests/kunit_iov_iter.c
218
struct iov_iter *iter, int dir,
lib/tests/kunit_iov_iter.c
252
iov_iter_bvec(iter, dir, bvec, i, size);
lib/tests/kunit_iov_iter.c
261
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
278
iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
lib/tests/kunit_iov_iter.c
280
size = iter.count;
lib/tests/kunit_iov_iter.c
282
copied = copy_to_iter(scratch, size, &iter);
lib/tests/kunit_iov_iter.c
285
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
286
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
lib/tests/kunit_iov_iter.c
315
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
332
iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
lib/tests/kunit_iov_iter.c
334
size = iter.count;
lib/tests/kunit_iov_iter.c
336
copied = copy_from_iter(scratch, size, &iter);
lib/tests/kunit_iov_iter.c
339
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
340
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
lib/tests/kunit_iov_iter.c
380
struct iov_iter *iter, int dir,
lib/tests/kunit_iov_iter.c
399
iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
lib/tests/kunit_iov_iter.c
419
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
438
iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
lib/tests/kunit_iov_iter.c
445
iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
lib/tests/kunit_iov_iter.c
446
iov_iter_advance(&iter, pr->from);
lib/tests/kunit_iov_iter.c
447
copied = copy_to_iter(scratch + i, size, &iter);
lib/tests/kunit_iov_iter.c
450
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
451
KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
lib/tests/kunit_iov_iter.c
481
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
500
iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
lib/tests/kunit_iov_iter.c
507
iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
lib/tests/kunit_iov_iter.c
508
iov_iter_advance(&iter, pr->from);
lib/tests/kunit_iov_iter.c
509
copied = copy_from_iter(scratch + i, size, &iter);
lib/tests/kunit_iov_iter.c
512
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
513
KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
lib/tests/kunit_iov_iter.c
548
struct iov_iter *iter, int dir,
lib/tests/kunit_iov_iter.c
561
iov_iter_xarray(iter, dir, xarray, 0, size);
lib/tests/kunit_iov_iter.c
581
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
600
iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
lib/tests/kunit_iov_iter.c
607
iov_iter_xarray(&iter, READ, xarray, pr->from, size);
lib/tests/kunit_iov_iter.c
608
copied = copy_to_iter(scratch + i, size, &iter);
lib/tests/kunit_iov_iter.c
611
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
612
KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
lib/tests/kunit_iov_iter.c
639
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
658
iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
lib/tests/kunit_iov_iter.c
665
iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
lib/tests/kunit_iov_iter.c
666
copied = copy_from_iter(scratch + i, size, &iter);
lib/tests/kunit_iov_iter.c
669
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
670
KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
lib/tests/kunit_iov_iter.c
702
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
715
iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
lib/tests/kunit_iov_iter.c
717
size = iter.count;
lib/tests/kunit_iov_iter.c
727
len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
lib/tests/kunit_iov_iter.c
735
KUNIT_EXPECT_EQ(test, iter.count, size - len);
lib/tests/kunit_iov_iter.c
74
struct iov_iter *iter, int dir,
lib/tests/kunit_iov_iter.c
768
} while (iov_iter_count(&iter) > 0);
lib/tests/kunit_iov_iter.c
772
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
782
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
793
iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
lib/tests/kunit_iov_iter.c
795
size = iter.count;
lib/tests/kunit_iov_iter.c
805
len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
lib/tests/kunit_iov_iter.c
813
KUNIT_EXPECT_EQ(test, iter.count, size - len);
lib/tests/kunit_iov_iter.c
846
} while (iov_iter_count(&iter) > 0);
lib/tests/kunit_iov_iter.c
850
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
861
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
873
iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);
lib/tests/kunit_iov_iter.c
880
iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
lib/tests/kunit_iov_iter.c
881
iov_iter_advance(&iter, from);
lib/tests/kunit_iov_iter.c
889
len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
lib/tests/kunit_iov_iter.c
895
KUNIT_EXPECT_EQ(test, iter.count, size - len);
lib/tests/kunit_iov_iter.c
923
} while (iov_iter_count(&iter) > 0);
lib/tests/kunit_iov_iter.c
926
KUNIT_EXPECT_EQ(test, iter.count, 0);
lib/tests/kunit_iov_iter.c
93
iov_iter_kvec(iter, dir, kvec, i, size);
lib/tests/kunit_iov_iter.c
939
struct iov_iter iter;
lib/tests/kunit_iov_iter.c
952
iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
lib/tests/kunit_iov_iter.c
959
iov_iter_xarray(&iter, WRITE, xarray, from, size);
lib/tests/kunit_iov_iter.c
967
len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
lib/tests/kunit_iov_iter.c
973
KUNIT_EXPECT_EQ(test, iter.count, size - len);
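
Illustrative sketch (not kernel code): the kunit_iov_iter.c tests above copy through an iterator and then check that count dropped to zero and the offset landed where expected. A userspace analogue of that copy-then-verify pattern over plain struct iovec segments; the helper and its fields are hypothetical, not the iov_iter API:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <assert.h>

/* Cursor over a segmented destination: (segment, offset) plus remaining count. */
struct seg_iter {
	struct iovec *iov;
	unsigned int nr_segs;
	unsigned int seg;	/* current segment */
	size_t off;		/* offset within the current segment */
	size_t count;		/* bytes still available */
};

static size_t copy_to_segs(struct seg_iter *it, const char *src, size_t len)
{
	size_t copied = 0;

	while (len && it->seg < it->nr_segs) {
		struct iovec *v = &it->iov[it->seg];
		size_t n = v->iov_len - it->off;

		if (n > len)
			n = len;
		memcpy((char *)v->iov_base + it->off, src + copied, n);
		copied += n;
		len -= n;
		it->off += n;
		it->count -= n;
		if (it->off == v->iov_len) {	/* segment exhausted, move on */
			it->seg++;
			it->off = 0;
		}
	}
	return copied;
}

int main(void)
{
	char a[4], b[8];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct seg_iter it = { iov, 2, 0, 0, sizeof(a) + sizeof(b) };
	const char msg[12] = "hello world";
	size_t copied = copy_to_segs(&it, msg, sizeof(msg));

	/* Mirrors the KUNIT_EXPECT_EQ checks: everything consumed, cursor at the end. */
	assert(copied == sizeof(msg));
	assert(it.count == 0);
	printf("copied %zu bytes, %u segments used\n", copied, it.seg);
	return 0;
}
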
mm/backing-dev.c
853
struct radix_tree_iter iter;
mm/backing-dev.c
860
radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
mm/filemap.c
2768
ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
mm/filemap.c
2785
if (unlikely(!iov_iter_count(iter)))
mm/filemap.c
2788
iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);
mm/filemap.c
2805
error = filemap_get_pages(iocb, iter->count, &fbatch, false);
mm/filemap.c
2820
end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
mm/filemap.c
2856
copied = copy_folio_to_iter(folio, offset, bytes, iter);
mm/filemap.c
2875
} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
mm/filemap.c
2956
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
mm/filemap.c
2958
size_t count = iov_iter_count(iter);
mm/filemap.c
2974
retval = mapping->a_ops->direct_IO(iocb, iter);
mm/filemap.c
2980
iov_iter_revert(iter, count - iov_iter_count(iter));
mm/filemap.c
2997
return filemap_read(iocb, iter, retval);
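filemap_read() above shows the generic shape of a ->read_iter() body: return early if iov_iter_count() is zero, copy toward the iterator in a loop, and advance iocb->ki_pos by what was actually copied. A hedged sketch of that shape over a flat in-memory buffer; demo_buf and demo_len are hypothetical backing storage, not from mm/filemap.c:

	static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *iter)
	{
		size_t n;

		if (unlikely(!iov_iter_count(iter)))	/* nothing requested */
			return 0;
		if (iocb->ki_pos >= demo_len)		/* past EOF */
			return 0;

		n = copy_to_iter(demo_buf + iocb->ki_pos,
				 demo_len - iocb->ki_pos, iter);
		iocb->ki_pos += n;	/* copy_to_iter() already consumed the iterator */
		return n;
	}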
mm/hugetlb.c
603
struct file_region *iter, *trg = NULL;
mm/hugetlb.c
613
list_for_each_entry_safe(iter, trg, head, link) {
mm/hugetlb.c
615
if (iter->from < f) {
mm/hugetlb.c
619
if (iter->to > last_accounted_offset)
mm/hugetlb.c
620
last_accounted_offset = iter->to;
mm/hugetlb.c
627
if (iter->from >= t) {
mm/hugetlb.c
628
rg = iter->link.prev;
mm/hugetlb.c
635
if (iter->from > last_accounted_offset)
mm/hugetlb.c
636
add += hugetlb_resv_map_add(resv, iter->link.prev,
mm/hugetlb.c
638
iter->from, h, h_cg,
mm/hugetlb.c
641
last_accounted_offset = iter->to;
mm/kfence/kfence_test.c
551
int iter;
mm/kfence/kfence_test.c
553
for (iter = 0; iter < 5; iter++) {
mm/kfence/kfence_test.c
555
0, (iter & 1) ? ctor_set_x : NULL);
mm/kmsan/core.c
111
iter = backwards ? n - 1 : 0;
mm/kmsan/core.c
116
for (i = 0; i < n; i++, iter += step) {
mm/kmsan/core.c
117
oiter_src = (iter + src_off) / KMSAN_ORIGIN_SIZE;
mm/kmsan/core.c
118
oiter_dst = (iter + dst_off) / KMSAN_ORIGIN_SIZE;
mm/kmsan/core.c
119
if (!shadow_src[iter]) {
mm/kmsan/core.c
120
shadow_dst[iter] = 0;
mm/kmsan/core.c
125
shadow_dst[iter] = shadow_src[iter];
mm/kmsan/core.c
83
int i, iter, step, src_off, dst_off, oiter_src, oiter_dst;
mm/madvise.c
2041
static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
mm/madvise.c
2053
total_len = iov_iter_count(iter);
mm/madvise.c
2060
while (iov_iter_count(iter)) {
mm/madvise.c
2061
unsigned long start = (unsigned long)iter_iov_addr(iter);
mm/madvise.c
2062
size_t len_in = iter_iov_len(iter);
mm/madvise.c
2096
iov_iter_advance(iter, iter_iov_len(iter));
mm/madvise.c
2102
ret = (total_len - iov_iter_count(iter)) ? : ret;
mm/madvise.c
2113
struct iov_iter iter;
mm/madvise.c
2123
ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
mm/madvise.c
2159
ret = vector_madvise(mm, &iter, behavior);
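vector_madvise() above walks an imported user iovec segment by segment. The general recipe, sketched under the assumption that uvec/vlen came from a syscall and with error handling trimmed:

	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(ITER_DEST, uvec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	while (iov_iter_count(&iter)) {
		unsigned long start = (unsigned long)iter_iov_addr(&iter);
		size_t len_in = iter_iov_len(&iter);

		/* ... act on the user range [start, start + len_in) ... */

		iov_iter_advance(&iter, len_in);	/* step to the next segment */
	}

	kfree(iov);	/* import_iovec() may have allocated a larger array */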
mm/memcontrol-v1.c
1246
struct mem_cgroup *iter, *failed = NULL;
mm/memcontrol-v1.c
1250
for_each_mem_cgroup_tree(iter, memcg) {
mm/memcontrol-v1.c
1251
if (iter->oom_lock) {
mm/memcontrol-v1.c
1256
failed = iter;
mm/memcontrol-v1.c
1257
mem_cgroup_iter_break(memcg, iter);
mm/memcontrol-v1.c
1260
iter->oom_lock = true;
mm/memcontrol-v1.c
1268
for_each_mem_cgroup_tree(iter, memcg) {
mm/memcontrol-v1.c
1269
if (iter == failed) {
mm/memcontrol-v1.c
1270
mem_cgroup_iter_break(memcg, iter);
mm/memcontrol-v1.c
1273
iter->oom_lock = false;
mm/memcontrol-v1.c
1285
struct mem_cgroup *iter;
mm/memcontrol-v1.c
1289
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
1290
iter->oom_lock = false;
mm/memcontrol-v1.c
1296
struct mem_cgroup *iter;
mm/memcontrol-v1.c
1299
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
1300
iter->under_oom++;
mm/memcontrol-v1.c
1306
struct mem_cgroup *iter;
mm/memcontrol-v1.c
1313
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
1314
if (iter->under_oom > 0)
mm/memcontrol-v1.c
1315
iter->under_oom--;
mm/memcontrol-v1.c
749
struct mem_cgroup *iter;
mm/memcontrol-v1.c
751
for_each_mem_cgroup_tree(iter, memcg)
mm/memcontrol-v1.c
752
mem_cgroup_oom_notify_cb(iter);
mm/memcontrol-v1.h
15
#define for_each_mem_cgroup_tree(iter, root) \
mm/memcontrol-v1.h
16
for (iter = mem_cgroup_iter(root, NULL, NULL); \
mm/memcontrol-v1.h
17
iter != NULL; \
mm/memcontrol-v1.h
18
iter = mem_cgroup_iter(root, iter, NULL))
mm/memcontrol-v1.h
20
#define for_each_mem_cgroup(iter) \
mm/memcontrol-v1.h
21
for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
mm/memcontrol-v1.h
22
iter != NULL; \
mm/memcontrol-v1.h
23
iter = mem_cgroup_iter(NULL, iter, NULL))
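The two macros above are the hierarchy walk used by every mm/memcontrol-v1.c caller in this list: mem_cgroup_iter() yields each descendant of root (or of the whole system for for_each_mem_cgroup), and a caller that stops early must release the cursor with mem_cgroup_iter_break(). A hedged sketch of that usage, modelled on the oom_lock walk listed above:

	static bool demo_subtree_locked(struct mem_cgroup *memcg)
	{
		struct mem_cgroup *iter;

		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter->oom_lock) {
				/* bailing out early must drop the iterator's reference */
				mem_cgroup_iter_break(memcg, iter);
				return true;
			}
		}
		return false;
	}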
mm/memcontrol.c
1025
struct mem_cgroup_reclaim_iter *iter;
mm/memcontrol.c
1044
iter = &root->nodeinfo[nid]->iter;
mm/memcontrol.c
1045
gen = atomic_read(&iter->generation);
mm/memcontrol.c
1056
pos = READ_ONCE(iter->position);
mm/memcontrol.c
1080
if (cmpxchg(&iter->position, pos, next) != pos) {
mm/memcontrol.c
1087
atomic_inc(&iter->generation);
mm/memcontrol.c
1125
struct mem_cgroup_reclaim_iter *iter;
mm/memcontrol.c
1131
iter = &mz->iter;
mm/memcontrol.c
1132
cmpxchg(&iter->position, dead_memcg, NULL);
mm/memcontrol.c
1173
struct mem_cgroup *iter;
mm/memcontrol.c
1178
for_each_mem_cgroup_tree(iter, memcg) {
mm/memcontrol.c
1182
css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
mm/memcontrol.c
1190
mem_cgroup_iter_break(memcg, iter);
mm/memcontrol.c
212
struct obj_cgroup *objcg, *iter;
mm/memcontrol.c
221
list_for_each_entry(iter, &memcg->objcg_list, list)
mm/memcontrol.c
222
WRITE_ONCE(iter->memcg, parent);
mm/nommu.c
178
long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
mm/nommu.c
184
return copy_to_iter(addr, count, iter);
mm/page_owner.c
251
struct page_ext_iter iter;
mm/page_owner.c
256
for_each_page_ext(page, 1 << order, page_ext, iter) {
mm/page_owner.c
279
struct page_ext_iter iter;
mm/page_owner.c
284
for_each_page_ext(page, 1 << order, page_ext, iter) {
mm/page_owner.c
363
struct page_ext_iter iter;
mm/page_owner.c
368
for_each_page_ext(page, 1 << old_order, page_ext, iter) {
mm/page_owner.c
378
struct page_ext_iter iter;
mm/page_owner.c
418
for_each_page_ext(&old->page, 1 << new_page_owner->order, page_ext, iter) {
mm/page_table_check.c
100
struct page_ext_iter iter;
mm/page_table_check.c
113
for_each_page_ext(page, pgcnt, page_ext, iter) {
mm/page_table_check.c
133
struct page_ext_iter iter;
mm/page_table_check.c
139
for_each_page_ext(page, 1 << order, page_ext, iter) {
mm/page_table_check.c
65
struct page_ext_iter iter;
mm/page_table_check.c
78
for_each_page_ext(page, pgcnt, page_ext, iter) {
mm/process_vm_access.c
119
start_offset, bytes, iter,
mm/process_vm_access.c
151
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
mm/process_vm_access.c
165
size_t total_len = iov_iter_count(iter);
mm/process_vm_access.c
215
for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
mm/process_vm_access.c
218
iter, process_pages, mm, task, vm_write);
mm/process_vm_access.c
221
total_len -= iov_iter_count(iter);
mm/process_vm_access.c
265
struct iov_iter iter;
mm/process_vm_access.c
273
rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
mm/process_vm_access.c
276
if (!iov_iter_count(&iter))
mm/process_vm_access.c
284
rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
mm/process_vm_access.c
30
struct iov_iter *iter,
mm/process_vm_access.c
34
while (len && iov_iter_count(iter)) {
mm/process_vm_access.c
43
copied = copy_page_from_iter(page, offset, copy, iter);
mm/process_vm_access.c
45
copied = copy_page_to_iter(page, offset, copy, iter);
mm/process_vm_access.c
48
if (copied < copy && iov_iter_count(iter))
mm/process_vm_access.c
75
struct iov_iter *iter,
mm/process_vm_access.c
95
while (!rc && nr_pages && iov_iter_count(iter)) {
mm/shmem.c
887
swp_entry_t iter, swap;
mm/shmem.c
903
iter = swap;
mm/shmem.c
912
if (!expected || entry != swp_to_radix_entry(iter)) {
mm/shmem.c
916
iter.val += 1 << xas_get_order(&xas);
mm/shmem.c
918
if (expected && iter.val - nr != swap.val) {
mm/slub.c
1942
const char *iter;
mm/slub.c
1963
next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
mm/slub.c
1964
if (!iter)
mm/slub.c
1967
while (*iter) {
mm/slub.c
1971
end = strchrnul(iter, ',');
mm/slub.c
1975
glob = strnchr(iter, end - iter, '*');
mm/slub.c
1977
cmplen = glob - iter;
mm/slub.c
1979
cmplen = max_t(size_t, len, (end - iter));
mm/slub.c
1981
if (!strncmp(name, iter, cmplen)) {
mm/slub.c
1988
iter = end + 1;
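The mm/slub.c entries above scan a comma-separated slab-name list with a plain char-pointer cursor. The skeleton of that scan as a hedged sketch; list is a hypothetical NUL-terminated string:

	const char *iter = list, *end;

	while (*iter) {
		end = strchrnul(iter, ',');	/* end points at ',' or the final NUL */

		/* the current token is the range [iter, end) */

		if (!*end)
			break;
		iter = end + 1;			/* skip the comma */
	}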
mm/swap_cgroup.c
105
if (!iter)
mm/swap_cgroup.c
106
iter = old;
mm/swap_cgroup.c
107
VM_BUG_ON(iter != old);
mm/swap_cgroup.c
97
unsigned short old, iter = 0;
mm/vmalloc.c
4423
static size_t zero_iter(struct iov_iter *iter, size_t count)
mm/vmalloc.c
4431
copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
mm/vmalloc.c
4447
static size_t aligned_vread_iter(struct iov_iter *iter,
mm/vmalloc.c
4472
length, iter);
mm/vmalloc.c
4474
copied = zero_iter(iter, length);
mm/vmalloc.c
4491
static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
mm/vmalloc.c
4507
return aligned_vread_iter(iter, addr, count);
mm/vmalloc.c
4536
size_t zeroed = zero_iter(iter, to_zero);
mm/vmalloc.c
4551
copied = aligned_vread_iter(iter, start + offset, n);
mm/vmalloc.c
4564
return count - remains + zero_iter(iter, remains);
mm/vmalloc.c
4595
long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
mm/vmalloc.c
4651
size_t zeroed = zero_iter(iter, to_zero);
mm/vmalloc.c
4665
copied = vmap_ram_vread_iter(iter, addr, n, flags);
mm/vmalloc.c
4667
copied = aligned_vread_iter(iter, addr, n);
mm/vmalloc.c
4669
copied = zero_iter(iter, n);
mm/vmalloc.c
4687
return count - remains + zero_iter(iter, remains);
mm/vmscan.c
3043
static bool iterate_mm_list(struct lru_gen_mm_walk *walk, struct mm_struct **iter)
mm/vmscan.c
3091
if (*iter || last)
mm/vmscan.c
3099
if (*iter)
mm/vmscan.c
3100
mmput_async(*iter);
mm/vmscan.c
3102
*iter = mm;
net/appletalk/aarp.c
1006
if (iter->table == unresolved)
net/appletalk/aarp.c
1013
(iter->table == resolved) ? "resolved"
net/appletalk/aarp.c
1014
: (iter->table == unresolved) ? "unresolved"
net/appletalk/aarp.c
1015
: (iter->table == proxies) ? "proxies"
net/appletalk/aarp.c
906
static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos)
net/appletalk/aarp.c
908
int ct = iter->bucket;
net/appletalk/aarp.c
909
struct aarp_entry **table = iter->table;
net/appletalk/aarp.c
917
iter->table = table;
net/appletalk/aarp.c
918
iter->bucket = ct;
net/appletalk/aarp.c
941
struct aarp_iter_state *iter = seq->private;
net/appletalk/aarp.c
944
iter->table = resolved;
net/appletalk/aarp.c
945
iter->bucket = 0;
net/appletalk/aarp.c
947
return *pos ? iter_next(iter, pos) : SEQ_START_TOKEN;
net/appletalk/aarp.c
953
struct aarp_iter_state *iter = seq->private;
net/appletalk/aarp.c
959
entry = iter_next(iter, NULL);
net/appletalk/aarp.c
967
++iter->bucket;
net/appletalk/aarp.c
968
entry = iter_next(iter, NULL);
net/appletalk/aarp.c
990
struct aarp_iter_state *iter = seq->private;
net/batman-adv/bat_iv_ogm.c
1566
struct list_head *iter;
net/batman-adv/bat_iv_ogm.c
1604
netdev_for_each_lower_private_rcu(if_incoming->mesh_iface, hard_iface, iter) {
net/batman-adv/bat_iv_ogm.c
1666
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/bat_iv_ogm.c
2137
struct list_head *iter;
net/batman-adv/bat_iv_ogm.c
2154
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/bat_iv_ogm.c
796
struct list_head *iter;
net/batman-adv/bat_iv_ogm.c
853
netdev_for_each_lower_private_rcu(hard_iface->mesh_iface, tmp_hard_iface, iter) {
net/batman-adv/bat_v.c
215
struct list_head *iter;
net/batman-adv/bat_v.c
231
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/bat_v_elp.c
482
struct list_head *iter;
net/batman-adv/bat_v_elp.c
486
netdev_for_each_lower_private_rcu(primary_iface->mesh_iface, hard_iface, iter)
net/batman-adv/bat_v_ogm.c
266
struct list_head *iter;
net/batman-adv/bat_v_ogm.c
303
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/bat_v_ogm.c
858
struct list_head *iter;
net/batman-adv/bat_v_ogm.c
921
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/hard-interface.c
445
struct list_head *iter;
net/batman-adv/hard-interface.c
448
netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) {
net/batman-adv/hard-interface.c
512
struct list_head *iter;
net/batman-adv/hard-interface.c
517
netdev_for_each_lower_private(mesh_iface, tmp_hard_iface, iter) {
net/batman-adv/hard-interface.c
545
struct list_head *iter;
net/batman-adv/hard-interface.c
548
netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) {
net/batman-adv/hard-interface.c
584
struct list_head *iter;
net/batman-adv/hard-interface.c
588
netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) {
net/batman-adv/hard-interface.c
802
struct list_head *iter;
net/batman-adv/hard-interface.c
806
netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter)
net/batman-adv/main.c
293
struct list_head *iter;
net/batman-adv/main.c
297
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/multicast.c
249
struct list_head *iter;
net/batman-adv/multicast.c
252
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/netlink.c
953
struct list_head *iter;
net/batman-adv/netlink.c
965
netdev_for_each_lower_private(mesh_iface, hard_iface, iter) {
net/batman-adv/originator.c
1209
struct list_head *iter;
net/batman-adv/originator.c
1234
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/batman-adv/send.c
912
struct list_head *iter;
net/batman-adv/send.c
920
netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) {
net/bridge/br_switchdev.c
579
struct list_head *iter;
net/bridge/br_switchdev.c
581
netdev_for_each_lower_dev(dev, lower_dev, iter)
net/bridge/netfilter/nf_conntrack_bridge.c
59
struct ip_fraglist_iter iter;
net/bridge/netfilter/nf_conntrack_bridge.c
78
ip_fraglist_init(skb, iph, hlen, &iter);
net/bridge/netfilter/nf_conntrack_bridge.c
81
if (iter.frag)
net/bridge/netfilter/nf_conntrack_bridge.c
82
ip_fraglist_prepare(skb, &iter);
net/bridge/netfilter/nf_conntrack_bridge.c
86
if (err || !iter.frag)
net/bridge/netfilter/nf_conntrack_bridge.c
89
skb = ip_fraglist_next(&iter);
net/bridge/netfilter/nf_conntrack_bridge.c
95
kfree_skb_list(iter.frag);
net/ceph/messenger.c
1946
msg->data_length += bvec_pos->iter.bi_size;
net/ceph/messenger.c
1951
struct iov_iter *iter)
net/ceph/messenger.c
1957
data->iter = *iter;
net/ceph/messenger.c
1959
msg->data_length += iov_iter_count(&data->iter);
net/ceph/messenger.c
733
if (cursor->resid < it->iter.bi_size)
net/ceph/messenger.c
734
it->iter.bi_size = cursor->resid;
net/ceph/messenger.c
736
BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
net/ceph/messenger.c
744
cursor->bio_iter.iter);
net/ceph/messenger.c
755
struct page *page = bio_iter_page(it->bio, it->iter);
net/ceph/messenger.c
758
BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
net/ceph/messenger.c
760
bio_advance_iter(it->bio, &it->iter, bytes);
net/ceph/messenger.c
765
if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
net/ceph/messenger.c
766
page == bio_iter_page(it->bio, it->iter)))
net/ceph/messenger.c
769
if (!it->iter.bi_size) {
net/ceph/messenger.c
771
it->iter = it->bio->bi_iter;
net/ceph/messenger.c
772
if (cursor->resid < it->iter.bi_size)
net/ceph/messenger.c
773
it->iter.bi_size = cursor->resid;
net/ceph/messenger.c
776
BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
net/ceph/messenger.c
787
cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
net/ceph/messenger.c
788
cursor->bvec_iter = data->bvec_pos.iter;
net/ceph/messenger.c
978
cursor->iov_iter = data->iter;
net/ceph/osd_client.c
175
struct iov_iter *iter)
net/ceph/osd_client.c
178
osd_data->iter = *iter;
net/ceph/osd_client.c
245
.iter = { .bi_size = bytes },
net/ceph/osd_client.c
271
unsigned int which, struct iov_iter *iter)
net/ceph/osd_client.c
276
ceph_osd_iter_init(osd_data, iter);
net/ceph/osd_client.c
312
.iter = { .bi_size = bytes },
net/ceph/osd_client.c
348
return osd_data->bvec_pos.iter.bi_size;
net/ceph/osd_client.c
350
return iov_iter_count(&osd_data->iter);
net/ceph/osd_client.c
960
ceph_msg_data_add_iter(msg, &osd_data->iter);
net/core/dev.c
10124
struct list_head *iter;
net/core/dev.c
10137
netdev_for_each_lower_dev(dev, lower_dev, iter) {
net/core/dev.c
10409
struct list_head *iter;
net/core/dev.c
10449
netdev_for_each_upper_dev_rcu(dev, upper, iter) {
net/core/dev.c
11007
struct list_head *iter;
net/core/dev.c
11022
netdev_for_each_upper_dev_rcu(dev, upper, iter)
net/core/dev.c
11050
netdev_for_each_lower_dev(dev, lower, iter)
net/core/dev.c
12817
struct list_head *iter;
net/core/dev.c
12823
netdev_for_each_lower_dev(dev, lower_dev, iter) {
net/core/dev.c
1826
struct list_head *iter;
net/core/dev.c
1834
netdev_for_each_lower_dev(dev, lower_dev, iter) {
net/core/dev.c
8131
struct list_head **iter)
net/core/dev.c
8137
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
net/core/dev.c
8142
*iter = &upper->list;
net/core/dev.c
8149
struct list_head **iter,
net/core/dev.c
8154
upper = list_entry((*iter)->next, struct netdev_adjacent, list);
net/core/dev.c
8159
*iter = &upper->list;
net/core/dev.c
8166
struct list_head **iter)
net/core/dev.c
8172
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
net/core/dev.c
8177
*iter = &upper->list;
net/core/dev.c
8188
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
net/core/dev.c
8193
iter = &dev->adj_list.upper;
net/core/dev.c
8204
udev = __netdev_next_upper_dev(now, &iter, &ignore);
net/core/dev.c
8213
iter_stack[cur++] = iter;
net/core/dev.c
8225
iter = niter;
net/core/dev.c
8237
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
net/core/dev.c
8241
iter = &dev->adj_list.upper;
net/core/dev.c
8252
udev = netdev_next_upper_dev_rcu(now, &iter);
net/core/dev.c
8259
iter_stack[cur++] = iter;
net/core/dev.c
8271
iter = niter;
net/core/dev.c
8304
struct list_head **iter)
net/core/dev.c
8308
lower = list_entry(*iter, struct netdev_adjacent, list);
net/core/dev.c
8313
*iter = lower->list.next;
net/core/dev.c
8330
struct list_head **iter)
net/core/dev.c
8336
lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
net/core/dev.c
8341
*iter = &lower->list;
net/core/dev.c
8358
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
net/core/dev.c
8362
lower = list_entry(*iter, struct netdev_adjacent, list);
net/core/dev.c
8367
*iter = lower->list.next;
net/core/dev.c
8374
struct list_head **iter)
net/core/dev.c
8378
lower = list_entry((*iter)->next, struct netdev_adjacent, list);
net/core/dev.c
8383
*iter = &lower->list;
net/core/dev.c
8389
struct list_head **iter,
net/core/dev.c
8394
lower = list_entry((*iter)->next, struct netdev_adjacent, list);
net/core/dev.c
8399
*iter = &lower->list;
net/core/dev.c
8411
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
net/core/dev.c
8415
iter = &dev->adj_list.lower;
net/core/dev.c
8426
ldev = netdev_next_lower_dev(now, &iter);
net/core/dev.c
8433
iter_stack[cur++] = iter;
net/core/dev.c
8445
iter = niter;
net/core/dev.c
8458
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
net/core/dev.c
8463
iter = &dev->adj_list.lower;
net/core/dev.c
8474
ldev = __netdev_next_lower_dev(now, &iter, &ignore);
net/core/dev.c
8483
iter_stack[cur++] = iter;
net/core/dev.c
8495
iter = niter;
net/core/dev.c
8502
struct list_head **iter)
net/core/dev.c
8506
lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
net/core/dev.c
8510
*iter = &lower->list;
net/core/dev.c
8519
struct list_head *iter;
net/core/dev.c
8523
for (iter = &dev->adj_list.upper,
net/core/dev.c
8524
udev = __netdev_next_upper_dev(dev, &iter, &ignore);
net/core/dev.c
8526
udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
net/core/dev.c
8539
struct list_head *iter;
net/core/dev.c
8543
for (iter = &dev->adj_list.lower,
net/core/dev.c
8544
ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
net/core/dev.c
8546
ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
net/core/dev.c
8596
struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
net/core/dev.c
8600
iter = &dev->adj_list.lower;
net/core/dev.c
8611
ldev = netdev_next_lower_dev_rcu(now, &iter);
net/core/dev.c
8618
iter_stack[cur++] = iter;
net/core/dev.c
8630
iter = niter;
net/core/dev.c
9448
struct netdev_adjacent *iter;
net/core/dev.c
9452
list_for_each_entry(iter, &dev->adj_list.upper, list) {
net/core/dev.c
9453
if (!net_eq(net, dev_net(iter->dev)))
net/core/dev.c
9455
netdev_adjacent_sysfs_add(iter->dev, dev,
net/core/dev.c
9456
&iter->dev->adj_list.lower);
net/core/dev.c
9457
netdev_adjacent_sysfs_add(dev, iter->dev,
net/core/dev.c
9461
list_for_each_entry(iter, &dev->adj_list.lower, list) {
net/core/dev.c
9462
if (!net_eq(net, dev_net(iter->dev)))
net/core/dev.c
9464
netdev_adjacent_sysfs_add(iter->dev, dev,
net/core/dev.c
9465
&iter->dev->adj_list.upper);
net/core/dev.c
9466
netdev_adjacent_sysfs_add(dev, iter->dev,
net/core/dev.c
9473
struct netdev_adjacent *iter;
net/core/dev.c
9477
list_for_each_entry(iter, &dev->adj_list.upper, list) {
net/core/dev.c
9478
if (!net_eq(net, dev_net(iter->dev)))
net/core/dev.c
9480
netdev_adjacent_sysfs_del(iter->dev, dev->name,
net/core/dev.c
9481
&iter->dev->adj_list.lower);
net/core/dev.c
9482
netdev_adjacent_sysfs_del(dev, iter->dev->name,
net/core/dev.c
9486
list_for_each_entry(iter, &dev->adj_list.lower, list) {
net/core/dev.c
9487
if (!net_eq(net, dev_net(iter->dev)))
net/core/dev.c
9489
netdev_adjacent_sysfs_del(iter->dev, dev->name,
net/core/dev.c
9490
&iter->dev->adj_list.upper);
net/core/dev.c
9491
netdev_adjacent_sysfs_del(dev, iter->dev->name,
net/core/dev.c
9498
struct netdev_adjacent *iter;
net/core/dev.c
9502
list_for_each_entry(iter, &dev->adj_list.upper, list) {
net/core/dev.c
9503
if (!net_eq(net, dev_net(iter->dev)))
net/core/dev.c
9505
netdev_adjacent_sysfs_del(iter->dev, oldname,
net/core/dev.c
9506
&iter->dev->adj_list.lower);
net/core/dev.c
9507
netdev_adjacent_sysfs_add(iter->dev, dev,
net/core/dev.c
9508
&iter->dev->adj_list.lower);
net/core/dev.c
9511
list_for_each_entry(iter, &dev->adj_list.lower, list) {
net/core/dev.c
9512
if (!net_eq(net, dev_net(iter->dev)))
net/core/dev.c
9514
netdev_adjacent_sysfs_del(iter->dev, oldname,
net/core/dev.c
9515
&iter->dev->adj_list.upper);
net/core/dev.c
9516
netdev_adjacent_sysfs_add(iter->dev, dev,
net/core/dev.c
9517
&iter->dev->adj_list.upper);
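Most of the net/ users in this list (batman-adv, dsa, bridge, and net/core/dev.c itself) share one idiom: a struct list_head *iter cursor driven by the netdev adjacency macros. A minimal sketch, assuming RTNL is held for the plain lower-dev walk; the _rcu upper/lower variants expect rcu_read_lock() instead:

	struct net_device *lower;
	struct list_head *iter;

	ASSERT_RTNL();
	netdev_for_each_lower_dev(dev, lower, iter) {
		netdev_dbg(dev, "lower device %s\n", lower->name);
	}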
net/core/gso.c
227
const struct sk_buff *iter;
net/core/gso.c
235
skb_walk_frags(skb, iter) {
net/core/gso.c
236
if (seg_len + skb_headlen(iter) > max_len)
net/core/net-procfs.c
180
struct ptype_iter_state *iter = seq->private;
net/core/net-procfs.c
191
iter->dev = dev;
net/core/net-procfs.c
198
iter->dev = NULL;
net/core/net-procfs.c
231
struct ptype_iter_state *iter = seq->private;
net/core/net-procfs.c
244
dev = iter->dev;
net/core/net-procfs.c
252
iter->dev = dev;
net/core/net-procfs.c
256
iter->dev = NULL;
net/core/net-procfs.c
295
struct ptype_iter_state *iter = seq->private;
net/core/net-procfs.c
303
dev = iter->dev;
net/core/skbuff.c
3200
struct sk_buff *iter;
net/core/skbuff.c
3234
skb_walk_frags(skb, iter) {
net/core/skbuff.c
3235
if (*offset >= iter->len) {
net/core/skbuff.c
3236
*offset -= iter->len;
net/core/skbuff.c
3243
if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
net/core/skbuff.c
4818
struct sk_buff *iter;
net/core/skbuff.c
4834
skb_walk_frags(head_skb, iter) {
net/core/skbuff.c
4835
if (frag_len != iter->len && iter->next)
net/core/skbuff.c
4837
if (skb_headlen(iter) && !iter->head_frag)
net/core/skbuff.c
4840
len -= iter->len;
net/core/skbuff.c
5076
struct sk_buff *iter;
net/core/skbuff.c
5087
for (iter = segs; iter; iter = iter->next) {
net/core/skbuff.c
5088
skb_shinfo(iter)->gso_size = gso_size;
net/core/skbuff.c
5089
skb_shinfo(iter)->gso_segs = partial_segs;
net/core/skbuff.c
5090
skb_shinfo(iter)->gso_type = type;
net/core/skbuff.c
5091
SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
net/core/skbuff.c
7345
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
net/core/skbuff.c
7353
while (iter->count > 0) {
net/core/skbuff.c
7365
len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off);
net/core/skbuff.c
7383
iov_iter_revert(iter, len);
net/core/skmsg.c
415
struct iov_iter *iter = &msg->msg_iter;
net/core/skmsg.c
443
copy = copy_page_to_iter(page, sge->offset, copy, iter);
net/core/xdp.c
100
rhashtable_walk_enter(mem_id_ht, &iter);
net/core/xdp.c
102
rhashtable_walk_start(&iter);
net/core/xdp.c
104
while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
net/core/xdp.c
109
rhashtable_walk_stop(&iter);
net/core/xdp.c
112
rhashtable_walk_exit(&iter);
net/core/xdp.c
96
struct rhashtable_iter iter;
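net/core/xdp.c above, and the ila_xlat/ioam6 dumpers later in this list, follow the standard rhashtable walk protocol: enter, start, consume entries until NULL, stop, exit, tolerating -EAGAIN when the table is resized mid-walk. Sketched here against a hypothetical table ht and entry type demo_entry:

	struct rhashtable_iter iter;
	struct demo_entry *e;

	rhashtable_walk_enter(&ht, &iter);
	rhashtable_walk_start(&iter);

	while ((e = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(e)) {
			if (PTR_ERR(e) == -EAGAIN)	/* table resized under us */
				continue;
			break;
		}
		/* ... use e ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);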
net/dsa/conduit.c
540
struct list_head *iter;
net/dsa/conduit.c
544
netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
net/dsa/port.c
741
struct list_head *iter;
net/dsa/port.c
743
netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
net/dsa/user.c
2905
struct list_head *iter;
net/dsa/user.c
2923
netdev_for_each_upper_dev_rcu(conduit, upper, iter) {
net/dsa/user.c
3061
struct list_head *iter;
net/dsa/user.c
3068
netdev_for_each_lower_dev(dev, lower, iter) {
net/dsa/user.c
3093
struct list_head *iter;
net/dsa/user.c
3100
netdev_for_each_lower_dev(dev, lower, iter) {
net/dsa/user.c
3281
struct list_head *iter;
net/dsa/user.c
3295
netdev_for_each_lower_dev(lag_dev, lower, iter) {
net/dsa/user.c
3324
struct list_head *iter;
net/dsa/user.c
3334
netdev_for_each_lower_dev(br, lower, iter) {
net/dsa/user.c
3418
struct list_head *iter;
net/dsa/user.c
3420
netdev_for_each_lower_dev(lag_dev, lower, iter) {
net/dsa/user.c
660
struct list_head *iter;
net/dsa/user.c
662
netdev_for_each_upper_dev_rcu(user, upper_dev, iter) {
net/ipv4/cipso_ipv4.c
1011
u32 iter;
net/ipv4/cipso_ipv4.c
1016
for (iter = 0; iter < rngcat_len; iter += 4) {
net/ipv4/cipso_ipv4.c
1017
cat_high = get_unaligned_be16(&rngcat[iter]);
net/ipv4/cipso_ipv4.c
1018
if ((iter + 4) <= rngcat_len)
net/ipv4/cipso_ipv4.c
1019
cat_low = get_unaligned_be16(&rngcat[iter + 2]);
net/ipv4/cipso_ipv4.c
1051
int iter = -1;
net/ipv4/cipso_ipv4.c
1062
iter = netlbl_catmap_walk(secattr->attr.mls.cat, iter + 1);
net/ipv4/cipso_ipv4.c
1063
if (iter < 0)
net/ipv4/cipso_ipv4.c
1065
cat_size += (iter == 0 ? 0 : sizeof(u16));
net/ipv4/cipso_ipv4.c
1068
array[array_cnt++] = iter;
net/ipv4/cipso_ipv4.c
1070
iter = netlbl_catmap_walkrng(secattr->attr.mls.cat, iter);
net/ipv4/cipso_ipv4.c
1071
if (iter < 0)
net/ipv4/cipso_ipv4.c
1076
array[array_cnt++] = iter;
net/ipv4/cipso_ipv4.c
1079
for (iter = 0; array_cnt > 0;) {
net/ipv4/cipso_ipv4.c
1080
*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
net/ipv4/cipso_ipv4.c
1081
iter += 2;
net/ipv4/cipso_ipv4.c
1084
*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
net/ipv4/cipso_ipv4.c
1085
iter += 2;
net/ipv4/cipso_ipv4.c
169
u32 iter;
net/ipv4/cipso_ipv4.c
176
for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
net/ipv4/cipso_ipv4.c
1761
u32 iter;
net/ipv4/cipso_ipv4.c
1769
iter = 0;
net/ipv4/cipso_ipv4.c
177
spin_lock_init(&cipso_v4_cache[iter].lock);
net/ipv4/cipso_ipv4.c
1772
switch (doi_def->tags[iter]) {
net/ipv4/cipso_ipv4.c
178
cipso_v4_cache[iter].size = 0;
net/ipv4/cipso_ipv4.c
179
INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
net/ipv4/cipso_ipv4.c
1801
iter++;
net/ipv4/cipso_ipv4.c
1803
iter < CIPSO_V4_TAG_MAXCNT &&
net/ipv4/cipso_ipv4.c
1804
doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
net/ipv4/cipso_ipv4.c
1813
int iter = 0, optlen = 0;
net/ipv4/cipso_ipv4.c
1821
while (iter < len) {
net/ipv4/cipso_ipv4.c
1822
if (data[iter] == IPOPT_END) {
net/ipv4/cipso_ipv4.c
1824
} else if (data[iter] == IPOPT_NOP) {
net/ipv4/cipso_ipv4.c
1825
iter++;
net/ipv4/cipso_ipv4.c
1827
iter += data[iter + 1];
net/ipv4/cipso_ipv4.c
1828
optlen = iter;
net/ipv4/cipso_ipv4.c
195
u32 iter;
net/ipv4/cipso_ipv4.c
197
for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
net/ipv4/cipso_ipv4.c
198
spin_lock_bh(&cipso_v4_cache[iter].lock);
net/ipv4/cipso_ipv4.c
201
&cipso_v4_cache[iter].list, list) {
net/ipv4/cipso_ipv4.c
205
cipso_v4_cache[iter].size = 0;
net/ipv4/cipso_ipv4.c
206
spin_unlock_bh(&cipso_v4_cache[iter].lock);
net/ipv4/cipso_ipv4.c
360
struct cipso_v4_doi *iter;
net/ipv4/cipso_ipv4.c
362
list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
net/ipv4/cipso_ipv4.c
363
if (iter->doi == doi && refcount_read(&iter->refcount))
net/ipv4/cipso_ipv4.c
364
return iter;
net/ipv4/cipso_ipv4.c
385
u32 iter;
net/ipv4/cipso_ipv4.c
395
for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
net/ipv4/cipso_ipv4.c
396
switch (doi_def->tags[iter]) {
net/ipv4/cipso_ipv4.c
409
if (iter == 0)
net/ipv4/cipso_ipv4.c
909
u32 iter;
net/ipv4/cipso_ipv4.c
914
for (iter = 0; iter < enumcat_len; iter += 2) {
net/ipv4/cipso_ipv4.c
915
cat = get_unaligned_be16(&enumcat[iter]);
net/ipv4/cipso_ipv4.c
979
u32 iter;
net/ipv4/cipso_ipv4.c
981
for (iter = 0; iter < net_cat_len; iter += 2) {
net/ipv4/cipso_ipv4.c
983
get_unaligned_be16(&net_cat[iter]),
net/ipv4/fib_trie.c
2428
static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
net/ipv4/fib_trie.c
2430
unsigned long cindex = iter->index;
net/ipv4/fib_trie.c
2431
struct key_vector *pn = iter->tnode;
net/ipv4/fib_trie.c
2435
iter->tnode, iter->index, iter->depth);
net/ipv4/fib_trie.c
2445
iter->tnode = pn;
net/ipv4/fib_trie.c
2446
iter->index = cindex;
net/ipv4/fib_trie.c
2449
iter->tnode = n;
net/ipv4/fib_trie.c
2450
iter->index = 0;
net/ipv4/fib_trie.c
2451
++iter->depth;
net/ipv4/fib_trie.c
2461
--iter->depth;
net/ipv4/fib_trie.c
2465
iter->tnode = pn;
net/ipv4/fib_trie.c
2466
iter->index = 0;
net/ipv4/fib_trie.c
2471
static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
net/ipv4/fib_trie.c
2485
iter->tnode = n;
net/ipv4/fib_trie.c
2486
iter->index = 0;
net/ipv4/fib_trie.c
2487
iter->depth = 1;
net/ipv4/fib_trie.c
2489
iter->tnode = pn;
net/ipv4/fib_trie.c
2490
iter->index = 0;
net/ipv4/fib_trie.c
2491
iter->depth = 0;
net/ipv4/fib_trie.c
2500
struct fib_trie_iter iter;
net/ipv4/fib_trie.c
2505
for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
net/ipv4/fib_trie.c
2510
s->totdepth += iter.depth;
net/ipv4/fib_trie.c
2511
if (iter.depth > s->maxdepth)
net/ipv4/fib_trie.c
2512
s->maxdepth = iter.depth;
net/ipv4/fib_trie.c
2649
struct fib_trie_iter *iter = seq->private;
net/ipv4/fib_trie.c
2661
for (n = fib_trie_get_first(iter,
net/ipv4/fib_trie.c
2663
n; n = fib_trie_get_next(iter))
net/ipv4/fib_trie.c
2665
iter->tb = tb;
net/ipv4/fib_trie.c
2683
struct fib_trie_iter *iter = seq->private;
net/ipv4/fib_trie.c
2685
struct fib_table *tb = iter->tb;
net/ipv4/fib_trie.c
2692
n = fib_trie_get_next(iter);
net/ipv4/fib_trie.c
2700
n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
net/ipv4/fib_trie.c
2709
n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
net/ipv4/fib_trie.c
2717
iter->tb = tb;
net/ipv4/fib_trie.c
2773
const struct fib_trie_iter *iter = seq->private;
net/ipv4/fib_trie.c
2777
fib_table_print(seq, iter->tb);
net/ipv4/fib_trie.c
2782
seq_indent(seq, iter->depth-1);
net/ipv4/fib_trie.c
2791
seq_indent(seq, iter->depth);
net/ipv4/fib_trie.c
2797
seq_indent(seq, iter->depth + 1);
net/ipv4/fib_trie.c
2829
static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
net/ipv4/fib_trie.c
2832
struct key_vector *l, **tp = &iter->tnode;
net/ipv4/fib_trie.c
2836
if (iter->pos > 0 && pos >= iter->pos) {
net/ipv4/fib_trie.c
2837
key = iter->key;
net/ipv4/fib_trie.c
2839
iter->pos = 1;
net/ipv4/fib_trie.c
2843
pos -= iter->pos;
net/ipv4/fib_trie.c
2847
iter->pos++;
net/ipv4/fib_trie.c
2856
iter->key = l->key; /* remember it */
net/ipv4/fib_trie.c
2858
iter->pos = 0; /* forget it */
net/ipv4/fib_trie.c
2866
struct fib_route_iter *iter = seq->private;
net/ipv4/fib_trie.c
2876
iter->main_tb = tb;
net/ipv4/fib_trie.c
2878
iter->tnode = t->kv;
net/ipv4/fib_trie.c
2881
return fib_route_get_idx(iter, *pos);
net/ipv4/fib_trie.c
2883
iter->pos = 0;
net/ipv4/fib_trie.c
2884
iter->key = KEY_MAX;
net/ipv4/fib_trie.c
2891
struct fib_route_iter *iter = seq->private;
net/ipv4/fib_trie.c
2893
t_key key = iter->key + 1;
net/ipv4/fib_trie.c
2899
l = leaf_walk_rcu(&iter->tnode, key);
net/ipv4/fib_trie.c
2902
iter->key = l->key;
net/ipv4/fib_trie.c
2903
iter->pos++;
net/ipv4/fib_trie.c
2905
iter->pos = 0;
net/ipv4/fib_trie.c
2943
struct fib_route_iter *iter = seq->private;
net/ipv4/fib_trie.c
2944
struct fib_table *tb = iter->main_tb;
net/ipv4/ip_output.c
599
unsigned int hlen, struct ip_fraglist_iter *iter)
net/ipv4/ip_output.c
603
iter->frag = skb_shinfo(skb)->frag_list;
net/ipv4/ip_output.c
606
iter->offset = 0;
net/ipv4/ip_output.c
607
iter->iph = iph;
net/ipv4/ip_output.c
608
iter->hlen = hlen;
net/ipv4/ip_output.c
618
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
net/ipv4/ip_output.c
620
unsigned int hlen = iter->hlen;
net/ipv4/ip_output.c
621
struct iphdr *iph = iter->iph;
net/ipv4/ip_output.c
624
frag = iter->frag;
net/ipv4/ip_output.c
630
iter->iph = ip_hdr(frag);
net/ipv4/ip_output.c
631
iph = iter->iph;
net/ipv4/ip_output.c
634
iter->offset += skb->len - hlen;
net/ipv4/ip_output.c
635
iph->frag_off = htons(iter->offset >> 3);
net/ipv4/ip_output.c
769
struct ip_fraglist_iter iter;
net/ipv4/ip_output.c
836
ip_fraglist_init(skb, iph, hlen, &iter);
net/ipv4/ip_output.c
841
if (iter.frag) {
net/ipv4/ip_output.c
842
bool first_frag = (iter.offset == 0);
net/ipv4/ip_output.c
844
IPCB(iter.frag)->flags = IPCB(skb)->flags;
net/ipv4/ip_output.c
845
ip_fraglist_prepare(skb, &iter);
net/ipv4/ip_output.c
851
IPCB(iter.frag)->opt.optlen =
net/ipv4/ip_output.c
853
ip_options_fragment(iter.frag);
net/ipv4/ip_output.c
854
ip_send_check(iter.iph);
net/ipv4/ip_output.c
863
if (err || !iter.frag)
net/ipv4/ip_output.c
866
skb = ip_fraglist_next(&iter);
net/ipv4/ip_output.c
874
kfree_skb_list(iter.frag);
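The ip_fraglist_iter entries above (ip_output.c, the same dance in nf_conntrack_bridge.c, and ip6_fraglist_iter on the IPv6 side) implement the frag-list fast path: initialise the iterator over skb's frag_list, prepare and send one fragment per pass, and free whatever is left if sending fails. A hedged sketch of that loop, with output() standing in for the real transmit callback:

	struct ip_fraglist_iter iter;
	int err;

	ip_fraglist_init(skb, iph, hlen, &iter);

	for (;;) {
		if (iter.frag)
			ip_fraglist_prepare(skb, &iter);	/* fix up this fragment's header */

		err = output(net, sk, skb);
		if (err || !iter.frag)
			break;

		skb = ip_fraglist_next(&iter);		/* detach and hand back the next fragment */
	}

	if (err)
		kfree_skb_list(iter.frag);		/* drop the fragments we never sent */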
net/ipv4/ipconfig.c
329
struct list_head *iter;
net/ipv4/ipconfig.c
335
netdev_for_each_lower_dev(selected_dev, lower, iter) {
net/ipv4/ipmr.c
3065
struct mr_vif_iter *iter = seq->private;
net/ipv4/ipmr.c
3076
iter->mrt = mrt;
net/ipv4/ipmr.c
3089
struct mr_vif_iter *iter = seq->private;
net/ipv4/ipmr.c
3090
struct mr_table *mrt = iter->mrt;
net/ipv4/ipmr_base.c
114
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
net/ipv4/ipmr_base.c
116
struct mr_table *mrt = iter->mrt;
net/ipv4/ipmr_base.c
118
for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
net/ipv4/ipmr_base.c
119
if (!VIF_EXISTS(mrt, iter->ct))
net/ipv4/ipmr_base.c
122
return &mrt->vif_table[iter->ct];
net/ipv4/ipmr_base.c
130
struct mr_vif_iter *iter = seq->private;
net/ipv4/ipmr_base.c
132
struct mr_table *mrt = iter->mrt;
net/ipv4/ipmr_base.c
136
return mr_vif_seq_idx(net, iter, 0);
net/ipv4/ipmr_base.c
138
while (++iter->ct < mrt->maxvif) {
net/ipv4/ipmr_base.c
139
if (!VIF_EXISTS(mrt, iter->ct))
net/ipv4/ipmr_base.c
141
return &mrt->vif_table[iter->ct];
net/ipv4/ipmr_base.c
352
struct mr_table *(*iter)(struct net *net,
net/ipv4/ipmr_base.c
375
for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
net/ipv4/netfilter/arp_tables.c
1011
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
1034
xt_entry_foreach(iter, private->entries, private->size) {
net/ipv4/netfilter/arp_tables.c
1037
tmp = xt_get_this_cpu_counter(&iter->counters);
net/ipv4/netfilter/arp_tables.c
1259
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
1298
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv4/netfilter/arp_tables.c
1299
cleanup_entry(iter, net);
net/ipv4/netfilter/arp_tables.c
1349
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
1357
xt_entry_foreach(iter, private->entries, total_size) {
net/ipv4/netfilter/arp_tables.c
1358
ret = compat_copy_entry_to_user(iter, &pos,
net/ipv4/netfilter/arp_tables.c
1495
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
1501
xt_entry_foreach(iter, loc_cpu_entry, private->size)
net/ipv4/netfilter/arp_tables.c
1502
cleanup_entry(iter, net);
net/ipv4/netfilter/arp_tables.c
1536
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
1538
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv4/netfilter/arp_tables.c
1539
cleanup_entry(iter, net);
net/ipv4/netfilter/arp_tables.c
525
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
545
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv4/netfilter/arp_tables.c
546
ret = check_entry_size_and_hooks(iter, newinfo, entry0,
net/ipv4/netfilter/arp_tables.c
554
offsets[i] = (void *)iter - entry0;
net/ipv4/netfilter/arp_tables.c
556
if (strcmp(arpt_get_target(iter)->u.user.name,
net/ipv4/netfilter/arp_tables.c
577
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv4/netfilter/arp_tables.c
578
ret = find_check_entry(iter, net, repl->name, repl->size,
net/ipv4/netfilter/arp_tables.c
586
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv4/netfilter/arp_tables.c
589
cleanup_entry(iter, net);
net/ipv4/netfilter/arp_tables.c
603
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
611
xt_entry_foreach(iter, t->entries, t->size) {
net/ipv4/netfilter/arp_tables.c
616
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/netfilter/arp_tables.c
633
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
638
xt_entry_foreach(iter, t->entries, t->size) {
net/ipv4/netfilter/arp_tables.c
641
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/netfilter/arp_tables.c
768
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
782
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
net/ipv4/netfilter/arp_tables.c
783
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
net/ipv4/netfilter/arp_tables.c
891
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
930
xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
net/ipv4/netfilter/arp_tables.c
931
cleanup_entry(iter, net);
net/ipv4/netfilter/arp_tables.c
957
struct arpt_entry *iter;
net/ipv4/netfilter/arp_tables.c
996
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv4/netfilter/arp_tables.c
997
cleanup_entry(iter, net);
net/ipv4/netfilter/ip_tables.c
1044
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
1081
xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
net/ipv4/netfilter/ip_tables.c
1082
cleanup_entry(iter, net);
net/ipv4/netfilter/ip_tables.c
1109
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
1148
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv4/netfilter/ip_tables.c
1149
cleanup_entry(iter, net);
net/ipv4/netfilter/ip_tables.c
1164
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
1186
xt_entry_foreach(iter, private->entries, private->size) {
net/ipv4/netfilter/ip_tables.c
1189
tmp = xt_get_this_cpu_counter(&iter->counters);
net/ipv4/netfilter/ip_tables.c
1497
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
1536
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv4/netfilter/ip_tables.c
1537
cleanup_entry(iter, net);
net/ipv4/netfilter/ip_tables.c
1559
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
1567
xt_entry_foreach(iter, private->entries, total_size) {
net/ipv4/netfilter/ip_tables.c
1568
ret = compat_copy_entry_to_user(iter, &pos,
net/ipv4/netfilter/ip_tables.c
1710
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
1716
xt_entry_foreach(iter, loc_cpu_entry, private->size)
net/ipv4/netfilter/ip_tables.c
1717
cleanup_entry(iter, net);
net/ipv4/netfilter/ip_tables.c
1750
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
1752
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv4/netfilter/ip_tables.c
1753
cleanup_entry(iter, net);
net/ipv4/netfilter/ip_tables.c
196
const struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
204
xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
net/ipv4/netfilter/ip_tables.c
205
if (get_chainname_rulenum(iter, e, hookname,
net/ipv4/netfilter/ip_tables.c
664
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
683
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv4/netfilter/ip_tables.c
684
ret = check_entry_size_and_hooks(iter, newinfo, entry0,
net/ipv4/netfilter/ip_tables.c
692
offsets[i] = (void *)iter - entry0;
net/ipv4/netfilter/ip_tables.c
694
if (strcmp(ipt_get_target(iter)->u.user.name,
net/ipv4/netfilter/ip_tables.c
715
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv4/netfilter/ip_tables.c
716
ret = find_check_entry(iter, net, repl->name, repl->size,
net/ipv4/netfilter/ip_tables.c
724
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv4/netfilter/ip_tables.c
727
cleanup_entry(iter, net);
net/ipv4/netfilter/ip_tables.c
742
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
750
xt_entry_foreach(iter, t->entries, t->size) {
net/ipv4/netfilter/ip_tables.c
755
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/netfilter/ip_tables.c
772
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
777
xt_entry_foreach(iter, t->entries, t->size) {
net/ipv4/netfilter/ip_tables.c
780
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv4/netfilter/ip_tables.c
923
struct ipt_entry *iter;
net/ipv4/netfilter/ip_tables.c
937
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
net/ipv4/netfilter/ip_tables.c
938
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
net/ipv4/tcp_ipv4.c
2988
static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
net/ipv4/tcp_ipv4.c
2991
unsigned int cur_sk = iter->cur_sk;
net/ipv4/tcp_ipv4.c
2997
while (cur_sk < iter->end_sk) {
net/ipv4/tcp_ipv4.c
2998
item = &iter->batch[cur_sk++];
net/ipv4/tcp_ipv4.c
3005
static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
net/ipv4/tcp_ipv4.c
3015
memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk);
net/ipv4/tcp_ipv4.c
3016
kvfree(iter->batch);
net/ipv4/tcp_ipv4.c
3017
iter->batch = new_batch;
net/ipv4/tcp_ipv4.c
3018
iter->max_sk = new_batch_sz;
net/ipv4/tcp_ipv4.c
3044
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3045
struct tcp_iter_state *st = &iter->state;
net/ipv4/tcp_ipv4.c
3046
unsigned int find_cookie = iter->cur_sk;
net/ipv4/tcp_ipv4.c
3047
unsigned int end_cookie = iter->end_sk;
net/ipv4/tcp_ipv4.c
3055
iter->cur_sk = 0;
net/ipv4/tcp_ipv4.c
3056
iter->end_sk = 0;
net/ipv4/tcp_ipv4.c
3059
sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
net/ipv4/tcp_ipv4.c
3074
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3075
struct tcp_iter_state *st = &iter->state;
net/ipv4/tcp_ipv4.c
3076
unsigned int find_cookie = iter->cur_sk;
net/ipv4/tcp_ipv4.c
3077
unsigned int end_cookie = iter->end_sk;
net/ipv4/tcp_ipv4.c
3085
iter->cur_sk = 0;
net/ipv4/tcp_ipv4.c
3086
iter->end_sk = 0;
net/ipv4/tcp_ipv4.c
3089
sk = bpf_iter_tcp_resume_bucket(sk, &iter->batch[find_cookie],
net/ipv4/tcp_ipv4.c
3103
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3104
struct tcp_iter_state *st = &iter->state;
net/ipv4/tcp_ipv4.c
3126
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3132
iter->batch[iter->end_sk++].sk = *start_sk;
net/ipv4/tcp_ipv4.c
3138
if (iter->end_sk < iter->max_sk) {
net/ipv4/tcp_ipv4.c
3140
iter->batch[iter->end_sk++].sk = sk;
net/ipv4/tcp_ipv4.c
3155
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3161
iter->batch[iter->end_sk++].sk = *start_sk;
net/ipv4/tcp_ipv4.c
3167
if (iter->end_sk < iter->max_sk) {
net/ipv4/tcp_ipv4.c
3169
iter->batch[iter->end_sk++].sk = sk;
net/ipv4/tcp_ipv4.c
3184
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3185
struct tcp_iter_state *st = &iter->state;
net/ipv4/tcp_ipv4.c
3196
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3197
struct tcp_iter_state *st = &iter->state;
net/ipv4/tcp_ipv4.c
3207
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3217
if (likely(iter->end_sk == expected))
net/ipv4/tcp_ipv4.c
3222
bpf_iter_tcp_put_batch(iter);
net/ipv4/tcp_ipv4.c
3223
err = bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2,
net/ipv4/tcp_ipv4.c
3233
if (likely(iter->end_sk == expected))
net/ipv4/tcp_ipv4.c
3240
err = bpf_iter_tcp_realloc_batch(iter, expected, GFP_NOWAIT);
net/ipv4/tcp_ipv4.c
3247
WARN_ON_ONCE(iter->end_sk != expected);
net/ipv4/tcp_ipv4.c
3250
return iter->batch[0].sk;
net/ipv4/tcp_ipv4.c
3266
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3267
struct tcp_iter_state *st = &iter->state;
net/ipv4/tcp_ipv4.c
3274
if (iter->cur_sk < iter->end_sk) {
net/ipv4/tcp_ipv4.c
3280
sock_gen_put(iter->batch[iter->cur_sk++].sk);
net/ipv4/tcp_ipv4.c
3283
if (iter->cur_sk < iter->end_sk)
net/ipv4/tcp_ipv4.c
3284
sk = iter->batch[iter->cur_sk].sk;
net/ipv4/tcp_ipv4.c
3339
struct bpf_tcp_iter_state *iter = seq->private;
net/ipv4/tcp_ipv4.c
3350
if (iter->cur_sk < iter->end_sk)
net/ipv4/tcp_ipv4.c
3351
bpf_iter_tcp_put_batch(iter);
net/ipv4/tcp_ipv4.c
3653
struct bpf_tcp_iter_state *iter = priv_data;
net/ipv4/tcp_ipv4.c
3660
err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ, GFP_USER);
net/ipv4/tcp_ipv4.c
3671
struct bpf_tcp_iter_state *iter = priv_data;
net/ipv4/tcp_ipv4.c
3674
kvfree(iter->batch);
net/ipv4/udp.c
3484
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
net/ipv4/udp.c
3505
struct bpf_udp_iter_state *iter = seq->private;
net/ipv4/udp.c
3506
struct udp_iter_state *state = &iter->state;
net/ipv4/udp.c
3519
if (iter->cur_sk == iter->end_sk)
net/ipv4/udp.c
3532
find_cookie = iter->cur_sk;
net/ipv4/udp.c
3533
end_cookie = iter->end_sk;
net/ipv4/udp.c
3534
iter->cur_sk = 0;
net/ipv4/udp.c
3535
iter->end_sk = 0;
net/ipv4/udp.c
3554
sk = bpf_iter_udp_resume(sk, &iter->batch[find_cookie],
net/ipv4/udp.c
3559
if (iter->end_sk < iter->max_sk) {
net/ipv4/udp.c
3561
iter->batch[iter->end_sk++].sk = sk;
net/ipv4/udp.c
3568
if (unlikely(resizes <= 1 && iter->end_sk &&
net/ipv4/udp.c
3569
iter->end_sk != batch_sks)) {
net/ipv4/udp.c
3577
err = bpf_iter_udp_realloc_batch(iter,
net/ipv4/udp.c
3589
err = bpf_iter_udp_realloc_batch(iter, batch_sks,
net/ipv4/udp.c
3597
sk = iter->batch[iter->end_sk - 1].sk;
net/ipv4/udp.c
3601
batch_sks = iter->end_sk;
net/ipv4/udp.c
3607
if (iter->end_sk)
net/ipv4/udp.c
3613
WARN_ON_ONCE(iter->end_sk != batch_sks);
net/ipv4/udp.c
3614
return iter->end_sk ? iter->batch[0].sk : NULL;
net/ipv4/udp.c
3619
struct bpf_udp_iter_state *iter = seq->private;
net/ipv4/udp.c
3625
if (iter->cur_sk < iter->end_sk)
net/ipv4/udp.c
3626
sock_put(iter->batch[iter->cur_sk++].sk);
net/ipv4/udp.c
3631
if (iter->cur_sk < iter->end_sk)
net/ipv4/udp.c
3632
sk = iter->batch[iter->cur_sk].sk;
net/ipv4/udp.c
3694
static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter)
net/ipv4/udp.c
3697
unsigned int cur_sk = iter->cur_sk;
net/ipv4/udp.c
3703
while (cur_sk < iter->end_sk) {
net/ipv4/udp.c
3704
item = &iter->batch[cur_sk++];
net/ipv4/udp.c
3713
struct bpf_udp_iter_state *iter = seq->private;
net/ipv4/udp.c
3724
if (iter->cur_sk < iter->end_sk)
net/ipv4/udp.c
3725
bpf_iter_udp_put_batch(iter);
net/ipv4/udp.c
3975
static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter,
net/ipv4/udp.c
3986
bpf_iter_udp_put_batch(iter);
net/ipv4/udp.c
3988
memcpy(new_batch, iter->batch, sizeof(*iter->batch) * iter->end_sk);
net/ipv4/udp.c
3989
kvfree(iter->batch);
net/ipv4/udp.c
3990
iter->batch = new_batch;
net/ipv4/udp.c
3991
iter->max_sk = new_batch_sz;
net/ipv4/udp.c
4000
struct bpf_udp_iter_state *iter = priv_data;
net/ipv4/udp.c
4007
ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ, GFP_USER);
net/ipv4/udp.c
4011
iter->state.bucket = -1;
net/ipv4/udp.c
4018
struct bpf_udp_iter_state *iter = priv_data;
net/ipv4/udp.c
4021
kvfree(iter->batch);
net/ipv6/calipso.c
134
u32 iter;
net/ipv6/calipso.c
141
for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) {
net/ipv6/calipso.c
142
spin_lock_init(&calipso_cache[iter].lock);
net/ipv6/calipso.c
143
calipso_cache[iter].size = 0;
net/ipv6/calipso.c
144
INIT_LIST_HEAD(&calipso_cache[iter].list);
net/ipv6/calipso.c
161
u32 iter;
net/ipv6/calipso.c
163
for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) {
net/ipv6/calipso.c
164
spin_lock_bh(&calipso_cache[iter].lock);
net/ipv6/calipso.c
167
&calipso_cache[iter].list, list) {
net/ipv6/calipso.c
171
calipso_cache[iter].size = 0;
net/ipv6/calipso.c
172
spin_unlock_bh(&calipso_cache[iter].lock);
net/ipv6/calipso.c
326
struct calipso_doi *iter;
net/ipv6/calipso.c
328
list_for_each_entry_rcu(iter, &calipso_doi_list, list)
net/ipv6/calipso.c
329
if (iter->doi == doi && refcount_read(&iter->refcount))
net/ipv6/calipso.c
330
return iter;
net/ipv6/ila/ila_xlat.c
383
struct rhashtable_iter iter;
net/ipv6/ila/ila_xlat.c
388
rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter);
net/ipv6/ila/ila_xlat.c
389
rhashtable_walk_start(&iter);
net/ipv6/ila/ila_xlat.c
392
ila = rhashtable_walk_next(&iter);
net/ipv6/ila/ila_xlat.c
419
rhashtable_walk_stop(&iter);
net/ipv6/ila/ila_xlat.c
420
rhashtable_walk_exit(&iter);
net/ipv6/ila/ila_xlat.c
510
struct ila_dump_iter *iter;
net/ipv6/ila/ila_xlat.c
512
iter = kmalloc_obj(*iter);
net/ipv6/ila/ila_xlat.c
513
if (!iter)
net/ipv6/ila/ila_xlat.c
516
rhashtable_walk_enter(&ilan->xlat.rhash_table, &iter->rhiter);
net/ipv6/ila/ila_xlat.c
518
iter->skip = 0;
net/ipv6/ila/ila_xlat.c
519
cb->args[0] = (long)iter;
net/ipv6/ila/ila_xlat.c
526
struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
net/ipv6/ila/ila_xlat.c
528
rhashtable_walk_exit(&iter->rhiter);
net/ipv6/ila/ila_xlat.c
530
kfree(iter);
net/ipv6/ila/ila_xlat.c
537
struct ila_dump_iter *iter = (struct ila_dump_iter *)cb->args[0];
net/ipv6/ila/ila_xlat.c
538
struct rhashtable_iter *rhiter = &iter->rhiter;
net/ipv6/ila/ila_xlat.c
539
int skip = iter->skip;
net/ipv6/ila/ila_xlat.c
597
iter->skip = skip;
net/ipv6/ioam6.c
245
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
247
if (!iter) {
net/ipv6/ioam6.c
248
iter = kmalloc_obj(*iter);
net/ipv6/ioam6.c
249
if (!iter)
net/ipv6/ioam6.c
252
cb->args[0] = (long)iter;
net/ipv6/ioam6.c
255
rhashtable_walk_enter(&nsdata->namespaces, iter);
net/ipv6/ioam6.c
262
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
264
rhashtable_walk_exit(iter);
net/ipv6/ioam6.c
265
kfree(iter);
net/ipv6/ioam6.c
272
struct rhashtable_iter *iter;
net/ipv6/ioam6.c
276
iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
277
rhashtable_walk_start(iter);
net/ipv6/ioam6.c
280
ns = rhashtable_walk_next(iter);
net/ipv6/ioam6.c
304
rhashtable_walk_stop(iter);
net/ipv6/ioam6.c
431
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
433
if (!iter) {
net/ipv6/ioam6.c
434
iter = kmalloc_obj(*iter);
net/ipv6/ioam6.c
435
if (!iter)
net/ipv6/ioam6.c
438
cb->args[0] = (long)iter;
net/ipv6/ioam6.c
441
rhashtable_walk_enter(&nsdata->schemas, iter);
net/ipv6/ioam6.c
448
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
450
rhashtable_walk_exit(iter);
net/ipv6/ioam6.c
451
kfree(iter);
net/ipv6/ioam6.c
458
struct rhashtable_iter *iter;
net/ipv6/ioam6.c
462
iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/ioam6.c
463
rhashtable_walk_start(iter);
net/ipv6/ioam6.c
466
sc = rhashtable_walk_next(iter);
net/ipv6/ioam6.c
490
rhashtable_walk_stop(iter);
net/ipv6/ip6_fib.c
1094
struct fib6_info *iter = NULL;
net/ipv6/ip6_fib.c
1112
for (iter = leaf; iter;
net/ipv6/ip6_fib.c
1113
iter = rcu_dereference_protected(iter->fib6_next,
net/ipv6/ip6_fib.c
1119
if (iter->fib6_metric == rt->fib6_metric) {
net/ipv6/ip6_fib.c
1129
if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
net/ipv6/ip6_fib.c
1137
if (rt6_duplicate_nexthop(iter, rt)) {
net/ipv6/ip6_fib.c
1140
if (!(iter->fib6_flags & RTF_EXPIRES))
net/ipv6/ip6_fib.c
1143
fib6_clean_expires(iter);
net/ipv6/ip6_fib.c
1144
fib6_may_remove_gc_list(info->nl_net, iter);
net/ipv6/ip6_fib.c
1146
fib6_set_expires(iter, rt->expires);
net/ipv6/ip6_fib.c
1147
fib6_add_gc_list(iter);
net/ipv6/ip6_fib.c
1150
(iter->nh || !iter->fib6_nh->fib_nh_gw_family)) {
net/ipv6/ip6_fib.c
1151
iter->fib6_flags &= ~RTF_ADDRCONF;
net/ipv6/ip6_fib.c
1152
iter->fib6_flags &= ~RTF_PREFIX_RT;
net/ipv6/ip6_fib.c
1156
fib6_metric_set(iter, RTAX_MTU,
net/ipv6/ip6_fib.c
1172
rt6_qualify_for_ecmp(iter))
net/ipv6/ip6_fib.c
1177
if (iter->fib6_metric > rt->fib6_metric)
net/ipv6/ip6_fib.c
1181
ins = &iter->fib6_next;
net/ipv6/ip6_fib.c
1189
iter = rcu_dereference_protected(*ins,
net/ipv6/ip6_fib.c
1283
rcu_assign_pointer(rt->fib6_next, iter);
net/ipv6/ip6_fib.c
1316
rt->fib6_next = iter->fib6_next;
net/ipv6/ip6_fib.c
1324
nsiblings = iter->fib6_nsiblings;
net/ipv6/ip6_fib.c
1325
iter->fib6_node = NULL;
net/ipv6/ip6_fib.c
1326
list_add(&iter->purge_link, purge_list);
net/ipv6/ip6_fib.c
1327
if (rcu_access_pointer(fn->rr_ptr) == iter)
net/ipv6/ip6_fib.c
1333
iter = rcu_dereference_protected(*ins,
net/ipv6/ip6_fib.c
1335
while (iter) {
net/ipv6/ip6_fib.c
1336
if (iter->fib6_metric > rt->fib6_metric)
net/ipv6/ip6_fib.c
1338
if (rt6_qualify_for_ecmp(iter)) {
net/ipv6/ip6_fib.c
1339
*ins = iter->fib6_next;
net/ipv6/ip6_fib.c
1340
iter->fib6_node = NULL;
net/ipv6/ip6_fib.c
1341
list_add(&iter->purge_link, purge_list);
net/ipv6/ip6_fib.c
1342
if (rcu_access_pointer(fn->rr_ptr) == iter)
net/ipv6/ip6_fib.c
1347
ins = &iter->fib6_next;
net/ipv6/ip6_fib.c
1349
iter = rcu_dereference_protected(*ins,
net/ipv6/ip6_fib.c
1543
struct fib6_info *iter, *next;
net/ipv6/ip6_fib.c
1545
list_for_each_entry_safe(iter, next, &purge_list, purge_link) {
net/ipv6/ip6_fib.c
1546
list_del(&iter->purge_link);
net/ipv6/ip6_fib.c
1547
fib6_purge_rt(iter, fn, info->nl_net);
net/ipv6/ip6_fib.c
1548
fib6_info_release(iter);
net/ipv6/ip6_fib.c
1855
int iter = 0;
net/ipv6/ip6_fib.c
1880
pr_debug("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
net/ipv6/ip6_fib.c
1881
iter++;
net/ipv6/ip6_fib.c
2611
struct ipv6_route_iter *iter = seq->private;
net/ipv6/ip6_fib.c
2637
iter->w.leaf = NULL;
net/ipv6/ip6_fib.c
2643
struct ipv6_route_iter *iter = w->args;
net/ipv6/ip6_fib.c
2645
if (!iter->skip)
net/ipv6/ip6_fib.c
2649
iter->w.leaf = rcu_dereference_protected(
net/ipv6/ip6_fib.c
2650
iter->w.leaf->fib6_next,
net/ipv6/ip6_fib.c
2651
lockdep_is_held(&iter->tbl->tb6_lock));
net/ipv6/ip6_fib.c
2652
iter->skip--;
net/ipv6/ip6_fib.c
2653
if (!iter->skip && iter->w.leaf)
net/ipv6/ip6_fib.c
2655
} while (iter->w.leaf);
net/ipv6/ip6_fib.c
2660
static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
net/ipv6/ip6_fib.c
2663
memset(&iter->w, 0, sizeof(iter->w));
net/ipv6/ip6_fib.c
2664
iter->w.func = ipv6_route_yield;
net/ipv6/ip6_fib.c
2665
iter->w.root = &iter->tbl->tb6_root;
net/ipv6/ip6_fib.c
2666
iter->w.state = FWS_INIT;
net/ipv6/ip6_fib.c
2667
iter->w.node = iter->w.root;
net/ipv6/ip6_fib.c
2668
iter->w.args = iter;
net/ipv6/ip6_fib.c
2669
iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
net/ipv6/ip6_fib.c
2670
INIT_LIST_HEAD(&iter->w.lh);
net/ipv6/ip6_fib.c
2671
fib6_walker_link(net, &iter->w);
net/ipv6/ip6_fib.c
2695
static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
net/ipv6/ip6_fib.c
2697
int sernum = READ_ONCE(iter->w.root->fn_sernum);
net/ipv6/ip6_fib.c
2699
if (iter->sernum != sernum) {
net/ipv6/ip6_fib.c
2700
iter->sernum = sernum;
net/ipv6/ip6_fib.c
2701
iter->w.state = FWS_INIT;
net/ipv6/ip6_fib.c
2702
iter->w.node = iter->w.root;
net/ipv6/ip6_fib.c
2703
WARN_ON(iter->w.skip);
net/ipv6/ip6_fib.c
2704
iter->w.skip = iter->w.count;
net/ipv6/ip6_fib.c
2713
struct ipv6_route_iter *iter = seq->private;
net/ipv6/ip6_fib.c
2724
ipv6_route_check_sernum(iter);
net/ipv6/ip6_fib.c
2725
spin_lock_bh(&iter->tbl->tb6_lock);
net/ipv6/ip6_fib.c
2726
r = fib6_walk_continue(&iter->w);
net/ipv6/ip6_fib.c
2727
spin_unlock_bh(&iter->tbl->tb6_lock);
net/ipv6/ip6_fib.c
2729
return iter->w.leaf;
net/ipv6/ip6_fib.c
2731
fib6_walker_unlink(net, &iter->w);
net/ipv6/ip6_fib.c
2734
fib6_walker_unlink(net, &iter->w);
net/ipv6/ip6_fib.c
2736
iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
net/ipv6/ip6_fib.c
2737
if (!iter->tbl)
net/ipv6/ip6_fib.c
2740
ipv6_route_seq_setup_walk(iter, net);
net/ipv6/ip6_fib.c
2748
struct ipv6_route_iter *iter = seq->private;
net/ipv6/ip6_fib.c
2751
iter->tbl = ipv6_route_seq_next_table(NULL, net);
net/ipv6/ip6_fib.c
2752
iter->skip = *pos;
net/ipv6/ip6_fib.c
2754
if (iter->tbl) {
net/ipv6/ip6_fib.c
2757
ipv6_route_seq_setup_walk(iter, net);
net/ipv6/ip6_fib.c
2764
static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
net/ipv6/ip6_fib.c
2766
struct fib6_walker *w = &iter->w;
net/ipv6/ip6_fib.c
2774
struct ipv6_route_iter *iter = seq->private;
net/ipv6/ip6_fib.c
2776
if (ipv6_route_iter_active(iter))
net/ipv6/ip6_fib.c
2777
fib6_walker_unlink(net, &iter->w);
net/ipv6/ip6_fib.c
2796
struct ipv6_route_iter *iter = seq->private;
net/ipv6/ip6_fib.c
2807
iter->w.leaf = NULL;
net/ipv6/ip6_gre.c
278
struct ip6_tnl *iter;
net/ipv6/ip6_gre.c
281
(iter = rtnl_dereference(*tp)) != NULL;
net/ipv6/ip6_gre.c
282
tp = &iter->next) {
net/ipv6/ip6_gre.c
283
if (t == iter) {
net/ipv6/ip6_output.c
700
struct ip6_fraglist_iter *iter)
net/ipv6/ip6_output.c
707
iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
net/ipv6/ip6_output.c
708
if (!iter->tmp_hdr)
net/ipv6/ip6_output.c
711
iter->frag = skb_shinfo(skb)->frag_list;
net/ipv6/ip6_output.c
714
iter->offset = 0;
net/ipv6/ip6_output.c
715
iter->hlen = hlen;
net/ipv6/ip6_output.c
716
iter->frag_id = frag_id;
net/ipv6/ip6_output.c
717
iter->nexthdr = nexthdr;
net/ipv6/ip6_output.c
723
memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);
net/ipv6/ip6_output.c
740
struct ip6_fraglist_iter *iter)
net/ipv6/ip6_output.c
742
struct sk_buff *frag = iter->frag;
net/ipv6/ip6_output.c
743
unsigned int hlen = iter->hlen;
net/ipv6/ip6_output.c
751
memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
net/ipv6/ip6_output.c
752
iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
net/ipv6/ip6_output.c
753
fh->nexthdr = iter->nexthdr;
net/ipv6/ip6_output.c
755
fh->frag_off = htons(iter->offset);
net/ipv6/ip6_output.c
758
fh->identification = iter->frag_id;
net/ipv6/ip6_output.c
922
struct ip6_fraglist_iter iter;
net/ipv6/ip6_output.c
951
&iter);
net/ipv6/ip6_output.c
961
if (iter.frag)
net/ipv6/ip6_output.c
962
ip6_fraglist_prepare(skb, &iter);
net/ipv6/ip6_output.c
970
if (err || !iter.frag)
net/ipv6/ip6_output.c
973
skb = ip6_fraglist_next(&iter);
net/ipv6/ip6_output.c
976
kfree(iter.tmp_hdr);
net/ipv6/ip6_output.c
985
kfree_skb_list(iter.frag);
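
The ip6_output.c entries above trace the fast-path fragmentation loop. A minimal caller sketch, assuming the ip6_fraglist_init() signature from include/net/ipv6.h and a placeholder output() transmit hook:

    struct ip6_fraglist_iter iter;
    int err;

    err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
    if (err)
        goto slow_path;                      /* fall back to the copying path */

    for (;;) {
        if (iter.frag)                       /* more fragments queued */
            ip6_fraglist_prepare(skb, &iter);

        err = output(net, sk, skb);          /* placeholder transmit hook */
        if (err || !iter.frag)
            break;

        skb = ip6_fraglist_next(&iter);      /* advance to the next fragment */
    }
    kfree(iter.tmp_hdr);                     /* saved header copy */
    if (err)
        kfree_skb_list(iter.frag);           /* drop unsent fragments */
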
net/ipv6/ip6_tunnel.c
231
struct ip6_tnl *iter;
net/ipv6/ip6_tunnel.c
237
(iter = rtnl_dereference(*tp)) != NULL;
net/ipv6/ip6_tunnel.c
238
tp = &iter->next) {
net/ipv6/ip6_tunnel.c
239
if (t == iter) {
net/ipv6/ip6_vti.c
166
struct ip6_tnl *iter;
net/ipv6/ip6_vti.c
169
(iter = rtnl_dereference(*tp)) != NULL;
net/ipv6/ip6_vti.c
170
tp = &iter->next) {
net/ipv6/ip6_vti.c
171
if (t == iter) {
net/ipv6/ip6mr.c
426
struct mr_vif_iter *iter = seq->private;
net/ipv6/ip6mr.c
437
iter->mrt = mrt;
net/ipv6/ip6mr.c
450
struct mr_vif_iter *iter = seq->private;
net/ipv6/ip6mr.c
451
struct mr_table *mrt = iter->mrt;
net/ipv6/netfilter.c
167
struct ip6_fraglist_iter iter;
net/ipv6/netfilter.c
188
&iter);
net/ipv6/netfilter.c
196
if (iter.frag)
net/ipv6/netfilter.c
197
ip6_fraglist_prepare(skb, &iter);
net/ipv6/netfilter.c
201
if (err || !iter.frag)
net/ipv6/netfilter.c
204
skb = ip6_fraglist_next(&iter);
net/ipv6/netfilter.c
207
kfree(iter.tmp_hdr);
net/ipv6/netfilter.c
211
kfree_skb_list(iter.frag);
net/ipv6/netfilter/ip6_tables.c
1061
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
1098
xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
net/ipv6/netfilter/ip6_tables.c
1099
cleanup_entry(iter, net);
net/ipv6/netfilter/ip6_tables.c
1126
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
1165
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv6/netfilter/ip6_tables.c
1166
cleanup_entry(iter, net);
net/ipv6/netfilter/ip6_tables.c
1181
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
1202
xt_entry_foreach(iter, private->entries, private->size) {
net/ipv6/netfilter/ip6_tables.c
1205
tmp = xt_get_this_cpu_counter(&iter->counters);
net/ipv6/netfilter/ip6_tables.c
1506
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
1545
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv6/netfilter/ip6_tables.c
1546
cleanup_entry(iter, net);
net/ipv6/netfilter/ip6_tables.c
1568
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
1576
xt_entry_foreach(iter, private->entries, total_size) {
net/ipv6/netfilter/ip6_tables.c
1577
ret = compat_copy_entry_to_user(iter, &pos,
net/ipv6/netfilter/ip6_tables.c
1719
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
1725
xt_entry_foreach(iter, loc_cpu_entry, private->size)
net/ipv6/netfilter/ip6_tables.c
1726
cleanup_entry(iter, net);
net/ipv6/netfilter/ip6_tables.c
1759
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
1761
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
net/ipv6/netfilter/ip6_tables.c
1762
cleanup_entry(iter, net);
net/ipv6/netfilter/ip6_tables.c
221
const struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
229
xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
net/ipv6/netfilter/ip6_tables.c
230
if (get_chainname_rulenum(iter, e, hookname,
net/ipv6/netfilter/ip6_tables.c
681
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
700
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv6/netfilter/ip6_tables.c
701
ret = check_entry_size_and_hooks(iter, newinfo, entry0,
net/ipv6/netfilter/ip6_tables.c
709
offsets[i] = (void *)iter - entry0;
net/ipv6/netfilter/ip6_tables.c
711
if (strcmp(ip6t_get_target(iter)->u.user.name,
net/ipv6/netfilter/ip6_tables.c
732
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv6/netfilter/ip6_tables.c
733
ret = find_check_entry(iter, net, repl->name, repl->size,
net/ipv6/netfilter/ip6_tables.c
741
xt_entry_foreach(iter, entry0, newinfo->size) {
net/ipv6/netfilter/ip6_tables.c
744
cleanup_entry(iter, net);
net/ipv6/netfilter/ip6_tables.c
759
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
767
xt_entry_foreach(iter, t->entries, t->size) {
net/ipv6/netfilter/ip6_tables.c
772
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv6/netfilter/ip6_tables.c
789
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
794
xt_entry_foreach(iter, t->entries, t->size) {
net/ipv6/netfilter/ip6_tables.c
797
tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
net/ipv6/netfilter/ip6_tables.c
939
struct ip6t_entry *iter;
net/ipv6/netfilter/ip6_tables.c
953
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
net/ipv6/netfilter/ip6_tables.c
954
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
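
Two idioms recur throughout the ip6_tables.c entries above: tearing down a rule blob, and summing per-CPU rule counters. xt_entry_foreach() steps through the variable-sized entries packed into the blob. Sketch, reusing the names from the quotes:

    struct ip6t_entry *iter;

    /* undo every entry's match/target references on replace/unregister */
    xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
        cleanup_entry(iter, net);

    /* fold one CPU's counters into a per-rule total */
    xt_entry_foreach(iter, t->entries, t->size) {
        struct xt_counters *tmp;

        tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
        /* ... accumulate tmp->bcnt and tmp->pcnt for this rule ... */
    }
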
net/ipv6/route.c
417
struct fib6_info *iter;
net/ipv6/route.c
423
iter = rcu_dereference(fn->leaf);
net/ipv6/route.c
424
if (!iter)
net/ipv6/route.c
427
while (iter) {
net/ipv6/route.c
428
if (iter->fib6_metric == rt->fib6_metric &&
net/ipv6/route.c
429
rt6_qualify_for_ecmp(iter))
net/ipv6/route.c
430
return iter;
net/ipv6/route.c
431
iter = rcu_dereference(iter->fib6_next);
net/ipv6/route.c
4779
struct fib6_info *iter;
net/ipv6/route.c
4784
iter = rcu_dereference_protected(fn->leaf,
net/ipv6/route.c
4786
while (iter) {
net/ipv6/route.c
4787
if (iter->fib6_metric == rt->fib6_metric &&
net/ipv6/route.c
4788
rt6_qualify_for_ecmp(iter))
net/ipv6/route.c
4789
return iter;
net/ipv6/route.c
4790
iter = rcu_dereference_protected(iter->fib6_next,
net/ipv6/route.c
4810
struct fib6_info *iter;
net/ipv6/route.c
4816
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
net/ipv6/route.c
4817
if (!rt6_is_dead(iter))
net/ipv6/route.c
4818
total += iter->fib6_nh->fib_nh_weight;
net/ipv6/route.c
4838
struct fib6_info *iter;
net/ipv6/route.c
4843
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
net/ipv6/route.c
4844
rt6_upper_bound_set(iter, &weight, total);
net/ipv6/route.c
4905
struct fib6_info *iter;
net/ipv6/route.c
4909
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
net/ipv6/route.c
4910
if (iter->fib6_nh->fib_nh_dev == dev)
net/ipv6/route.c
4918
struct fib6_info *iter;
net/ipv6/route.c
4921
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
net/ipv6/route.c
4922
iter->should_flush = 1;
net/ipv6/route.c
4928
struct fib6_info *iter;
net/ipv6/route.c
4934
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
net/ipv6/route.c
4935
if (iter->fib6_nh->fib_nh_dev == down_dev ||
net/ipv6/route.c
4936
iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
net/ipv6/route.c
4946
struct fib6_info *iter;
net/ipv6/route.c
4950
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
net/ipv6/route.c
4951
if (iter->fib6_nh->fib_nh_dev == dev)
net/ipv6/route.c
4952
iter->fib6_nh->fib_nh_flags |= nh_flags;
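
The route.c entries above all walk rt->fib6_siblings, the list tying the legs of an ECMP route together. Restated as a single helper (a sketch; the quoted callers hold RCU or the table lock, which is assumed here):

    static void fib6_siblings_set_flags(struct fib6_info *rt,
                                        const struct net_device *dev,
                                        unsigned char nh_flags)
    {
        struct fib6_info *iter;

        list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
            if (iter->fib6_nh->fib_nh_dev == dev)
                iter->fib6_nh->fib_nh_flags |= nh_flags;
    }
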
net/ipv6/seg6.c
336
struct rhashtable_iter *iter;
net/ipv6/seg6.c
339
iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/seg6.c
341
if (!iter) {
net/ipv6/seg6.c
342
iter = kmalloc_obj(*iter);
net/ipv6/seg6.c
343
if (!iter)
net/ipv6/seg6.c
346
cb->args[0] = (long)iter;
net/ipv6/seg6.c
349
rhashtable_walk_enter(&sdata->hmac_infos, iter);
net/ipv6/seg6.c
356
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/seg6.c
358
rhashtable_walk_exit(iter);
net/ipv6/seg6.c
360
kfree(iter);
net/ipv6/seg6.c
367
struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];
net/ipv6/seg6.c
371
rhashtable_walk_start(iter);
net/ipv6/seg6.c
374
hinfo = rhashtable_walk_next(iter);
net/ipv6/seg6.c
397
rhashtable_walk_stop(iter);
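
The seg6.c entries above split the standard rhashtable walk life cycle across a netlink dump: enter once at setup, start/next/stop per batch, exit once at teardown. Condensed sketch (the quotes allocate via a kmalloc_obj() helper; plain kmalloc() is used here):

    struct rhashtable_iter *iter = kmalloc(sizeof(*iter), GFP_KERNEL);

    if (!iter)
        return -ENOMEM;
    rhashtable_walk_enter(&sdata->hmac_infos, iter); /* once, at setup */

    rhashtable_walk_start(iter);                     /* per dump batch */
    while ((hinfo = rhashtable_walk_next(iter)) != NULL) {
        if (IS_ERR(hinfo)) {
            if (PTR_ERR(hinfo) == -EAGAIN)
                continue;                            /* table resized, retry */
            break;
        }
        /* ... emit one entry into the dump skb ... */
    }
    rhashtable_walk_stop(iter);

    rhashtable_walk_exit(iter);                      /* once, at teardown */
    kfree(iter);
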
net/ipv6/sit.c
165
struct ip_tunnel *iter;
net/ipv6/sit.c
168
(iter = rtnl_dereference(*tp)) != NULL;
net/ipv6/sit.c
169
tp = &iter->next) {
net/ipv6/sit.c
170
if (t == iter) {
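
The ip6_gre.c, ip6_tunnel.c, ip6_vti.c, and sit.c entries above repeat one unlink idiom: walk a singly linked hash chain through a pointer-to-slot, so the match can be spliced out without a prev pointer. Sketch under RTNL (which rtnl_dereference() implies), using the ip6_tunnel.c bucket helper named in the source:

    struct ip6_tnl __rcu **tp;
    struct ip6_tnl *iter;

    for (tp = ip6_tnl_bucket(ip6n, &t->parms);
         (iter = rtnl_dereference(*tp)) != NULL;
         tp = &iter->next) {
        if (t == iter) {
            rcu_assign_pointer(*tp, t->next);  /* splice t out of the chain */
            break;
        }
    }
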
net/mac80211/cfg.c
4051
struct ieee80211_link_data *iter;
net/mac80211/cfg.c
4053
for_each_sdata_link_rcu(local, iter) {
net/mac80211/cfg.c
4054
if (iter->sdata == sdata ||
net/mac80211/cfg.c
4055
rcu_access_pointer(iter->conf->tx_bss_conf) != tx_bss_conf)
net/mac80211/cfg.c
4058
wiphy_work_queue(iter->sdata->local->hw.wiphy,
net/mac80211/cfg.c
4059
&iter->csa.finalize_work);
net/mac80211/chan.c
100
iter->link = NULL;
net/mac80211/chan.c
101
iter->per_link = CHANCTX_ITER_POS_ASSIGNED;
net/mac80211/chan.c
102
iter->iftype = iter->sdata->vif.type;
net/mac80211/chan.c
106
iter->done = true;
net/mac80211/chan.c
109
#define for_each_chanctx_user_assigned(local, ctx, iter) \
net/mac80211/chan.c
1094
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
110
for (ieee80211_chanctx_user_iter_next(local, ctx, iter, \
net/mac80211/chan.c
1103
for_each_chanctx_user_assigned(local, chanctx, &iter) {
net/mac80211/chan.c
1106
switch (iter.iftype) {
net/mac80211/chan.c
1108
if (!iter.sdata->u.mgd.associated)
net/mac80211/chan.c
1124
if (iter.iftype == NL80211_IFTYPE_MONITOR) {
net/mac80211/chan.c
1129
switch (iter.link->smps_mode) {
net/mac80211/chan.c
113
!((iter)->done); \
net/mac80211/chan.c
1132
iter.link->smps_mode);
net/mac80211/chan.c
1135
needed_static = iter.link->needed_rx_chains;
net/mac80211/chan.c
1136
needed_dynamic = iter.link->needed_rx_chains;
net/mac80211/chan.c
114
ieee80211_chanctx_user_iter_next(local, ctx, iter, \
net/mac80211/chan.c
1140
needed_dynamic = iter.link->needed_rx_chains;
net/mac80211/chan.c
118
#define for_each_chanctx_user_reserved(local, ctx, iter) \
net/mac80211/chan.c
119
for (ieee80211_chanctx_user_iter_next(local, ctx, iter, \
net/mac80211/chan.c
122
!((iter)->done); \
net/mac80211/chan.c
123
ieee80211_chanctx_user_iter_next(local, ctx, iter, \
net/mac80211/chan.c
127
#define for_each_chanctx_user_all(local, ctx, iter) \
net/mac80211/chan.c
128
for (ieee80211_chanctx_user_iter_next(local, ctx, iter, \
net/mac80211/chan.c
131
!((iter)->done); \
net/mac80211/chan.c
132
ieee80211_chanctx_user_iter_next(local, ctx, iter, \
net/mac80211/chan.c
139
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
142
for_each_chanctx_user_assigned(local, ctx, &iter)
net/mac80211/chan.c
151
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
154
for_each_chanctx_user_reserved(local, ctx, &iter)
net/mac80211/chan.c
1623
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
163
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
1633
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
1634
if (!ieee80211_link_has_in_place_reservation(iter.link))
net/mac80211/chan.c
1637
old_ctx = ieee80211_link_get_chanctx(iter.link);
net/mac80211/chan.c
1638
vif_chsw[i].vif = &iter.sdata->vif;
net/mac80211/chan.c
1641
vif_chsw[i].link_conf = iter.link->conf;
net/mac80211/chan.c
166
for_each_chanctx_user_all(local, ctx, &iter)
net/mac80211/chan.c
1718
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
1734
for_each_chanctx_user_assigned(local, ctx->replace_ctx, &iter) {
net/mac80211/chan.c
1736
if (iter.link->reserved_chanctx) {
net/mac80211/chan.c
1738
if (iter.link->reserved_ready)
net/mac80211/chan.c
1755
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
1756
if (ieee80211_link_has_in_place_reservation(iter.link) &&
net/mac80211/chan.c
1757
!iter.link->reserved_ready)
net/mac80211/chan.c
1760
old_ctx = ieee80211_link_get_chanctx(iter.link);
net/mac80211/chan.c
1771
if (iter.radar_required)
net/mac80211/chan.c
1786
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
1796
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
1797
if (!ieee80211_link_has_in_place_reservation(iter.link))
net/mac80211/chan.c
1801
ieee80211_link_get_chanctx(iter.link),
net/mac80211/chan.c
1830
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
1840
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
1841
struct ieee80211_link_data *link = iter.link;
net/mac80211/chan.c
1842
struct ieee80211_sub_if_data *sdata = iter.sdata;
net/mac80211/chan.c
1858
link->radar_required = iter.radar_required;
net/mac80211/chan.c
1860
if (link_conf->chanreq.oper.width != iter.chanreq->oper.width)
net/mac80211/chan.c
1877
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
1878
if (ieee80211_link_get_chanctx(iter.link) != ctx)
net/mac80211/chan.c
1881
iter.link->reserved_chanctx = NULL;
net/mac80211/chan.c
1883
ieee80211_link_chanctx_reservation_complete(iter.link);
net/mac80211/chan.c
1894
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
1895
struct ieee80211_link_data *link = iter.link;
net/mac80211/chan.c
1940
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
1945
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
1946
ieee80211_link_unreserve_chanctx(iter.link);
net/mac80211/chan.c
1947
ieee80211_link_chanctx_reservation_complete(iter.link);
net/mac80211/chan.c
2150
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
2154
for_each_chanctx_user_all(local, ctx, &iter) {
net/mac80211/chan.c
2155
if (iter.link == skip_link)
net/mac80211/chan.c
2158
ret = ieee80211_chanreq_compatible(ret, iter.chanreq, tmp);
net/mac80211/chan.c
2272
void (*iter)(struct ieee80211_hw *hw,
net/mac80211/chan.c
2283
iter(hw, &ctx->conf, iter_data);
net/mac80211/chan.c
2290
void (*iter)(struct ieee80211_hw *hw,
net/mac80211/chan.c
2302
iter(hw, &ctx->conf, iter_data);
net/mac80211/chan.c
268
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
275
for_each_chanctx_user_reserved(local, ctx, &iter) {
net/mac80211/chan.c
276
req = ieee80211_chanreq_compatible(iter.chanreq, req, tmp);
net/mac80211/chan.c
291
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
295
for_each_chanctx_user_assigned(local, ctx, &iter) {
net/mac80211/chan.c
296
if (iter.link->reserved_chanctx)
net/mac80211/chan.c
299
comp_def = ieee80211_chanreq_compatible(iter.chanreq,
net/mac80211/chan.c
36
struct ieee80211_chanctx_user_iter *iter,
net/mac80211/chan.c
43
memset(iter, 0, sizeof(*iter));
net/mac80211/chan.c
48
for (int link_id = iter->link ? iter->link->link_id : 0;
net/mac80211/chan.c
49
link_id < ARRAY_SIZE(iter->sdata->link);
net/mac80211/chan.c
53
link = sdata_dereference(iter->sdata->link[link_id],
net/mac80211/chan.c
54
iter->sdata);
net/mac80211/chan.c
58
switch (iter->per_link) {
net/mac80211/chan.c
60
iter->per_link = CHANCTX_ITER_POS_RESERVED;
net/mac80211/chan.c
63
iter->link = link;
net/mac80211/chan.c
64
iter->reserved = false;
net/mac80211/chan.c
65
iter->radar_required = link->radar_required;
net/mac80211/chan.c
66
iter->chanreq = &link->conf->chanreq;
net/mac80211/chan.c
71
iter->per_link = CHANCTX_ITER_POS_DONE;
net/mac80211/chan.c
74
iter->link = link;
net/mac80211/chan.c
75
iter->reserved = true;
net/mac80211/chan.c
76
iter->radar_required =
net/mac80211/chan.c
79
iter->chanreq = &link->reserved;
net/mac80211/chan.c
817
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
821
for_each_chanctx_user_assigned(local, ctx, &iter) {
net/mac80211/chan.c
822
if (iter.radar_required)
net/mac80211/chan.c
84
iter->per_link = CHANCTX_ITER_POS_ASSIGNED;
net/mac80211/chan.c
91
iter->sdata = list_prepare_entry(iter->sdata, &local->interfaces, list);
net/mac80211/chan.c
92
list_for_each_entry_continue(iter->sdata, &local->interfaces, list) {
net/mac80211/chan.c
93
if (!ieee80211_sdata_running(iter->sdata))
net/mac80211/chan.c
940
struct ieee80211_chanctx_user_iter iter;
net/mac80211/chan.c
946
for_each_chanctx_user_assigned(local, ctx, &iter) {
net/mac80211/chan.c
948
compat = iter.chanreq;
net/mac80211/chan.c
950
compat = ieee80211_chanreq_compatible(iter.chanreq,
net/mac80211/chan.c
97
if (iter->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
net/mac80211/iface.c
110
struct ieee80211_sub_if_data *iter;
net/mac80211/iface.c
121
list_for_each_entry(iter, &local->interfaces, list) {
net/mac80211/iface.c
122
if (iter->vif.type == NL80211_IFTYPE_NAN &&
net/mac80211/iface.c
123
iter->u.nan.started) {
net/mac80211/iface.c
165
struct ieee80211_sub_if_data *iter;
net/mac80211/iface.c
188
list_for_each_entry(iter, &local->interfaces, list) {
net/mac80211/iface.c
189
if (iter == sdata)
net/mac80211/iface.c
192
if (iter->vif.type == NL80211_IFTYPE_MONITOR &&
net/mac80211/iface.c
193
!(iter->u.mntr.flags & MONITOR_FLAG_ACTIVE))
net/mac80211/iface.c
196
m = iter->vif.addr;
net/mac80211/key.c
1001
void (*iter)(struct ieee80211_hw *hw,
net/mac80211/key.c
1013
iter(hw, vif, key->sta ? &key->sta->sta : NULL,
net/mac80211/key.c
1019
void (*iter)(struct ieee80211_hw *hw,
net/mac80211/key.c
1035
ieee80211_key_iter(hw, vif, key, iter, iter_data);
net/mac80211/key.c
1041
iter, iter_data);
net/mac80211/key.c
1049
void (*iter)(struct ieee80211_hw *hw,
net/mac80211/key.c
1059
ieee80211_key_iter(hw, &sdata->vif, key, iter, iter_data);
net/mac80211/key.c
1064
void (*iter)(struct ieee80211_hw *hw,
net/mac80211/key.c
1076
_ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
net/mac80211/key.c
1079
_ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data);
net/mac80211/tx.c
4212
struct txq_info *iter, *tmp, *txqi = to_txq_info(txq);
net/mac80211/tx.c
4227
list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac],
net/mac80211/tx.c
4229
if (iter == txqi)
net/mac80211/tx.c
4232
if (!iter->txq.sta) {
net/mac80211/tx.c
4233
list_move_tail(&iter->schedule_order,
net/mac80211/tx.c
4237
sta = container_of(iter->txq.sta, struct sta_info, sta);
net/mac80211/tx.c
4240
list_move_tail(&iter->schedule_order, &local->active_txqs[ac]);
net/mptcp/pm.c
973
struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
net/mptcp/pm.c
986
mptcp_for_each_subflow(msk, iter) {
net/mptcp/pm.c
987
if (iter != subflow && mptcp_subflow_active(iter) &&
net/mptcp/pm.c
988
iter->stale_count < active_max_loss_cnt) {
net/netfilter/ipvs/ip_vs_conn.c
1052
static void *ip_vs_conn_array(struct ip_vs_iter_state *iter)
net/netfilter/ipvs/ip_vs_conn.c
1057
for (idx = iter->bucket; idx < ip_vs_conn_tab_size; idx++) {
net/netfilter/ipvs/ip_vs_conn.c
1064
if (skip >= iter->skip_elems) {
net/netfilter/ipvs/ip_vs_conn.c
1065
iter->bucket = idx;
net/netfilter/ipvs/ip_vs_conn.c
1072
iter->skip_elems = 0;
net/netfilter/ipvs/ip_vs_conn.c
1076
iter->bucket = idx;
net/netfilter/ipvs/ip_vs_conn.c
1083
struct ip_vs_iter_state *iter = seq->private;
net/netfilter/ipvs/ip_vs_conn.c
1087
iter->skip_elems = 0;
net/netfilter/ipvs/ip_vs_conn.c
1088
iter->bucket = 0;
net/netfilter/ipvs/ip_vs_conn.c
1092
return ip_vs_conn_array(iter);
net/netfilter/ipvs/ip_vs_conn.c
1098
struct ip_vs_iter_state *iter = seq->private;
net/netfilter/ipvs/ip_vs_conn.c
1103
return ip_vs_conn_array(iter);
net/netfilter/ipvs/ip_vs_conn.c
1108
iter->skip_elems++;
net/netfilter/ipvs/ip_vs_conn.c
1112
iter->skip_elems = 0;
net/netfilter/ipvs/ip_vs_conn.c
1113
iter->bucket++;
net/netfilter/ipvs/ip_vs_conn.c
1115
return ip_vs_conn_array(iter);
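
The ip_vs_conn.c entries above implement a resumable hash-table cursor for seq_file: iter remembers the bucket plus how many entries within it were already emitted, and ip_vs_conn_array() re-seeks from that cursor. A sketch of the advance step; bucket_done is a hypothetical stand-in for "reached the end of the current hlist":

    static void *conn_seq_advance(struct ip_vs_iter_state *iter,
                                  bool bucket_done)
    {
        iter->skip_elems++;              /* step past the entry just shown */
        if (bucket_done) {
            iter->skip_elems = 0;
            iter->bucket++;              /* move on to the next bucket */
        }
        return ip_vs_conn_array(iter);   /* re-seek from the cursor */
    }
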
net/netfilter/ipvs/ip_vs_ctl.c
2302
struct ip_vs_iter *iter = seq->private;
net/netfilter/ipvs/ip_vs_ctl.c
2310
iter->table = ip_vs_svc_table;
net/netfilter/ipvs/ip_vs_ctl.c
2311
iter->bucket = idx;
net/netfilter/ipvs/ip_vs_ctl.c
2322
iter->table = ip_vs_svc_fwm_table;
net/netfilter/ipvs/ip_vs_ctl.c
2323
iter->bucket = idx;
net/netfilter/ipvs/ip_vs_ctl.c
2343
struct ip_vs_iter *iter;
net/netfilter/ipvs/ip_vs_ctl.c
2351
iter = seq->private;
net/netfilter/ipvs/ip_vs_ctl.c
2353
if (iter->table == ip_vs_svc_table) {
net/netfilter/ipvs/ip_vs_ctl.c
2359
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
net/netfilter/ipvs/ip_vs_ctl.c
2361
&ip_vs_svc_table[iter->bucket],
net/netfilter/ipvs/ip_vs_ctl.c
2367
iter->table = ip_vs_svc_fwm_table;
net/netfilter/ipvs/ip_vs_ctl.c
2368
iter->bucket = -1;
net/netfilter/ipvs/ip_vs_ctl.c
2378
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
net/netfilter/ipvs/ip_vs_ctl.c
2380
&ip_vs_svc_fwm_table[iter->bucket],
net/netfilter/ipvs/ip_vs_ctl.c
2409
const struct ip_vs_iter *iter = seq->private;
net/netfilter/ipvs/ip_vs_ctl.c
2416
if (iter->table == ip_vs_svc_table) {
net/netfilter/nf_conntrack_core.c
2309
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
net/netfilter/nf_conntrack_core.c
2346
if (iter(ct, iter_data->data))
net/netfilter/nf_conntrack_core.c
2362
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
net/netfilter/nf_conntrack_core.c
2371
while ((ct = get_next_corpse(iter, iter_data, &bucket)) != NULL) {
net/netfilter/nf_conntrack_core.c
2381
void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
net/netfilter/nf_conntrack_core.c
2392
nf_ct_iterate_cleanup(iter, iter_data);
net/netfilter/nf_conntrack_core.c
2408
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
net/netfilter/nf_conntrack_core.c
2439
nf_ct_iterate_cleanup(iter, &iter_data);
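
The nf_conntrack_core.c entries above define the iterate API: the caller supplies an iter(ct, data) predicate, and every conntrack for which it returns nonzero is deleted. Minimal caller sketch, mirroring the ctnetlink flush quoted further down:

    static int untrack_all(struct nf_conn *ct, void *data)
    {
        return 1;                /* nonzero: delete this conntrack entry */
    }

    /* ... */
    struct nf_ct_iter_data iter_data = { .net = net };

    nf_ct_iterate_cleanup_net(untrack_all, &iter_data);
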
net/netfilter/nf_conntrack_expect.c
535
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
net/netfilter/nf_conntrack_expect.c
548
if (iter(exp, data) && timer_delete(&exp->timeout)) {
net/netfilter/nf_conntrack_expect.c
560
bool (*iter)(struct nf_conntrack_expect *e, void *data),
net/netfilter/nf_conntrack_expect.c
578
if (iter(exp, data) && timer_delete(&exp->timeout)) {
net/netfilter/nf_conntrack_netlink.c
1594
struct nf_ct_iter_data iter = {
net/netfilter/nf_conntrack_netlink.c
1605
iter.data = filter;
net/netfilter/nf_conntrack_netlink.c
1608
nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
net/netfilter/nf_flow_table_core.c
422
void (*iter)(struct nf_flowtable *flowtable,
net/netfilter/nf_flow_table_core.c
447
iter(flow_table, flow, data);
net/netfilter/nf_flow_table_xdp.c
102
hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
net/netfilter/nf_flow_table_xdp.c
103
if (key == iter->net_device_addr) {
net/netfilter/nf_flow_table_xdp.c
104
elem = iter;
net/netfilter/nf_flow_table_xdp.c
30
struct flow_offload_xdp *iter;
net/netfilter/nf_flow_table_xdp.c
32
hash_for_each_possible_rcu(nf_xdp_hashtable, iter, hnode, key) {
net/netfilter/nf_flow_table_xdp.c
33
if (key == iter->net_device_addr) {
net/netfilter/nf_flow_table_xdp.c
40
ft_elem = list_first_or_null_rcu(&iter->head,
net/netfilter/nf_flow_table_xdp.c
53
struct flow_offload_xdp *iter, *elem = NULL;
net/netfilter/nf_flow_table_xdp.c
65
hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) {
net/netfilter/nf_flow_table_xdp.c
66
if (key == iter->net_device_addr) {
net/netfilter/nf_flow_table_xdp.c
67
elem = iter;
net/netfilter/nf_flow_table_xdp.c
97
struct flow_offload_xdp *iter, *elem = NULL;
net/netfilter/nf_nat_masquerade.c
103
int (*iter)(struct nf_conn *i, void *data),
net/netfilter/nf_nat_masquerade.c
127
w->iter = iter;
net/netfilter/nf_nat_masquerade.c
18
int (*iter)(struct nf_conn *i, void *data);
net/netfilter/nf_nat_masquerade.c
87
nf_ct_iterate_cleanup_net(w->iter, &iter_data);
net/netfilter/nf_tables_api.c
4164
const struct nft_set_iter *iter,
net/netfilter/nf_tables_api.c
4172
if (!nft_set_elem_active(ext, iter->genmask))
net/netfilter/nf_tables_api.c
5751
const struct nft_set_iter *iter,
net/netfilter/nf_tables_api.c
5756
if (!nft_set_elem_active(ext, iter->genmask))
net/netfilter/nf_tables_api.c
5788
struct nft_set_iter iter = {
net/netfilter/nf_tables_api.c
5807
set->ops->walk(ctx, set, &iter);
net/netfilter/nf_tables_api.c
5808
if (!iter.err)
net/netfilter/nf_tables_api.c
5809
iter.err = nft_set_catchall_bind_check(ctx, set);
net/netfilter/nf_tables_api.c
5811
if (iter.err < 0)
net/netfilter/nf_tables_api.c
5812
return iter.err;
net/netfilter/nf_tables_api.c
5846
const struct nft_set_iter *iter,
net/netfilter/nf_tables_api.c
5852
if (nft_set_elem_active(ext, iter->genmask))
net/netfilter/nf_tables_api.c
5880
struct nft_set_iter iter = {
net/netfilter/nf_tables_api.c
5886
set->ops->walk(ctx, set, &iter);
net/netfilter/nf_tables_api.c
5887
WARN_ON_ONCE(iter.err);
net/netfilter/nf_tables_api.c
6138
struct nft_set_iter iter;
net/netfilter/nf_tables_api.c
6145
const struct nft_set_iter *iter,
net/netfilter/nf_tables_api.c
6151
if (!nft_set_elem_active(ext, iter->genmask))
net/netfilter/nf_tables_api.c
6157
args = container_of(iter, struct nft_set_dump_args, iter);
net/netfilter/nf_tables_api.c
6213
.iter = {
net/netfilter/nf_tables_api.c
6270
set->ops->walk(&dump_ctx->ctx, set, &args.iter);
net/netfilter/nf_tables_api.c
6272
if (!args.iter.err && args.iter.count == cb->args[0])
net/netfilter/nf_tables_api.c
6273
args.iter.err = nft_set_catchall_dump(net, skb, set,
net/netfilter/nf_tables_api.c
6278
if (dump_ctx->reset && args.iter.count > args.iter.skip)
net/netfilter/nf_tables_api.c
6280
args.iter.count - args.iter.skip);
net/netfilter/nf_tables_api.c
6284
if (args.iter.err && args.iter.err != -EMSGSIZE)
net/netfilter/nf_tables_api.c
6285
return args.iter.err;
net/netfilter/nf_tables_api.c
6286
if (args.iter.count == cb->args[0])
net/netfilter/nf_tables_api.c
6289
cb->args[0] = args.iter.count;
net/netfilter/nf_tables_api.c
7829
const struct nft_set_iter *iter,
net/netfilter/nf_tables_api.c
7835
if (!nft_set_elem_active(ext, iter->genmask))
net/netfilter/nf_tables_api.c
7900
struct nft_set_iter iter = {
net/netfilter/nf_tables_api.c
7906
set->ops->walk(ctx, set, &iter);
net/netfilter/nf_tables_api.c
7907
if (!iter.err)
net/netfilter/nf_tables_api.c
7908
iter.err = nft_set_catchall_flush(ctx, set);
net/netfilter/nf_tables_api.c
7910
return iter.err;
net/netfilter/nf_tables_api.c
798
const struct nft_set_iter *iter,
net/netfilter/nf_tables_api.c
803
if (!nft_set_elem_active(ext, iter->genmask))
net/netfilter/nf_tables_api.c
842
struct nft_set_iter iter = {
net/netfilter/nf_tables_api.c
848
set->ops->walk(ctx, set, &iter);
net/netfilter/nf_tables_api.c
849
WARN_ON_ONCE(iter.err);
net/netfilter/nft_lookup.c
249
struct nft_set_iter iter = {
net/netfilter/nft_lookup.c
259
priv->set->ops->walk(ctx, priv->set, &iter);
net/netfilter/nft_lookup.c
260
if (!iter.err)
net/netfilter/nft_lookup.c
261
iter.err = nft_set_catchall_validate(ctx, priv->set);
net/netfilter/nft_lookup.c
263
if (iter.err < 0)
net/netfilter/nft_lookup.c
264
return iter.err;
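
All the nf_tables set walks above (nf_tables_api.c, nft_lookup.c, and the nft_set_* backends) share one contract: the caller fills a stack nft_set_iter with a per-element callback, invokes set->ops->walk(), and reads back iter.err. Sketch of the validate-path caller, assuming current initializer fields (genmask, type, and fn all appear in the quotes):

    struct nft_set_iter iter = {
        .genmask = nft_genmask_next(ctx->net),
        .type    = NFT_ITER_UPDATE,
        .fn      = nft_setelem_validate,   /* called once per element */
    };

    priv->set->ops->walk(ctx, priv->set, &iter);
    if (!iter.err)
        iter.err = nft_set_catchall_validate(ctx, priv->set);
    if (iter.err < 0)
        return iter.err;
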
net/netfilter/nft_set_bitmap.c
224
struct nft_set_iter *iter)
net/netfilter/nft_set_bitmap.c
231
if (iter->count < iter->skip)
net/netfilter/nft_set_bitmap.c
234
iter->err = iter->fn(ctx, set, iter, &be->priv);
net/netfilter/nft_set_bitmap.c
236
if (iter->err < 0)
net/netfilter/nft_set_bitmap.c
239
iter->count++;
net/netfilter/nft_set_hash.c
268
struct nft_set_iter *iter)
net/netfilter/nft_set_hash.c
280
iter->err = PTR_ERR(he);
net/netfilter/nft_set_hash.c
287
if (iter->count < iter->skip)
net/netfilter/nft_set_hash.c
290
iter->err = iter->fn(ctx, set, iter, &he->priv);
net/netfilter/nft_set_hash.c
291
if (iter->err < 0)
net/netfilter/nft_set_hash.c
295
iter->count++;
net/netfilter/nft_set_hash.c
303
struct nft_set_iter *iter)
net/netfilter/nft_set_hash.c
320
iter->err = -EMLINK;
net/netfilter/nft_set_hash.c
339
iter->err = PTR_ERR(he);
net/netfilter/nft_set_hash.c
358
if (iter->err == 0) {
net/netfilter/nft_set_hash.c
359
iter->err = iter->fn(ctx, set, iter, &he->priv);
net/netfilter/nft_set_hash.c
360
if (iter->err == 0)
net/netfilter/nft_set_hash.c
361
iter->count++;
net/netfilter/nft_set_hash.c
373
struct nft_set_iter *iter)
net/netfilter/nft_set_hash.c
375
switch (iter->type) {
net/netfilter/nft_set_hash.c
379
WARN_ON_ONCE(iter->skip != 0);
net/netfilter/nft_set_hash.c
381
nft_rhash_walk_update(ctx, set, iter);
net/netfilter/nft_set_hash.c
384
nft_rhash_walk_ro(ctx, set, iter);
net/netfilter/nft_set_hash.c
387
iter->err = -EINVAL;
net/netfilter/nft_set_hash.c
752
struct nft_set_iter *iter)
net/netfilter/nft_set_hash.c
761
if (iter->count < iter->skip)
net/netfilter/nft_set_hash.c
764
iter->err = iter->fn(ctx, set, iter, &he->priv);
net/netfilter/nft_set_hash.c
765
if (iter->err < 0)
net/netfilter/nft_set_hash.c
768
iter->count++;
net/netfilter/nft_set_pipapo.c
2136
struct nft_set_iter *iter)
net/netfilter/nft_set_pipapo.c
2150
if (iter->count < iter->skip)
net/netfilter/nft_set_pipapo.c
2155
iter->err = iter->fn(ctx, set, iter, &e->priv);
net/netfilter/nft_set_pipapo.c
2156
if (iter->err < 0)
net/netfilter/nft_set_pipapo.c
2160
iter->count++;
net/netfilter/nft_set_pipapo.c
2174
struct nft_set_iter *iter)
net/netfilter/nft_set_pipapo.c
2179
switch (iter->type) {
net/netfilter/nft_set_pipapo.c
2183
iter->err = -ENOMEM;
net/netfilter/nft_set_pipapo.c
2186
nft_pipapo_do_walk(ctx, set, m, iter);
net/netfilter/nft_set_pipapo.c
2194
nft_pipapo_do_walk(ctx, set, m, iter);
net/netfilter/nft_set_pipapo.c
2199
nft_pipapo_do_walk(ctx, set, m, iter);
net/netfilter/nft_set_pipapo.c
2203
iter->err = -EINVAL;
net/netfilter/nft_set_rbtree.c
844
struct nft_set_iter *iter)
net/netfilter/nft_set_rbtree.c
853
if (iter->count < iter->skip)
net/netfilter/nft_set_rbtree.c
856
iter->err = iter->fn(ctx, set, iter, &rbe->priv);
net/netfilter/nft_set_rbtree.c
857
if (iter->err < 0)
net/netfilter/nft_set_rbtree.c
860
iter->count++;
net/netfilter/nft_set_rbtree.c
866
struct nft_set_iter *iter)
net/netfilter/nft_set_rbtree.c
870
switch (iter->type) {
net/netfilter/nft_set_rbtree.c
873
iter->err = -ENOMEM;
net/netfilter/nft_set_rbtree.c
880
nft_rbtree_do_walk(ctx, set, iter);
net/netfilter/nft_set_rbtree.c
884
nft_rbtree_do_walk(ctx, set, iter);
net/netfilter/nft_set_rbtree.c
888
iter->err = -EINVAL;
net/netlabel/netlabel_addrlist.c
101
list_for_each_entry_rcu(iter, head, list)
net/netlabel/netlabel_addrlist.c
102
if (iter->valid &&
net/netlabel/netlabel_addrlist.c
103
ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0)
net/netlabel/netlabel_addrlist.c
104
return iter;
net/netlabel/netlabel_addrlist.c
125
struct netlbl_af6list *iter;
net/netlabel/netlabel_addrlist.c
127
list_for_each_entry_rcu(iter, head, list)
net/netlabel/netlabel_addrlist.c
128
if (iter->valid &&
net/netlabel/netlabel_addrlist.c
129
ipv6_addr_equal(&iter->addr, addr) &&
net/netlabel/netlabel_addrlist.c
130
ipv6_addr_equal(&iter->mask, mask))
net/netlabel/netlabel_addrlist.c
131
return iter;
net/netlabel/netlabel_addrlist.c
150
struct netlbl_af4list *iter;
net/netlabel/netlabel_addrlist.c
152
iter = netlbl_af4list_search(entry->addr, head);
net/netlabel/netlabel_addrlist.c
153
if (iter != NULL &&
net/netlabel/netlabel_addrlist.c
154
iter->addr == entry->addr && iter->mask == entry->mask)
net/netlabel/netlabel_addrlist.c
161
list_for_each_entry_rcu(iter, head, list)
net/netlabel/netlabel_addrlist.c
162
if (iter->valid &&
net/netlabel/netlabel_addrlist.c
163
ntohl(entry->mask) > ntohl(iter->mask)) {
net/netlabel/netlabel_addrlist.c
165
iter->list.prev,
net/netlabel/netlabel_addrlist.c
166
&iter->list);
net/netlabel/netlabel_addrlist.c
187
struct netlbl_af6list *iter;
net/netlabel/netlabel_addrlist.c
189
iter = netlbl_af6list_search(&entry->addr, head);
net/netlabel/netlabel_addrlist.c
190
if (iter != NULL &&
net/netlabel/netlabel_addrlist.c
191
ipv6_addr_equal(&iter->addr, &entry->addr) &&
net/netlabel/netlabel_addrlist.c
192
ipv6_addr_equal(&iter->mask, &entry->mask))
net/netlabel/netlabel_addrlist.c
199
list_for_each_entry_rcu(iter, head, list)
net/netlabel/netlabel_addrlist.c
200
if (iter->valid &&
net/netlabel/netlabel_addrlist.c
201
ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) {
net/netlabel/netlabel_addrlist.c
203
iter->list.prev,
net/netlabel/netlabel_addrlist.c
204
&iter->list);
net/netlabel/netlabel_addrlist.c
357
int iter = -1;
net/netlabel/netlabel_addrlist.c
358
while (ntohl(mask->s6_addr32[++iter]) == 0xffffffff)
net/netlabel/netlabel_addrlist.c
360
mask_val = ntohl(mask->s6_addr32[iter]);
net/netlabel/netlabel_addrlist.c
49
struct netlbl_af4list *iter;
net/netlabel/netlabel_addrlist.c
51
list_for_each_entry_rcu(iter, head, list)
net/netlabel/netlabel_addrlist.c
52
if (iter->valid && (addr & iter->mask) == iter->addr)
net/netlabel/netlabel_addrlist.c
53
return iter;
net/netlabel/netlabel_addrlist.c
74
struct netlbl_af4list *iter;
net/netlabel/netlabel_addrlist.c
76
list_for_each_entry_rcu(iter, head, list)
net/netlabel/netlabel_addrlist.c
77
if (iter->valid && iter->addr == addr && iter->mask == mask)
net/netlabel/netlabel_addrlist.c
78
return iter;
net/netlabel/netlabel_addrlist.c
99
struct netlbl_af6list *iter;
net/netlabel/netlabel_addrlist.h
149
#define netlbl_af6list_foreach(iter, head) \
net/netlabel/netlabel_addrlist.h
150
for (iter = __af6list_valid((head)->next, head); \
net/netlabel/netlabel_addrlist.h
151
&iter->list != (head); \
net/netlabel/netlabel_addrlist.h
152
iter = __af6list_valid(iter->list.next, head))
net/netlabel/netlabel_addrlist.h
154
#define netlbl_af6list_foreach_rcu(iter, head) \
net/netlabel/netlabel_addrlist.h
155
for (iter = __af6list_valid_rcu((head)->next, head); \
net/netlabel/netlabel_addrlist.h
156
&iter->list != (head); \
net/netlabel/netlabel_addrlist.h
157
iter = __af6list_valid_rcu(iter->list.next, head))
net/netlabel/netlabel_addrlist.h
159
#define netlbl_af6list_foreach_safe(iter, tmp, head) \
net/netlabel/netlabel_addrlist.h
160
for (iter = __af6list_valid((head)->next, head), \
net/netlabel/netlabel_addrlist.h
161
tmp = __af6list_valid(iter->list.next, head); \
net/netlabel/netlabel_addrlist.h
162
&iter->list != (head); \
net/netlabel/netlabel_addrlist.h
163
iter = tmp, tmp = __af6list_valid(iter->list.next, head))
net/netlabel/netlabel_addrlist.h
82
#define netlbl_af4list_foreach(iter, head) \
net/netlabel/netlabel_addrlist.h
83
for (iter = __af4list_valid((head)->next, head); \
net/netlabel/netlabel_addrlist.h
84
&iter->list != (head); \
net/netlabel/netlabel_addrlist.h
85
iter = __af4list_valid(iter->list.next, head))
net/netlabel/netlabel_addrlist.h
87
#define netlbl_af4list_foreach_rcu(iter, head) \
net/netlabel/netlabel_addrlist.h
88
for (iter = __af4list_valid_rcu((head)->next, head); \
net/netlabel/netlabel_addrlist.h
89
&iter->list != (head); \
net/netlabel/netlabel_addrlist.h
90
iter = __af4list_valid_rcu(iter->list.next, head))
net/netlabel/netlabel_addrlist.h
92
#define netlbl_af4list_foreach_safe(iter, tmp, head) \
net/netlabel/netlabel_addrlist.h
93
for (iter = __af4list_valid((head)->next, head), \
net/netlabel/netlabel_addrlist.h
94
tmp = __af4list_valid(iter->list.next, head); \
net/netlabel/netlabel_addrlist.h
95
&iter->list != (head); \
net/netlabel/netlabel_addrlist.h
96
iter = tmp, tmp = __af4list_valid(iter->list.next, head))
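
The netlabel_addrlist.h macros above wrap list iteration but skip entries whose ->valid flag is clear, via the __af4list_valid()/__af6list_valid() helpers. Illustrative lookup using the RCU variant (the open-coded searches quoted earlier check ->valid explicitly instead):

    static struct netlbl_af4list *af4_covering_entry(__be32 addr,
                                                     struct list_head *head)
    {
        struct netlbl_af4list *iter;

        netlbl_af4list_foreach_rcu(iter, head)
            if ((addr & iter->mask) == iter->addr)
                return iter;     /* first valid covering entry wins */
        return NULL;
    }
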
net/netlabel/netlabel_cipso_v4.c
100
while (iter < CIPSO_V4_TAG_MAXCNT)
net/netlabel/netlabel_cipso_v4.c
101
doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID;
net/netlabel/netlabel_cipso_v4.c
130
u32 iter;
net/netlabel/netlabel_cipso_v4.c
202
for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++)
net/netlabel/netlabel_cipso_v4.c
203
doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL;
net/netlabel/netlabel_cipso_v4.c
204
for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++)
net/netlabel/netlabel_cipso_v4.c
205
doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL;
net/netlabel/netlabel_cipso_v4.c
279
for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++)
net/netlabel/netlabel_cipso_v4.c
280
doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT;
net/netlabel/netlabel_cipso_v4.c
281
for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++)
net/netlabel/netlabel_cipso_v4.c
282
doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT;
net/netlabel/netlabel_cipso_v4.c
458
u32 iter;
net/netlabel/netlabel_cipso_v4.c
496
for (iter = 0;
net/netlabel/netlabel_cipso_v4.c
497
iter < CIPSO_V4_TAG_MAXCNT &&
net/netlabel/netlabel_cipso_v4.c
498
doi_def->tags[iter] != CIPSO_V4_TAG_INVALID;
net/netlabel/netlabel_cipso_v4.c
499
iter++) {
net/netlabel/netlabel_cipso_v4.c
502
doi_def->tags[iter]);
net/netlabel/netlabel_cipso_v4.c
516
for (iter = 0;
net/netlabel/netlabel_cipso_v4.c
517
iter < doi_def->map.std->lvl.local_size;
net/netlabel/netlabel_cipso_v4.c
518
iter++) {
net/netlabel/netlabel_cipso_v4.c
519
if (doi_def->map.std->lvl.local[iter] ==
net/netlabel/netlabel_cipso_v4.c
531
iter);
net/netlabel/netlabel_cipso_v4.c
536
doi_def->map.std->lvl.local[iter]);
net/netlabel/netlabel_cipso_v4.c
549
for (iter = 0;
net/netlabel/netlabel_cipso_v4.c
550
iter < doi_def->map.std->cat.local_size;
net/netlabel/netlabel_cipso_v4.c
551
iter++) {
net/netlabel/netlabel_cipso_v4.c
552
if (doi_def->map.std->cat.local[iter] ==
net/netlabel/netlabel_cipso_v4.c
564
iter);
net/netlabel/netlabel_cipso_v4.c
569
doi_def->map.std->cat.local[iter]);
net/netlabel/netlabel_cipso_v4.c
84
u32 iter = 0;
net/netlabel/netlabel_cipso_v4.c
96
if (iter >= CIPSO_V4_TAG_MAXCNT)
net/netlabel/netlabel_cipso_v4.c
98
doi_def->tags[iter++] = nla_get_u8(nla);
net/netlabel/netlabel_domainhash.c
107
u32 iter;
net/netlabel/netlabel_domainhash.c
114
for (iter = 0, val = 0, len = strlen(key); iter < len; iter++)
net/netlabel/netlabel_domainhash.c
115
val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter];
net/netlabel/netlabel_domainhash.c
142
struct netlbl_dom_map *iter;
net/netlabel/netlabel_domainhash.c
147
list_for_each_entry_rcu(iter, bkt_list, list,
net/netlabel/netlabel_domainhash.c
149
if (iter->valid &&
net/netlabel/netlabel_domainhash.c
150
netlbl_family_match(iter->family, family) &&
net/netlabel/netlabel_domainhash.c
151
strcmp(iter->domain, domain) == 0)
net/netlabel/netlabel_domainhash.c
152
return iter;
net/netlabel/netlabel_domainhash.c
364
u32 iter;
net/netlabel/netlabel_domainhash.c
379
for (iter = 0; iter < hsh_tbl->size; iter++)
net/netlabel/netlabel_domainhash.c
380
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
net/netlabel/netlabel_domainhash.h
43
#define netlbl_domhsh_addr4_entry(iter) \
net/netlabel/netlabel_domainhash.h
44
container_of(iter, struct netlbl_domaddr4_map, list)
net/netlabel/netlabel_domainhash.h
50
#define netlbl_domhsh_addr6_entry(iter) \
net/netlabel/netlabel_domainhash.h
51
container_of(iter, struct netlbl_domaddr6_map, list)
net/netlabel/netlabel_kapi.c
559
struct netlbl_lsm_catmap *iter = *catmap;
net/netlabel/netlabel_kapi.c
562
if (iter == NULL)
net/netlabel/netlabel_kapi.c
564
if (offset < iter->startbit)
net/netlabel/netlabel_kapi.c
566
while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
net/netlabel/netlabel_kapi.c
567
prev = iter;
net/netlabel/netlabel_kapi.c
568
iter = iter->next;
net/netlabel/netlabel_kapi.c
570
if (iter == NULL || offset < iter->startbit)
net/netlabel/netlabel_kapi.c
573
return iter;
net/netlabel/netlabel_kapi.c
577
return iter;
net/netlabel/netlabel_kapi.c
582
iter = netlbl_catmap_alloc(gfp_flags);
net/netlabel/netlabel_kapi.c
583
if (iter == NULL)
net/netlabel/netlabel_kapi.c
585
iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1);
net/netlabel/netlabel_kapi.c
588
iter->next = *catmap;
net/netlabel/netlabel_kapi.c
589
*catmap = iter;
net/netlabel/netlabel_kapi.c
591
iter->next = prev->next;
net/netlabel/netlabel_kapi.c
592
prev->next = iter;
net/netlabel/netlabel_kapi.c
595
return iter;
net/netlabel/netlabel_kapi.c
610
struct netlbl_lsm_catmap *iter;
net/netlabel/netlabel_kapi.c
615
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
net/netlabel/netlabel_kapi.c
616
if (iter == NULL)
net/netlabel/netlabel_kapi.c
618
if (offset > iter->startbit) {
net/netlabel/netlabel_kapi.c
619
offset -= iter->startbit;
net/netlabel/netlabel_kapi.c
626
bitmap = iter->bitmap[idx] >> bit;
net/netlabel/netlabel_kapi.c
634
return iter->startbit +
net/netlabel/netlabel_kapi.c
638
if (iter->next != NULL) {
net/netlabel/netlabel_kapi.c
639
iter = iter->next;
net/netlabel/netlabel_kapi.c
644
bitmap = iter->bitmap[idx];
net/netlabel/netlabel_kapi.c
665
struct netlbl_lsm_catmap *iter;
net/netlabel/netlabel_kapi.c
672
iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
net/netlabel/netlabel_kapi.c
673
if (iter == NULL)
net/netlabel/netlabel_kapi.c
675
if (offset > iter->startbit) {
net/netlabel/netlabel_kapi.c
676
offset -= iter->startbit;
net/netlabel/netlabel_kapi.c
686
bitmap = iter->bitmap[idx];
net/netlabel/netlabel_kapi.c
695
return iter->startbit +
net/netlabel/netlabel_kapi.c
698
if (iter->next == NULL)
net/netlabel/netlabel_kapi.c
699
return iter->startbit + NETLBL_CATMAP_SIZE - 1;
net/netlabel/netlabel_kapi.c
700
prev = iter;
net/netlabel/netlabel_kapi.c
701
iter = iter->next;
net/netlabel/netlabel_kapi.c
729
struct netlbl_lsm_catmap *iter;
net/netlabel/netlabel_kapi.c
747
iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_WALK, 0);
net/netlabel/netlabel_kapi.c
748
if (iter == NULL) {
net/netlabel/netlabel_kapi.c
753
if (off < iter->startbit) {
net/netlabel/netlabel_kapi.c
754
*offset = iter->startbit;
net/netlabel/netlabel_kapi.c
757
off -= iter->startbit;
net/netlabel/netlabel_kapi.c
759
*bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_MAPSIZE);
net/netlabel/netlabel_kapi.c
779
struct netlbl_lsm_catmap *iter;
net/netlabel/netlabel_kapi.c
782
iter = _netlbl_catmap_getnode(catmap, bit, _CM_F_ALLOC, flags);
net/netlabel/netlabel_kapi.c
783
if (iter == NULL)
net/netlabel/netlabel_kapi.c
786
bit -= iter->startbit;
net/netlabel/netlabel_kapi.c
788
iter->bitmap[idx] |= NETLBL_CATMAP_BIT << (bit % NETLBL_CATMAP_MAPSIZE);
net/netlabel/netlabel_kapi.c
847
struct netlbl_lsm_catmap *iter;
net/netlabel/netlabel_kapi.c
854
iter = _netlbl_catmap_getnode(catmap, offset, _CM_F_ALLOC, flags);
net/netlabel/netlabel_kapi.c
855
if (iter == NULL)
net/netlabel/netlabel_kapi.c
858
offset -= iter->startbit;
net/netlabel/netlabel_kapi.c
860
iter->bitmap[idx] |= (u64)bitmap
net/netlabel/netlabel_unlabeled.c
1410
u32 iter;
net/netlabel/netlabel_unlabeled.c
1425
for (iter = 0; iter < hsh_tbl->size; iter++)
net/netlabel/netlabel_unlabeled.c
1426
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
net/netlabel/netlabel_unlabeled.c
206
struct netlbl_unlhsh_iface *iter;
net/netlabel/netlabel_unlabeled.c
210
list_for_each_entry_rcu(iter, bkt_list, list,
net/netlabel/netlabel_unlabeled.c
212
if (iter->valid && iter->ifindex == ifindex)
net/netlabel/netlabel_unlabeled.c
213
return iter;
net/netlabel/netlabel_unlabeled.c
66
#define netlbl_unlhsh_addr4_entry(iter) \
net/netlabel/netlabel_unlabeled.c
67
container_of(iter, struct netlbl_unlhsh_addr4, list)
net/netlabel/netlabel_unlabeled.c
74
#define netlbl_unlhsh_addr6_entry(iter) \
net/netlabel/netlabel_unlabeled.c
75
container_of(iter, struct netlbl_unlhsh_addr6, list)
net/netlink/af_netlink.c
2617
static void netlink_walk_start(struct nl_seq_iter *iter)
net/netlink/af_netlink.c
2619
rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti);
net/netlink/af_netlink.c
2620
rhashtable_walk_start(&iter->hti);
net/netlink/af_netlink.c
2623
static void netlink_walk_stop(struct nl_seq_iter *iter)
net/netlink/af_netlink.c
2625
rhashtable_walk_stop(&iter->hti);
net/netlink/af_netlink.c
2626
rhashtable_walk_exit(&iter->hti);
net/netlink/af_netlink.c
2631
struct nl_seq_iter *iter = seq->private;
net/netlink/af_netlink.c
2636
nlk = rhashtable_walk_next(&iter->hti);
net/netlink/af_netlink.c
2648
netlink_walk_stop(iter);
net/netlink/af_netlink.c
2649
if (++iter->link >= MAX_LINKS)
net/netlink/af_netlink.c
2652
netlink_walk_start(iter);
net/netlink/af_netlink.c
2662
struct nl_seq_iter *iter = seq->private;
net/netlink/af_netlink.c
2666
iter->link = 0;
net/netlink/af_netlink.c
2668
netlink_walk_start(iter);
net/netlink/af_netlink.c
2684
struct nl_seq_iter *iter = seq->private;
net/netlink/af_netlink.c
2686
if (iter->link >= MAX_LINKS)
net/netlink/af_netlink.c
2689
netlink_walk_stop(iter);
net/netlink/genetlink.c
214
static void genl_op_from_split(struct genl_op_iter *iter)
net/netlink/genetlink.c
216
const struct genl_family *family = iter->family;
net/netlink/genetlink.c
219
i = iter->entry_idx - family->n_ops - family->n_small_ops;
net/netlink/genetlink.c
222
iter->doit = family->split_ops[i + cnt];
net/netlink/genetlink.c
223
genl_op_fill_in_reject_policy_split(family, &iter->doit);
net/netlink/genetlink.c
226
memset(&iter->doit, 0, sizeof(iter->doit));
net/netlink/genetlink.c
231
(!cnt || family->split_ops[i + cnt].cmd == iter->doit.cmd)) {
net/netlink/genetlink.c
232
iter->dumpit = family->split_ops[i + cnt];
net/netlink/genetlink.c
233
genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
net/netlink/genetlink.c
236
memset(&iter->dumpit, 0, sizeof(iter->dumpit));
net/netlink/genetlink.c
240
iter->entry_idx += cnt;
net/netlink/genetlink.c
337
genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
net/netlink/genetlink.c
339
iter->family = family;
net/netlink/genetlink.c
340
iter->cmd_idx = 0;
net/netlink/genetlink.c
341
iter->entry_idx = 0;
net/netlink/genetlink.c
343
iter->flags = 0;
net/netlink/genetlink.c
345
return iter->family->n_ops +
net/netlink/genetlink.c
346
iter->family->n_small_ops +
net/netlink/genetlink.c
347
iter->family->n_split_ops;
net/netlink/genetlink.c
350
static bool genl_op_iter_next(struct genl_op_iter *iter)
net/netlink/genetlink.c
352
const struct genl_family *family = iter->family;
net/netlink/genetlink.c
356
if (iter->entry_idx < family->n_ops) {
net/netlink/genetlink.c
357
genl_op_from_full(family, iter->entry_idx, &op);
net/netlink/genetlink.c
358
} else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
net/netlink/genetlink.c
359
genl_op_from_small(family, iter->entry_idx - family->n_ops,
net/netlink/genetlink.c
361
} else if (iter->entry_idx <
net/netlink/genetlink.c
365
genl_op_from_split(iter);
net/netlink/genetlink.c
370
iter->cmd_idx++;
net/netlink/genetlink.c
373
iter->entry_idx++;
net/netlink/genetlink.c
375
genl_cmd_full_to_split(&iter->doit, family,
net/netlink/genetlink.c
377
genl_cmd_full_to_split(&iter->dumpit, family,
net/netlink/genetlink.c
381
iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
net/netlink/genetlink.c
382
iter->flags = iter->doit.flags | iter->dumpit.flags;
net/netlink/genetlink.c
393
static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
net/netlink/genetlink.c
395
return iter->cmd_idx;
net/nfc/netlink.c
1367
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
1371
if (!iter) {
net/nfc/netlink.c
1373
iter = kmalloc_obj(struct class_dev_iter);
net/nfc/netlink.c
1374
if (!iter)
net/nfc/netlink.c
1376
cb->args[0] = (long) iter;
net/nfc/netlink.c
1384
nfc_device_iter_init(iter);
net/nfc/netlink.c
1385
dev = nfc_device_iter_next(iter);
net/nfc/netlink.c
1396
dev = nfc_device_iter_next(iter);
net/nfc/netlink.c
1408
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
1410
if (iter) {
net/nfc/netlink.c
1411
nfc_device_iter_exit(iter);
net/nfc/netlink.c
1412
kfree(iter);
net/nfc/netlink.c
1837
struct class_dev_iter iter;
net/nfc/netlink.c
1844
nfc_device_iter_init(&iter);
net/nfc/netlink.c
1845
dev = nfc_device_iter_next(&iter);
net/nfc/netlink.c
1857
dev = nfc_device_iter_next(&iter);
net/nfc/netlink.c
1860
nfc_device_iter_exit(&iter);
net/nfc/netlink.c
601
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
605
if (!iter) {
net/nfc/netlink.c
607
iter = kmalloc_obj(struct class_dev_iter);
net/nfc/netlink.c
608
if (!iter)
net/nfc/netlink.c
610
cb->args[0] = (long) iter;
net/nfc/netlink.c
618
nfc_device_iter_init(iter);
net/nfc/netlink.c
619
dev = nfc_device_iter_next(iter);
net/nfc/netlink.c
630
dev = nfc_device_iter_next(iter);
net/nfc/netlink.c
642
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
net/nfc/netlink.c
644
if (iter) {
net/nfc/netlink.c
645
nfc_device_iter_exit(iter);
net/nfc/netlink.c
646
kfree(iter);
net/nfc/nfc.h
106
static inline void nfc_device_iter_init(struct class_dev_iter *iter)
net/nfc/nfc.h
108
class_dev_iter_init(iter, &nfc_class, NULL, NULL);
net/nfc/nfc.h
111
static inline struct nfc_dev *nfc_device_iter_next(struct class_dev_iter *iter)
net/nfc/nfc.h
113
struct device *d = class_dev_iter_next(iter);
net/nfc/nfc.h
120
static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
net/nfc/nfc.h
122
class_dev_iter_exit(iter);
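
The nfc entries above keep a heap-allocated class_dev_iter alive across netlink dump calls by stashing it in cb->args[0]. Condensed sketch of the three phases (error unwinding elided; the quotes allocate via kmalloc_obj()):

    struct class_dev_iter *iter = (struct class_dev_iter *)cb->args[0];

    if (!iter) {                             /* first dump call: set up */
        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
            return -ENOMEM;
        cb->args[0] = (long)iter;
        nfc_device_iter_init(iter);
    }

    dev = nfc_device_iter_next(iter);        /* resume where we stopped */
    /* ... emit one device per iteration, stop when the skb fills ... */

    /* in the dump's ->done callback: */
    nfc_device_iter_exit(iter);
    kfree(iter);
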
net/qrtr/af_qrtr.c
171
struct radix_tree_iter iter;
net/qrtr/af_qrtr.c
181
radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
net/qrtr/af_qrtr.c
183
radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
net/qrtr/af_qrtr.c
621
struct radix_tree_iter iter;
net/qrtr/af_qrtr.c
635
radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
net/qrtr/af_qrtr.c
638
src.sq_node = iter.index;
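
qrtr walks its node map with the slot-based radix tree iterator, which allows deleting the current entry mid-walk without restarting. Sketch built from the af_qrtr.c lines above:

    struct radix_tree_iter iter;
    void __rcu **slot;

    radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
        if (*slot == node)                           /* match by pointer */
            radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
    }
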
net/rds/af_rds.c
729
struct rds_info_iterator *iter,
net/rds/af_rds.c
751
rds_inc_info_copy(inc, iter,
net/rds/af_rds.c
768
struct rds_info_iterator *iter,
net/rds/af_rds.c
785
rds6_inc_info_copy(inc, iter, &inc->i_saddr,
net/rds/af_rds.c
800
struct rds_info_iterator *iter,
net/rds/af_rds.c
828
rds_info_copy(iter, &sinfo, sizeof(sinfo));
net/rds/af_rds.c
841
struct rds_info_iterator *iter,
net/rds/af_rds.c
863
rds_info_copy(iter, &sinfo6, sizeof(sinfo6));
net/rds/connection.c
554
struct rds_info_iterator *iter,
net/rds/connection.c
559
rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
net/rds/connection.c
562
rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
net/rds/connection.c
567
struct rds_info_iterator *iter,
net/rds/connection.c
613
iter,
net/rds/connection.c
633
struct rds_info_iterator *iter,
net/rds/connection.c
637
rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
net/rds/connection.c
642
struct rds_info_iterator *iter,
net/rds/connection.c
646
rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
net/rds/connection.c
651
struct rds_info_iterator *iter,
net/rds/connection.c
654
rds_conn_message_info(sock, len, iter, lens, 1);
net/rds/connection.c
659
struct rds_info_iterator *iter,
net/rds/connection.c
662
rds6_conn_message_info(sock, len, iter, lens, 1);
net/rds/connection.c
668
struct rds_info_iterator *iter,
net/rds/connection.c
671
rds_conn_message_info(sock, len, iter, lens, 0);
net/rds/connection.c
677
struct rds_info_iterator *iter,
net/rds/connection.c
680
rds6_conn_message_info(sock, len, iter, lens, 0);
net/rds/connection.c
685
struct rds_info_iterator *iter,
net/rds/connection.c
712
rds_info_copy(iter, buffer, item_len);
net/rds/connection.c
723
struct rds_info_iterator *iter,
net/rds/connection.c
762
rds_info_copy(iter, buffer, item_len);
net/rds/connection.c
829
struct rds_info_iterator *iter,
net/rds/connection.c
834
rds_walk_conn_path_info(sock, len, iter, lens,
net/rds/connection.c
842
struct rds_info_iterator *iter,
net/rds/connection.c
847
rds_walk_conn_path_info(sock, len, iter, lens,
net/rds/ib.c
368
struct rds_info_iterator *iter,
net/rds/ib.c
373
rds_for_each_conn_info(sock, len, iter, lens,
net/rds/ib.c
382
struct rds_info_iterator *iter,
net/rds/ib.c
387
rds_for_each_conn_info(sock, len, iter, lens,
net/rds/ib.h
441
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
net/rds/ib_rdma.c
439
int iter = 0;
net/rds/ib_rdma.c
451
if (++iter > 2) {
net/rds/ib_stats.c
103
rds_stats_info_copy(iter, (uint64_t *)&stats, rds_ib_stat_names,
net/rds/ib_stats.c
84
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
net/rds/info.c
104
void rds_info_iter_unmap(struct rds_info_iterator *iter)
net/rds/info.c
106
if (iter->addr) {
net/rds/info.c
107
kunmap_atomic(iter->addr);
net/rds/info.c
108
iter->addr = NULL;
net/rds/info.c
115
void rds_info_copy(struct rds_info_iterator *iter, void *data,
net/rds/info.c
121
if (!iter->addr)
net/rds/info.c
122
iter->addr = kmap_atomic(*iter->pages);
net/rds/info.c
124
this = min(bytes, PAGE_SIZE - iter->offset);
net/rds/info.c
127
"bytes %lu\n", *iter->pages, iter->addr,
net/rds/info.c
128
iter->offset, this, data, bytes);
net/rds/info.c
130
memcpy(iter->addr + iter->offset, data, this);
net/rds/info.c
134
iter->offset += this;
net/rds/info.c
136
if (iter->offset == PAGE_SIZE) {
net/rds/info.c
137
kunmap_atomic(iter->addr);
net/rds/info.c
138
iter->addr = NULL;
net/rds/info.c
139
iter->offset = 0;
net/rds/info.c
140
iter->pages++;
net/rds/info.c
161
struct rds_info_iterator iter;
net/rds/info.c
214
iter.pages = pages;
net/rds/info.c
215
iter.addr = NULL;
net/rds/info.c
216
iter.offset = start & (PAGE_SIZE - 1);
net/rds/info.c
218
func(sock, len, &iter, &lens);
net/rds/info.c
223
rds_info_iter_unmap(&iter);
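
rds/info.c streams records into pinned user pages through a cursor that kmaps one page at a time and rolls over at page boundaries. Annotated condensation of the copy step quoted above:

    static void info_copy(struct rds_info_iterator *iter, void *data,
                          unsigned long bytes)
    {
        unsigned long this;

        while (bytes) {
            if (!iter->addr)                          /* map on demand */
                iter->addr = kmap_atomic(*iter->pages);

            this = min(bytes, PAGE_SIZE - iter->offset);
            memcpy(iter->addr + iter->offset, data, this);

            data += this;
            bytes -= this;
            iter->offset += this;

            if (iter->offset == PAGE_SIZE) {          /* page full */
                kunmap_atomic(iter->addr);
                iter->addr = NULL;
                iter->offset = 0;
                iter->pages++;                        /* advance the cursor */
            }
        }
    }
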
net/rds/info.h
19
struct rds_info_iterator *iter,
net/rds/info.h
26
void rds_info_copy(struct rds_info_iterator *iter, void *data,
net/rds/info.h
28
void rds_info_iter_unmap(struct rds_info_iterator *iter);
net/rds/rds.h
1008
void rds_stats_info_copy(struct rds_info_iterator *iter,
net/rds/rds.h
1046
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
net/rds/rds.h
598
unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
net/rds/rds.h
817
struct rds_info_iterator *iter,
net/rds/rds.h
937
struct rds_info_iterator *iter,
net/rds/rds.h
940
struct rds_info_iterator *iter,
net/rds/recv.c
820
struct rds_info_iterator *iter,
net/rds/recv.c
843
rds_info_copy(iter, &minfo, sizeof(minfo));
net/rds/recv.c
848
struct rds_info_iterator *iter,
net/rds/recv.c
872
rds_info_copy(iter, &minfo6, sizeof(minfo6));
net/rds/stats.c
112
struct rds_info_iterator *iter,
net/rds/stats.c
136
rds_stats_info_copy(iter, (uint64_t *)&stats, rds_stat_names,
net/rds/stats.c
142
lens->nr = rds_trans_stats_info_copy(iter, avail) +
net/rds/stats.c
85
void rds_stats_info_copy(struct rds_info_iterator *iter,
net/rds/stats.c
96
rds_info_copy(iter, &ctr, sizeof(ctr));
net/rds/tcp.c
235
struct rds_info_iterator *iter,
net/rds/tcp.c
265
rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
net/rds/tcp.c
281
struct rds_info_iterator *iter,
net/rds/tcp.c
308
rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6));
net/rds/tcp.h
117
unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
net/rds/tcp_stats.c
51
unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
net/rds/tcp_stats.c
70
rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names,
net/rds/transport.c
144
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
net/rds/transport.c
153
rds_info_iter_unmap(iter);
net/rds/transport.c
161
part = trans->stats_info_copy(iter, avail);
net/rxrpc/recvmsg.c
244
struct msghdr *msg, struct iov_iter *iter,
net/rxrpc/recvmsg.c
307
ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
net/rxrpc/recvmsg.c
604
struct iov_iter *iter, size_t *_len,
net/rxrpc/recvmsg.c
614
ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset);
net/rxrpc/recvmsg.c
626
if (iov_iter_count(iter) > 0)
net/rxrpc/recvmsg.c
644
_leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
net/rxrpc/recvmsg.c
664
if (iov_iter_count(iter) > 0)
net/rxrpc/rxperf.c
166
iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
net/rxrpc/rxperf.c
364
ret = rxrpc_kernel_recv_data(rxperf_socket, call->rxcall, &call->iter,
net/rxrpc/rxperf.c
368
iov_iter_count(&call->iter), call->iov_len, want_more, ret);
net/rxrpc/rxperf.c
449
iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
net/rxrpc/rxperf.c
482
iov_iter_discard(&call->iter, READ, call->req_len);
net/rxrpc/rxperf.c
494
iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len);
net/rxrpc/rxperf.c
50
struct iov_iter iter;
net/sched/cls_api.c
201
struct tcf_proto *iter;
net/sched/cls_api.c
205
hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
net/sched/cls_api.c
207
if (tcf_proto_cmp(tp, iter)) {
net/sctp/proc.c
205
struct sctp_ht_iter *iter = seq->private;
net/sctp/proc.c
207
sctp_transport_walk_start(&iter->hti);
net/sctp/proc.c
209
return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
net/sctp/proc.c
214
struct sctp_ht_iter *iter = seq->private;
net/sctp/proc.c
222
sctp_transport_walk_stop(&iter->hti);
net/sctp/proc.c
227
struct sctp_ht_iter *iter = seq->private;
net/sctp/proc.c
237
return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
net/sctp/socket.c
5304
void sctp_transport_walk_start(struct rhashtable_iter *iter) __acquires(RCU)
net/sctp/socket.c
5306
rhltable_walk_enter(&sctp_transport_hashtable, iter);
net/sctp/socket.c
5308
rhashtable_walk_start(iter);
net/sctp/socket.c
5311
void sctp_transport_walk_stop(struct rhashtable_iter *iter) __releases(RCU)
net/sctp/socket.c
5313
rhashtable_walk_stop(iter);
net/sctp/socket.c
5314
rhashtable_walk_exit(iter);
net/sctp/socket.c
5318
struct rhashtable_iter *iter)
net/sctp/socket.c
5322
t = rhashtable_walk_next(iter);
net/sctp/socket.c
5323
for (; t; t = rhashtable_walk_next(iter)) {
net/sctp/socket.c
5344
struct rhashtable_iter *iter,
net/sctp/socket.c
5352
while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
net/sctp/tsnmap.c
143
struct sctp_tsnmap_iter *iter)
net/sctp/tsnmap.c
146
iter->start = map->cumulative_tsn_ack_point + 1;
net/sctp/tsnmap.c
153
struct sctp_tsnmap_iter *iter,
net/sctp/tsnmap.c
160
if (TSN_lte(map->max_tsn_seen, iter->start))
net/sctp/tsnmap.c
163
offset = iter->start - map->base_tsn;
net/sctp/tsnmap.c
182
iter->start = map->cumulative_tsn_ack_point + *end + 1;
net/sctp/tsnmap.c
320
struct sctp_tsnmap_iter iter;
net/sctp/tsnmap.c
326
sctp_tsnmap_iter_init(map, &iter);
net/sctp/tsnmap.c
327
while (sctp_tsnmap_next_gap_ack(map, &iter,
net/sunrpc/xprtrdma/svc_rdma_rw.c
153
struct bvec_iter iter = {
net/sunrpc/xprtrdma/svc_rdma_rw.c
161
iter, offset, handle, direction);
net/switchdev/switchdev.c
640
struct list_head *iter;
net/switchdev/switchdev.c
649
netdev_for_each_lower_dev(dev, lower_dev, iter) {
net/switchdev/switchdev.c
722
struct list_head *iter;
net/switchdev/switchdev.c
741
netdev_for_each_lower_dev(dev, lower_dev, iter) {
net/switchdev/switchdev.c
835
struct list_head *iter;
net/switchdev/switchdev.c
852
netdev_for_each_lower_dev(dev, lower_dev, iter) {
net/switchdev/switchdev.c
944
struct list_head *iter;
net/switchdev/switchdev.c
963
netdev_for_each_lower_dev(dev, lower_dev, iter) {
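
All four switchdev call sites above share one shape: declare a bare struct list_head *iter cursor and hand it to netdev_for_each_lower_dev(), which walks the adjacency list one hop down. A minimal sketch under the usual constraint that RTNL is held; count_lower_devs() is a hypothetical helper, not a kernel API.

    #include <linux/netdevice.h>

    /* Hypothetical helper: count @dev's direct lower devices.
     * Caller must hold rtnl_lock(); the iterator is not RCU-safe on its own.
     */
    static int count_lower_devs(struct net_device *dev)
    {
            struct net_device *lower_dev;
            struct list_head *iter;
            int n = 0;

            netdev_for_each_lower_dev(dev, lower_dev, iter)
                    n++;

            return n;
    }
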
net/tipc/socket.c
2967
struct rhashtable_iter iter;
net/tipc/socket.c
2971
rhashtable_walk_enter(&tn->sk_rht, &iter);
net/tipc/socket.c
2974
rhashtable_walk_start(&iter);
net/tipc/socket.c
2976
while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
net/tipc/socket.c
2978
rhashtable_walk_stop(&iter);
net/tipc/socket.c
2984
rhashtable_walk_start(&iter);
net/tipc/socket.c
2988
rhashtable_walk_stop(&iter);
net/tipc/socket.c
2991
rhashtable_walk_exit(&iter);
net/tipc/socket.c
3556
struct rhashtable_iter *iter = (void *)cb->args[4];
net/tipc/socket.c
3560
rhashtable_walk_start(iter);
net/tipc/socket.c
3561
while ((tsk = rhashtable_walk_next(iter)) != NULL) {
net/tipc/socket.c
3569
rhashtable_walk_stop(iter);
net/tipc/socket.c
3578
rhashtable_walk_start(iter);
net/tipc/socket.c
3581
rhashtable_walk_stop(iter);
net/tipc/socket.c
3596
struct rhashtable_iter *iter = (void *)cb->args[4];
net/tipc/socket.c
3599
if (!iter) {
net/tipc/socket.c
3600
iter = kmalloc_obj(*iter);
net/tipc/socket.c
3601
if (!iter)
net/tipc/socket.c
3604
cb->args[4] = (long)iter;
net/tipc/socket.c
3607
rhashtable_walk_enter(&tn->sk_rht, iter);
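
The tipc entries above spell out the full rhashtable walk protocol — enter, start, next, stop, exit — including restarting the start/stop pair for a second pass. A minimal sketch of one pass, assuming a hypothetical object type that embeds a struct rhash_head; rhashtable_walk_next() may return ERR_PTR(-EAGAIN) across a table resize, which walkers are expected to skip.

    #include <linux/rhashtable.h>

    struct obj {
            u32 key;
            struct rhash_head node;
    };

    static void walk_table(struct rhashtable *ht)
    {
            struct rhashtable_iter iter;
            struct obj *o;

            rhashtable_walk_enter(ht, &iter);   /* register walker, may sleep */
            rhashtable_walk_start(&iter);       /* begin RCU read-side walk */
            while ((o = rhashtable_walk_next(&iter)) != NULL) {
                    if (IS_ERR(o))              /* -EAGAIN: table resized */
                            continue;
                    /* use o under RCU; call walk_stop() before sleeping */
            }
            rhashtable_walk_stop(&iter);
            rhashtable_walk_exit(&iter);
    }
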
net/tls/tls_device.c
422
struct iov_iter *iter,
net/tls/tls_device.c
501
rc = iov_iter_extract_pages(iter, &pages,
net/tls/tls_device.c
511
iov_iter_revert(iter, copy);
net/tls/tls_device.c
524
iter);
net/tls/tls_device.c
596
struct iov_iter iter = {};
net/tls/tls_device.c
605
iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0);
net/tls/tls_device.c
606
tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA);
net/tls/tls_device.c
675
struct iov_iter iter;
net/tls/tls_device.c
677
iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0);
net/tls/tls_device.c
678
return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA);
net/tls/tls_strp.c
162
struct sk_buff *iter, *clone;
net/tls/tls_strp.c
167
iter = shinfo->frag_list;
net/tls/tls_strp.c
170
if (iter->len <= offset) {
net/tls/tls_strp.c
171
offset -= iter->len;
net/tls/tls_strp.c
175
chunk = iter->len - offset;
net/tls/tls_strp.c
178
clone = skb_clone(iter, strp->sk->sk_allocation);
net/tls/tls_strp.c
185
iter = iter->next;
net/unix/af_unix.c
3615
struct bpf_unix_iter_state *iter = seq->private;
net/unix/af_unix.c
3620
iter->batch[iter->end_sk++] = start_sk;
net/unix/af_unix.c
3623
if (iter->end_sk < iter->max_sk) {
net/unix/af_unix.c
3625
iter->batch[iter->end_sk++] = sk;
net/unix/af_unix.c
3636
static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
net/unix/af_unix.c
3638
while (iter->cur_sk < iter->end_sk)
net/unix/af_unix.c
3639
sock_put(iter->batch[iter->cur_sk++]);
net/unix/af_unix.c
3642
static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
net/unix/af_unix.c
3652
bpf_iter_unix_put_batch(iter);
net/unix/af_unix.c
3653
kvfree(iter->batch);
net/unix/af_unix.c
3654
iter->batch = new_batch;
net/unix/af_unix.c
3655
iter->max_sk = new_batch_sz;
net/unix/af_unix.c
3663
struct bpf_unix_iter_state *iter = seq->private;
net/unix/af_unix.c
3668
if (iter->st_bucket_done)
net/unix/af_unix.c
3673
iter->cur_sk = 0;
net/unix/af_unix.c
3674
iter->end_sk = 0;
net/unix/af_unix.c
3682
if (iter->end_sk == expected) {
net/unix/af_unix.c
3683
iter->st_bucket_done = true;
net/unix/af_unix.c
3687
if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
net/unix/af_unix.c
3708
struct bpf_unix_iter_state *iter = seq->private;
net/unix/af_unix.c
3715
if (iter->cur_sk < iter->end_sk)
net/unix/af_unix.c
3716
sock_put(iter->batch[iter->cur_sk++]);
net/unix/af_unix.c
3720
if (iter->cur_sk < iter->end_sk)
net/unix/af_unix.c
3721
sk = iter->batch[iter->cur_sk];
net/unix/af_unix.c
3758
struct bpf_unix_iter_state *iter = seq->private;
net/unix/af_unix.c
3769
if (iter->cur_sk < iter->end_sk)
net/unix/af_unix.c
3770
bpf_iter_unix_put_batch(iter);
net/unix/af_unix.c
3853
struct bpf_unix_iter_state *iter = priv_data;
net/unix/af_unix.c
3860
err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
net/unix/af_unix.c
3871
struct bpf_unix_iter_state *iter = priv_data;
net/unix/af_unix.c
3874
kvfree(iter->batch);
net/vmw_vsock/virtio_transport_common.c
86
struct iov_iter *iter = &msg->msg_iter;
net/vmw_vsock/virtio_transport_common.c
90
iter->count,
net/wireless/scan.c
3382
void (*iter)(struct wiphy *wiphy,
net/wireless/scan.c
3395
iter(wiphy, &bss->pub, iter_data);
net/wireless/scan.c
641
(*iter)(void *data, u8 type,
net/wireless/scan.c
676
switch (iter(iter_data, type, info,
net/wireless/util.c
2437
void (*iter)(const struct ieee80211_iface_combination *c,
net/wireless/util.c
2546
(*iter)(c, data);
net/xfrm/xfrm_interface_core.c
226
struct xfrm_if *iter;
net/xfrm/xfrm_interface_core.c
229
(iter = rtnl_dereference(*xip)) != NULL;
net/xfrm/xfrm_interface_core.c
230
xip = &iter->next) {
net/xfrm/xfrm_interface_core.c
231
if (xi == iter) {
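
xfrm_interface_core.c iterates with a pointer-to-pointer so unlinking needs no separate prev variable: the loop variable xip always addresses the link that points at the current node. A generic sketch of the idiom (the kernel version additionally wraps the load in rtnl_dereference() for RCU annotation):

    struct node {
            struct node *next;
    };

    /* Unlink @victim from the singly linked list headed at *head. */
    static void unlink_node(struct node **head, struct node *victim)
    {
            struct node **xp, *iter;

            for (xp = head; (iter = *xp) != NULL; xp = &iter->next) {
                    if (iter == victim) {
                            *xp = iter->next;  /* splice out via the link itself */
                            break;
                    }
            }
    }
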
scripts/kconfig/gconf.c
100
valid = gtk_tree_model_iter_children(model, &iter, parent);
scripts/kconfig/gconf.c
104
gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
1130
static gboolean visible_func(GtkTreeModel *model, GtkTreeIter *iter,
scripts/kconfig/gconf.c
1135
gtk_tree_model_get(model, iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
114
path = gtk_tree_model_get_path(model, &iter);
scripts/kconfig/gconf.c
121
gtk_tree_selection_select_iter(selection, &iter);
scripts/kconfig/gconf.c
126
_select_menu(view, model, &iter, match);
scripts/kconfig/gconf.c
128
valid = gtk_tree_model_iter_next(model, &iter);
scripts/kconfig/gconf.c
252
GtkTreeIter iter;
scripts/kconfig/gconf.c
255
valid = gtk_tree_model_iter_children(model, &iter, parent);
scripts/kconfig/gconf.c
259
gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
262
set_node(store, &iter, menu);
scripts/kconfig/gconf.c
264
_update_tree(store, &iter);
scripts/kconfig/gconf.c
266
valid = gtk_tree_model_iter_next(model, &iter);
scripts/kconfig/gconf.c
662
GtkTreeIter iter;
scripts/kconfig/gconf.c
667
if (!gtk_tree_model_get_iter(model, &iter, path))
scripts/kconfig/gconf.c
670
gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
673
gtk_tree_model_get(model, &iter, COL_VALUE, &old_def, -1);
scripts/kconfig/gconf.c
752
GtkTreeIter iter;
scripts/kconfig/gconf.c
762
if (!gtk_tree_model_get_iter(model, &iter, path))
scripts/kconfig/gconf.c
764
gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
804
GtkTreeIter iter;
scripts/kconfig/gconf.c
820
gtk_tree_model_get_iter(model, &iter, path);
scripts/kconfig/gconf.c
821
gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
843
GtkTreeIter iter;
scripts/kconfig/gconf.c
847
if (gtk_tree_selection_get_selected(selection, &model, &iter)) {
scripts/kconfig/gconf.c
848
gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
862
GtkTreeIter iter;
scripts/kconfig/gconf.c
871
gtk_tree_model_get_iter(model, &iter, path);
scripts/kconfig/gconf.c
872
gtk_tree_model_get(model, &iter, COL_MENU, &menu, -1);
scripts/kconfig/gconf.c
895
GtkTreeIter iter;
scripts/kconfig/gconf.c
915
gtk_tree_store_append(tree, &iter, parent);
scripts/kconfig/gconf.c
916
set_node(tree, &iter, child);
scripts/kconfig/gconf.c
919
_display_tree(tree, child, &iter);
scripts/kconfig/gconf.c
97
GtkTreeIter iter;
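
Every gconf.c site follows GTK's stack-allocated iterator convention: a GtkTreeIter is opaque, seeded by gtk_tree_model_iter_children() (or _get_iter()), and advanced with gtk_tree_model_iter_next() until it returns FALSE. A minimal sketch of the recursive walk that _update_tree() and _display_tree() build on; the visit() callback is hypothetical.

    #include <gtk/gtk.h>

    static void walk_tree(GtkTreeModel *model, GtkTreeIter *parent)
    {
            GtkTreeIter iter;
            gboolean valid;

            /* First child of @parent; NULL parent means top-level rows. */
            valid = gtk_tree_model_iter_children(model, &iter, parent);
            while (valid) {
                    /* visit(model, &iter);  -- hypothetical per-row callback */
                    walk_tree(model, &iter);        /* recurse into children */
                    valid = gtk_tree_model_iter_next(model, &iter);
            }
    }
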
security/lsm_init.c
33
#define lsm_order_for_each(iter) \
security/lsm_init.c
34
for ((iter) = lsm_order; *(iter); (iter)++)
security/lsm_init.c
35
#define lsm_for_each_raw(iter) \
security/lsm_init.c
36
for ((iter) = __start_lsm_info; \
security/lsm_init.c
37
(iter) < __end_lsm_info; (iter)++)
security/lsm_init.c
38
#define lsm_early_for_each_raw(iter) \
security/lsm_init.c
39
for ((iter) = __start_early_lsm_info; \
security/lsm_init.c
40
(iter) < __end_early_lsm_info; (iter)++)
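
lsm_init.c defines iteration macros over two layouts: a NULL-terminated pointer array (lsm_order_for_each) and linker-delimited struct arrays bounded by __start_*/__end_* symbols. A sketch of the linker-bounds pattern with a hypothetical section name; the bounds symbols come from the linker script, not from C.

    /* Hypothetical section: objects are placed in it with
     * __attribute__((section(".foo_info"))); the linker script then
     * emits __start_foo_info and __end_foo_info around the array.
     */
    struct foo_info {
            const char *name;
    };

    extern struct foo_info __start_foo_info[], __end_foo_info[];

    #define foo_for_each(iter) \
            for ((iter) = __start_foo_info; (iter) < __end_foo_info; (iter)++)

    static void visit_foos(void)
    {
            struct foo_info *iter;

            foo_for_each(iter)
                    ;       /* use iter->name */
    }
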
security/selinux/hooks.c
3156
int orig, iter;
security/selinux/hooks.c
3168
orig = iter = tsec->avdcache.dir_spot;
security/selinux/hooks.c
3170
if (tsec->avdcache.dir[iter].isid == isec->sid) {
security/selinux/hooks.c
3172
tsec->avdcache.dir_spot = iter;
security/selinux/hooks.c
3173
*avdc = &tsec->avdcache.dir[iter];
security/selinux/hooks.c
3176
iter = (iter - 1) & (TSEC_AVDC_DIR_SIZE - 1);
security/selinux/hooks.c
3177
} while (iter != orig);
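
The hooks.c cache scan is a backwards ring probe: start at the most recent hit, step down with a power-of-two mask, and stop after one full revolution. A generic sketch; the mask trick requires the ring size to be a power of two.

    #define RING_SIZE 4     /* must be a power of two */

    /* Return the slot holding @key, scanning backwards from @spot,
     * or -1 after one full revolution.
     */
    static int ring_find(const unsigned int slots[RING_SIZE],
                         unsigned int key, int spot)
    {
            int orig, iter;

            orig = iter = spot;
            do {
                    if (slots[iter] == key)
                            return iter;
                    iter = (iter - 1) & (RING_SIZE - 1);    /* wrap */
            } while (iter != orig);

            return -1;
    }
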
security/selinux/ibpkey.c
224
int iter;
security/selinux/ibpkey.c
229
for (iter = 0; iter < SEL_PKEY_HASH_SIZE; iter++) {
security/selinux/ibpkey.c
230
INIT_LIST_HEAD(&sel_ib_pkey_hash[iter].list);
security/selinux/ibpkey.c
231
sel_ib_pkey_hash[iter].size = 0;
security/selinux/netnode.c
296
int iter;
security/selinux/netnode.c
301
for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
security/selinux/netnode.c
302
INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
security/selinux/netnode.c
303
sel_netnode_hash[iter].size = 0;
security/selinux/netport.c
224
int iter;
security/selinux/netport.c
229
for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
security/selinux/netport.c
230
INIT_LIST_HEAD(&sel_netport_hash[iter].list);
security/selinux/netport.c
231
sel_netport_hash[iter].size = 0;
security/selinux/selinuxfs.c
1877
unsigned int iter;
security/selinux/selinuxfs.c
1881
for (iter = 0; !err && iter <= POLICYDB_CAP_MAX; iter++) {
security/selinux/selinuxfs.c
1884
if (iter < ARRAY_SIZE(selinux_policycap_names))
security/selinux/selinuxfs.c
1885
name = selinux_policycap_names[iter];
security/selinux/selinuxfs.c
1894
inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
security/selinux/ss/ebitmap.c
115
unsigned int iter;
security/selinux/ss/ebitmap.c
129
for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
security/selinux/ss/ebitmap.c
130
e_map = e_iter->maps[iter];
security/selinux/xfrm.c
451
struct dst_entry *iter;
security/selinux/xfrm.c
453
for (iter = dst; iter != NULL; iter = xfrm_dst_child(iter)) {
security/selinux/xfrm.c
454
struct xfrm_state *x = iter->xfrm;
sound/core/control.c
354
unsigned int iter = 100000;
sound/core/control.c
357
if (--iter == 0) {
sound/core/memalloc.c
625
struct sg_dma_page_iter iter;
sound/core/memalloc.c
627
snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
sound/core/memalloc.c
628
__sg_page_iter_dma_next(&iter);
sound/core/memalloc.c
629
return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
sound/core/memalloc.c
635
struct sg_page_iter iter;
sound/core/memalloc.c
637
snd_dma_noncontig_iter_set(dmab, &iter, offset);
sound/core/memalloc.c
638
__sg_page_iter_next(&iter);
sound/core/memalloc.c
639
return sg_page_iter_page(&iter);
sound/core/memalloc.c
646
struct sg_dma_page_iter iter;
sound/core/memalloc.c
652
snd_dma_noncontig_iter_set(dmab, &iter.base, start);
sound/core/memalloc.c
653
if (!__sg_page_iter_dma_next(&iter))
sound/core/memalloc.c
656
addr = sg_page_iter_dma_address(&iter);
sound/core/memalloc.c
662
if (!__sg_page_iter_dma_next(&iter) ||
sound/core/memalloc.c
663
sg_page_iter_dma_address(&iter) != addr)
sound/core/memory.c
26
struct iov_iter iter;
sound/core/memory.c
28
if (import_ubuf(ITER_DEST, dst, count, &iter))
sound/core/memory.c
30
if (copy_to_iter_fromio((const void __iomem *)src, count, &iter) != count)
sound/core/memory.c
83
struct iov_iter iter;
sound/core/memory.c
85
if (import_ubuf(ITER_SOURCE, (void __user *)src, count, &iter))
sound/core/memory.c
87
if (copy_from_iter_toio((void __iomem *)dst, count, &iter) != count)
sound/core/pcm_lib.c
2040
struct iov_iter *iter, unsigned long bytes);
sound/core/pcm_lib.c
2057
struct iov_iter *iter, unsigned long bytes)
sound/core/pcm_lib.c
2060
bytes, iter) != bytes)
sound/core/pcm_lib.c
2070
unsigned long hwoff, struct iov_iter *iter,
sound/core/pcm_lib.c
2090
struct iov_iter *iter, unsigned long bytes)
sound/core/pcm_lib.c
2093
bytes, iter) != bytes)
sound/core/pcm_lib.c
2103
struct iov_iter iter;
sound/core/pcm_lib.c
2114
iov_iter_kvec(&iter, type, &kvec, 1, bytes);
sound/core/pcm_lib.c
2115
return transfer(substream, c, hwoff, &iter, bytes);
sound/core/pcm_lib.c
2118
err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
sound/core/pcm_lib.c
2121
return transfer(substream, c, hwoff, &iter, bytes);
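
pcm_lib.c builds its iov_iter two ways depending on where the buffer lives: iov_iter_kvec() for kernel memory and import_ubuf() for a user pointer, after which the same copy_from_iter()/copy_to_iter() path serves both. A minimal sketch of the two constructors; ITER_SOURCE marks the iterator as the data source, and import_ubuf() returns 0 on success.

    #include <linux/errno.h>
    #include <linux/uio.h>

    static int pull_from_kernel(void *dst, void *src, size_t bytes)
    {
            struct kvec kvec = { .iov_base = src, .iov_len = bytes };
            struct iov_iter iter;

            iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, bytes);
            return copy_from_iter(dst, bytes, &iter) == bytes ? 0 : -EFAULT;
    }

    static int pull_from_user(void *dst, void __user *src, size_t bytes)
    {
            struct iov_iter iter;

            if (import_ubuf(ITER_SOURCE, src, bytes, &iter))
                    return -EFAULT;
            return copy_from_iter(dst, bytes, &iter) == bytes ? 0 : -EFAULT;
    }
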
sound/drivers/dummy.c
625
struct iov_iter *iter, unsigned long bytes)
sound/isa/cs423x/cs4236.c
500
struct pnp_dev *cdev, *iter;
sound/isa/cs423x/cs4236.c
516
list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) {
sound/isa/cs423x/cs4236.c
517
if (!strcmp(iter->id[0].id, cid)) {
sound/isa/cs423x/cs4236.c
518
cdev = iter;
sound/isa/sb/emu8000_pcm.c
410
#define GET_VAL(sval, iter) \
sound/isa/sb/emu8000_pcm.c
412
if (!iter) \
sound/isa/sb/emu8000_pcm.c
414
else if (copy_from_iter(&sval, 2, iter) != 2) \
sound/isa/sb/emu8000_pcm.c
420
#define LOOP_WRITE(rec, offset, iter, count) \
sound/isa/sb/emu8000_pcm.c
428
GET_VAL(sval, iter); \
sound/isa/sb/emu8000_pcm.c
463
#define LOOP_WRITE(rec, pos, iter, count) \
sound/isa/sb/emu8000_pcm.c
473
GET_VAL(sval, iter); \
sound/isa/sb/emu8000_pcm.c
477
GET_VAL(sval, iter); \
sound/soc/soc-component.c
978
struct iov_iter *iter, unsigned long bytes)
sound/soc/soc-component.c
989
channel, pos, iter, bytes));
sound/soc/soc-generic-dmaengine-pcm.c
293
struct iov_iter *iter, unsigned long bytes)
sound/soc/soc-generic-dmaengine-pcm.c
305
if (copy_from_iter(dma_ptr, bytes, iter) != bytes)
sound/soc/soc-generic-dmaengine-pcm.c
315
if (copy_to_iter(dma_ptr, bytes, iter) != bytes)
sound/soc/sof/topology.c
193
int i, iter;
sound/soc/sof/topology.c
203
iter = exp * -1;
sound/soc/sof/topology.c
205
iter = exp;
sound/soc/sof/topology.c
208
for (i = 0; i < iter; i++) {
tools/bpf/bpftool/cgroup.c
245
__u32 iter;
tools/bpf/bpftool/cgroup.c
259
for (iter = 0; iter < p.prog_cnt; iter++)
tools/bpf/bpftool/cgroup.c
260
show_bpf_prog(prog_ids[iter], type, NULL, level);
tools/bpf/bpftool/cgroup.c
273
__u32 iter;
tools/bpf/bpftool/cgroup.c
288
for (iter = 0; iter < p.prog_cnt; iter++) {
tools/bpf/bpftool/cgroup.c
291
attach_flags = prog_attach_flags[iter] ?: p.attach_flags;
tools/bpf/bpftool/cgroup.c
308
show_bpf_prog(prog_ids[iter], type,
tools/bpf/bpftool/link.c
1057
!info.iter.target_name) {
tools/bpf/bpftool/link.c
1058
info.iter.target_name = ptr_to_u64(&buf);
tools/bpf/bpftool/link.c
1059
info.iter.target_name_len = sizeof(buf);
tools/bpf/bpftool/link.c
203
const char *target_name = u64_to_ptr(info->iter.target_name);
tools/bpf/bpftool/link.c
208
jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
tools/bpf/bpftool/link.c
210
if (info->iter.task.tid)
tools/bpf/bpftool/link.c
211
jsonw_uint_field(wtr, "tid", info->iter.task.tid);
tools/bpf/bpftool/link.c
212
else if (info->iter.task.pid)
tools/bpf/bpftool/link.c
213
jsonw_uint_field(wtr, "pid", info->iter.task.pid);
tools/bpf/bpftool/link.c
217
jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
tools/bpf/bpftool/link.c
219
cgroup_order_string(info->iter.cgroup.order));
tools/bpf/bpftool/link.c
679
const char *target_name = u64_to_ptr(info->iter.target_name);
tools/bpf/bpftool/link.c
684
printf("map_id %u ", info->iter.map.map_id);
tools/bpf/bpftool/link.c
686
if (info->iter.task.tid)
tools/bpf/bpftool/link.c
687
printf("tid %u ", info->iter.task.tid);
tools/bpf/bpftool/link.c
688
else if (info->iter.task.pid)
tools/bpf/bpftool/link.c
689
printf("pid %u ", info->iter.task.pid);
tools/bpf/bpftool/link.c
693
printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
tools/bpf/bpftool/link.c
695
cgroup_order_string(info->iter.cgroup.order));
tools/bpf/bpftool/pids.c
146
fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
tools/bpf/bpftool/skeleton/pid_iter.bpf.c
67
int iter(struct bpf_iter__task_file *ctx)
tools/include/uapi/linux/bpf.h
6776
} iter;
tools/lib/bpf/elf.c
104
memset(iter, 0, sizeof(*iter));
tools/lib/bpf/elf.c
121
iter->strtabidx = sh.sh_link;
tools/lib/bpf/elf.c
122
iter->syms = elf_getdata(scn, 0);
tools/lib/bpf/elf.c
123
if (!iter->syms) {
tools/lib/bpf/elf.c
128
iter->nr_syms = iter->syms->d_size / sh.sh_entsize;
tools/lib/bpf/elf.c
129
iter->elf = elf;
tools/lib/bpf/elf.c
130
iter->st_type = st_type;
tools/lib/bpf/elf.c
139
iter->versyms = elf_getdata(scn, 0);
tools/lib/bpf/elf.c
145
iter->verdefs = elf_getdata(scn, 0);
tools/lib/bpf/elf.c
146
if (!iter->verdefs || !gelf_getshdr(scn, &sh)) {
tools/lib/bpf/elf.c
150
iter->verdef_strtabidx = sh.sh_link;
tools/lib/bpf/elf.c
155
static struct elf_sym *elf_sym_iter_next(struct elf_sym_iter *iter)
tools/lib/bpf/elf.c
157
struct elf_sym *ret = &iter->sym;
tools/lib/bpf/elf.c
164
for (idx = iter->next_sym_idx; idx < iter->nr_syms; idx++) {
tools/lib/bpf/elf.c
165
if (!gelf_getsym(iter->syms, idx, sym))
tools/lib/bpf/elf.c
167
if (GELF_ST_TYPE(sym->st_info) != iter->st_type)
tools/lib/bpf/elf.c
169
name = elf_strptr(iter->elf, iter->strtabidx, sym->st_name);
tools/lib/bpf/elf.c
172
sym_scn = elf_getscn(iter->elf, sym->st_shndx);
tools/lib/bpf/elf.c
178
iter->next_sym_idx = idx + 1;
tools/lib/bpf/elf.c
183
if (iter->versyms) {
tools/lib/bpf/elf.c
184
if (!gelf_getversym(iter->versyms, idx, &versym))
tools/lib/bpf/elf.c
195
static const char *elf_get_vername(struct elf_sym_iter *iter, int ver)
tools/lib/bpf/elf.c
201
if (!iter->verdefs)
tools/lib/bpf/elf.c
205
while (gelf_getverdef(iter->verdefs, offset, &verdef)) {
tools/lib/bpf/elf.c
214
if (!gelf_getverdaux(iter->verdefs, offset + verdef.vd_aux, &verdaux))
tools/lib/bpf/elf.c
217
return elf_strptr(iter->elf, iter->verdef_strtabidx, verdaux.vda_name);
tools/lib/bpf/elf.c
223
static bool symbol_match(struct elf_sym_iter *iter, int sh_type, struct elf_sym *sym,
tools/lib/bpf/elf.c
248
ver_name = elf_get_vername(iter, sym->ver);
tools/lib/bpf/elf.c
312
struct elf_sym_iter iter;
tools/lib/bpf/elf.c
317
ret = elf_sym_iter_new(&iter, elf, binary_path, sh_types[i], STT_FUNC);
tools/lib/bpf/elf.c
323
while ((sym = elf_sym_iter_next(&iter))) {
tools/lib/bpf/elf.c
324
if (!symbol_match(&iter, sh_types[i], sym, name, name_len, lib_ver))
tools/lib/bpf/elf.c
437
struct elf_sym_iter iter;
tools/lib/bpf/elf.c
440
err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], st_type);
tools/lib/bpf/elf.c
446
while ((sym = elf_sym_iter_next(&iter))) {
tools/lib/bpf/elf.c
518
struct elf_sym_iter iter;
tools/lib/bpf/elf.c
521
err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC);
tools/lib/bpf/elf.c
527
while ((sym = elf_sym_iter_next(&iter))) {
tools/lib/bpf/elf.c
96
static int elf_sym_iter_new(struct elf_sym_iter *iter,
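
elf.c's elf_sym_iter is the classic C iterator object: _new() validates and caches everything up front, _next() advances next_sym_idx and returns a pointer into iterator-owned storage, or NULL at the end, so every caller above collapses to while ((sym = elf_sym_iter_next(&iter))). A generic sketch of the shape with hypothetical types.

    #include <stddef.h>

    /* Hypothetical array iterator in the elf_sym_iter style. */
    struct item {
            int v;
    };

    struct item_iter {
            const struct item *base;
            size_t nr, next_idx;
            struct item cur;        /* storage handed back to the caller */
    };

    static int item_iter_new(struct item_iter *it, const struct item *base,
                             size_t nr)
    {
            it->base = base;
            it->nr = nr;
            it->next_idx = 0;
            return 0;
    }

    static struct item *item_iter_next(struct item_iter *it)
    {
            if (it->next_idx >= it->nr)
                    return NULL;    /* end of iteration */
            it->cur = it->base[it->next_idx++];
            return &it->cur;
    }

Returning iterator-owned storage means the result is valid only until the next _next() call — the same caveat elf_sym_iter carries.
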
tools/net/ynl/lib/ynl.h
107
#define ynl_dump_foreach(dump, iter) \
tools/net/ynl/lib/ynl.h
108
for (typeof(dump->obj) *iter = &dump->obj; \
tools/net/ynl/lib/ynl.h
109
!ynl_dump_obj_is_last(iter); \
tools/net/ynl/lib/ynl.h
110
iter = ynl_dump_obj_next(iter))
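
ynl_dump_foreach() declares its own typed cursor from the dump object's obj member, so callers name only the dump and an identifier. A sketch against a hypothetical generated family — the real type and function names come from the YNL code generator:

    /* Hypothetical: foo_get_dump() / foo_get_list stand in for what
     * ynl-gen-c would emit for a family's dump request.
     */
    struct foo_get_list *dump = foo_get_dump(ys);   /* ys: struct ynl_sock * */

    if (dump) {
            ynl_dump_foreach(dump, obj) {
                    /* obj has type typeof(dump->obj) *, one per response */
            }
            foo_get_list_free(dump);
    }
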
tools/objtool/arch/x86/special.c
22
struct special_alt *iter = group;
tools/objtool/arch/x86/special.c
24
unsigned int len = max(iter->orig_len, alt->orig_len);
tools/objtool/arch/x86/special.c
25
iter->orig_len = alt->orig_len = len;
tools/objtool/arch/x86/special.c
27
if (iter == prev)
tools/objtool/arch/x86/special.c
30
iter = list_next_entry(iter, list);
tools/objtool/elf.c
478
struct symbol *iter;
tools/objtool/elf.c
492
__sym_for_each(iter, &sym->sec->symbol_tree, sym->offset, sym->offset) {
tools/objtool/elf.c
493
if (!is_undef_sym(iter) && iter->offset == sym->offset &&
tools/objtool/elf.c
494
iter->type == sym->type && iter->len == sym->len)
tools/objtool/elf.c
495
iter->alias = sym;
tools/perf/bench/epoll-wait.c
383
size_t i, j, iter;
tools/perf/bench/epoll-wait.c
392
for (iter = 0; !wdone; iter++) {
tools/perf/bench/epoll-wait.c
414
printinfo("exiting writer-thread (total full-loops: %zd)\n", iter);
tools/perf/builtin-annotate.c
102
struct block_range_iter iter;
tools/perf/builtin-annotate.c
112
iter = block_range__create(start->addr, end->addr);
tools/perf/builtin-annotate.c
113
if (!block_range_iter__valid(&iter))
tools/perf/builtin-annotate.c
121
entry = block_range_iter(&iter);
tools/perf/builtin-annotate.c
126
entry = block_range_iter(&iter);
tools/perf/builtin-annotate.c
134
} while (block_range_iter__next(&iter));
tools/perf/builtin-annotate.c
139
entry = block_range_iter(&iter);
tools/perf/builtin-annotate.c
172
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
tools/perf/builtin-annotate.c
177
struct hist_entry *he = iter->he;
tools/perf/builtin-annotate.c
179
struct perf_sample *sample = iter->sample;
tools/perf/builtin-annotate.c
180
struct evsel *evsel = iter->evsel;
tools/perf/builtin-annotate.c
201
struct hist_entry_iter iter = {
tools/perf/builtin-annotate.c
228
ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
tools/perf/builtin-annotate.c
375
struct annotated_item_stat *istat, *pos, *iter;
tools/perf/builtin-annotate.c
389
list_for_each_entry(iter, head, list) {
tools/perf/builtin-annotate.c
390
sum2 = iter->good + iter->bad;
tools/perf/builtin-annotate.c
394
list_move_tail(&istat->list, &iter->list);
tools/perf/builtin-diff.c
399
struct hist_entry_iter iter = {
tools/perf/builtin-diff.c
438
if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
tools/perf/builtin-report.c
166
static int hist_iter__report_callback(struct hist_entry_iter *iter,
tools/perf/builtin-report.c
172
struct hist_entry *he = iter->he;
tools/perf/builtin-report.c
173
struct evsel *evsel = iter->evsel;
tools/perf/builtin-report.c
174
struct perf_sample *sample = iter->sample;
tools/perf/builtin-report.c
208
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
tools/perf/builtin-report.c
213
struct hist_entry *he = iter->he;
tools/perf/builtin-report.c
216
struct perf_sample *sample = iter->sample;
tools/perf/builtin-report.c
217
struct evsel *evsel = iter->evsel;
tools/perf/builtin-report.c
277
struct hist_entry_iter iter = {
tools/perf/builtin-report.c
318
iter.add_entry_cb = hist_iter__branch_callback;
tools/perf/builtin-report.c
319
iter.ops = &hist_iter_branch;
tools/perf/builtin-report.c
321
iter.ops = &hist_iter_mem;
tools/perf/builtin-report.c
323
iter.ops = &hist_iter_cumulative;
tools/perf/builtin-report.c
325
iter.ops = &hist_iter_normal;
tools/perf/builtin-report.c
341
ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
tools/perf/builtin-top.c
728
static int hist_iter__top_callback(struct hist_entry_iter *iter,
tools/perf/builtin-top.c
731
EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock)
tools/perf/builtin-top.c
734
struct evsel *evsel = iter->evsel;
tools/perf/builtin-top.c
737
perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);
tools/perf/builtin-top.c
739
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
tools/perf/builtin-top.c
834
struct hist_entry_iter iter = {
tools/perf/builtin-top.c
841
iter.ops = &hist_iter_cumulative;
tools/perf/builtin-top.c
843
iter.ops = &hist_iter_normal;
tools/perf/builtin-top.c
847
if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0)
tools/perf/tests/hists_cumulate.c
109
if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
tools/perf/tests/hists_cumulate.c
89
struct hist_entry_iter iter = {
tools/perf/tests/hists_cumulate.c
96
iter.ops = &hist_iter_cumulative;
tools/perf/tests/hists_cumulate.c
98
iter.ops = &hist_iter_normal;
tools/perf/tests/hists_filter.c
65
struct hist_entry_iter iter = {
tools/perf/tests/hists_filter.c
87
if (hist_entry_iter__add(&iter, &al,
tools/perf/tests/hists_output.c
59
struct hist_entry_iter iter = {
tools/perf/tests/hists_output.c
75
if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
tools/perf/ui/gtk/annotate.c
137
GtkTreeIter iter;
tools/perf/ui/gtk/annotate.c
140
gtk_list_store_append(store, &iter);
tools/perf/ui/gtk/annotate.c
157
gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
tools/perf/ui/gtk/annotate.c
159
gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
tools/perf/ui/gtk/annotate.c
161
gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);
tools/perf/ui/gtk/hists.c
106
GtkTreeIter iter, new_parent;
tools/perf/ui/gtk/hists.c
119
gtk_tree_store_append(store, &iter, &new_parent);
tools/perf/ui/gtk/hists.c
122
gtk_tree_store_set(store, &iter, 0, buf, -1);
tools/perf/ui/gtk/hists.c
125
gtk_tree_store_set(store, &iter, col, buf, -1);
tools/perf/ui/gtk/hists.c
132
new_parent = iter;
tools/perf/ui/gtk/hists.c
140
gtk_tree_store_append(store, &iter, &new_parent);
tools/perf/ui/gtk/hists.c
143
gtk_tree_store_set(store, &iter, 0, buf, -1);
tools/perf/ui/gtk/hists.c
146
gtk_tree_store_set(store, &iter, col, buf, -1);
tools/perf/ui/gtk/hists.c
153
new_parent = iter;
tools/perf/ui/gtk/hists.c
168
GtkTreeIter iter;
tools/perf/ui/gtk/hists.c
209
gtk_tree_store_append(store, &iter, parent);
tools/perf/ui/gtk/hists.c
212
gtk_tree_store_set(store, &iter, 0, buf, -1);
tools/perf/ui/gtk/hists.c
214
gtk_tree_store_set(store, &iter, col, str, -1);
tools/perf/ui/gtk/hists.c
229
GtkTreeIter iter, new_parent;
tools/perf/ui/gtk/hists.c
241
gtk_tree_store_append(store, &iter, &new_parent);
tools/perf/ui/gtk/hists.c
244
gtk_tree_store_set(store, &iter, 0, buf, -1);
tools/perf/ui/gtk/hists.c
247
gtk_tree_store_set(store, &iter, col, buf, -1);
tools/perf/ui/gtk/hists.c
254
new_parent = iter;
tools/perf/ui/gtk/hists.c
265
perf_gtk__add_callchain_graph(&node->rb_root, store, &iter, col,
tools/perf/ui/gtk/hists.c
360
GtkTreeIter iter;
tools/perf/ui/gtk/hists.c
371
gtk_tree_store_append(store, &iter, NULL);
tools/perf/ui/gtk/hists.c
384
gtk_tree_store_set(store, &iter, col_idx++, s, -1);
tools/perf/ui/gtk/hists.c
393
perf_gtk__add_callchain(&h->sorted_chain, store, &iter,
tools/perf/ui/gtk/hists.c
421
GtkTreeIter iter;
tools/perf/ui/gtk/hists.c
433
gtk_tree_store_append(store, &iter, parent);
tools/perf/ui/gtk/hists.c
446
gtk_tree_store_set(store, &iter, col_idx++, hpp->buf, -1);
tools/perf/ui/gtk/hists.c
463
gtk_tree_store_set(store, &iter, col_idx, strim(bf), -1);
tools/perf/ui/gtk/hists.c
470
store, &iter, hpp,
tools/perf/ui/gtk/hists.c
480
gtk_tree_store_append(store, &child, &iter);
tools/perf/ui/gtk/hists.c
490
perf_gtk__add_callchain(&he->sorted_chain, store, &iter,
tools/perf/util/annotate.c
1064
struct annotation_line *iter;
tools/perf/util/annotate.c
1072
iter = rb_entry(parent, struct annotation_line, rb_node);
tools/perf/util/annotate.c
1074
ret = strcmp(iter->path, al->path);
tools/perf/util/annotate.c
1077
iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
tools/perf/util/annotate.c
1113
struct annotation_line *iter;
tools/perf/util/annotate.c
1119
iter = rb_entry(parent, struct annotation_line, rb_node);
tools/perf/util/annotate.c
1121
if (cmp_source_line(al, iter))
tools/perf/util/block-range.c
118
return iter;
tools/perf/util/block-range.c
131
iter.start = head;
tools/perf/util/block-range.c
141
return iter;
tools/perf/util/block-range.c
154
iter.start = entry;
tools/perf/util/block-range.c
155
iter.end = entry;
tools/perf/util/block-range.c
165
return iter;
tools/perf/util/block-range.c
188
iter.start = entry;
tools/perf/util/block-range.c
195
entry = iter.start;
tools/perf/util/block-range.c
203
return iter;
tools/perf/util/block-range.c
225
iter.end = entry;
tools/perf/util/block-range.c
234
iter.end = entry;
tools/perf/util/block-range.c
250
return iter;
tools/perf/util/block-range.c
263
iter.end = tail;
tools/perf/util/block-range.c
273
return iter;
tools/perf/util/block-range.c
291
assert(iter.start->start == start && iter.start->is_target);
tools/perf/util/block-range.c
292
assert(iter.end->end == end && iter.end->is_branch);
tools/perf/util/block-range.c
296
return iter;
tools/perf/util/block-range.c
82
struct block_range_iter iter = { NULL, NULL };
tools/perf/util/block-range.h
51
static inline struct block_range *block_range_iter(struct block_range_iter *iter)
tools/perf/util/block-range.h
53
return iter->start;
tools/perf/util/block-range.h
56
static inline bool block_range_iter__next(struct block_range_iter *iter)
tools/perf/util/block-range.h
58
if (iter->start == iter->end)
tools/perf/util/block-range.h
61
iter->start = block_range__next(iter->start);
tools/perf/util/block-range.h
65
static inline bool block_range_iter__valid(struct block_range_iter *iter)
tools/perf/util/block-range.h
67
if (!iter->start || !iter->end)
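
block-range.h models a range walk as a pair of boundary pointers plus __valid()/__next() helpers, and builtin-annotate.c drives it with a do/while, as the call sites above show. A condensed sketch of that driver loop (perf-internal types, not self-contained):

    static void walk_block(u64 start_addr, u64 end_addr)
    {
            struct block_range_iter iter;
            struct block_range *entry;

            iter = block_range__create(start_addr, end_addr);
            if (!block_range_iter__valid(&iter))
                    return;

            do {
                    entry = block_range_iter(&iter);    /* current range */
                    /* ... accumulate stats for entry ... */
            } while (block_range_iter__next(&iter));
    }
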
tools/perf/util/bpf_skel/lock_contention.bpf.c
970
struct bpf_iter__kmem_cache___new *iter = ctx;
tools/perf/util/bpf_skel/lock_contention.bpf.c
972
s = iter->s;
tools/perf/util/hist.c
1000
iter->bi = bi;
tools/perf/util/hist.c
1005
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
tools/perf/util/hist.c
1012
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
tools/perf/util/hist.c
1014
struct branch_info *bi = iter->bi;
tools/perf/util/hist.c
1015
int i = iter->curr;
tools/perf/util/hist.c
1020
if (iter->curr >= iter->total)
tools/perf/util/hist.c
1033
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
tools/perf/util/hist.c
1036
struct evsel *evsel = iter->evsel;
tools/perf/util/hist.c
1038
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
1040
int i = iter->curr;
tools/perf/util/hist.c
1043
bi = iter->bi;
tools/perf/util/hist.c
1045
if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
tools/perf/util/hist.c
1055
he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
tools/perf/util/hist.c
1061
iter->he = he;
tools/perf/util/hist.c
1062
iter->curr++;
tools/perf/util/hist.c
1075
iter_finish_branch_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
1078
struct evsel *evsel = iter->evsel;
tools/perf/util/hist.c
1081
for (int i = 0; i < iter->total; i++)
tools/perf/util/hist.c
1082
branch_info__exit(&iter->bi[i]);
tools/perf/util/hist.c
1084
if (iter->he)
tools/perf/util/hist.c
1085
hists__inc_nr_samples(hists, iter->he->filtered);
tools/perf/util/hist.c
1087
zfree(&iter->bi);
tools/perf/util/hist.c
1088
iter->he = NULL;
tools/perf/util/hist.c
1090
return iter->curr >= iter->total ? 0 : -1;
tools/perf/util/hist.c
1094
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
tools/perf/util/hist.c
1101
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
tools/perf/util/hist.c
1103
struct evsel *evsel = iter->evsel;
tools/perf/util/hist.c
1104
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
1107
he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
tools/perf/util/hist.c
1112
iter->he = he;
tools/perf/util/hist.c
1117
iter_finish_normal_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
1120
struct hist_entry *he = iter->he;
tools/perf/util/hist.c
1121
struct evsel *evsel = iter->evsel;
tools/perf/util/hist.c
1122
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
1127
iter->he = NULL;
tools/perf/util/hist.c
1135
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
1155
iter->he_cache = he_cache;
tools/perf/util/hist.c
1156
iter->curr = 0;
tools/perf/util/hist.c
1162
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
1165
struct evsel *evsel = iter->evsel;
tools/perf/util/hist.c
1167
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
1168
struct hist_entry **he_cache = iter->he_cache;
tools/perf/util/hist.c
1172
he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
tools/perf/util/hist.c
1177
iter->he = he;
tools/perf/util/hist.c
1178
he_cache[iter->curr++] = he;
tools/perf/util/hist.c
1194
iter_next_cumulative_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
1203
return fill_callchain_info(al, node, iter->hide_unresolved);
tools/perf/util/hist.c
1221
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
1224
struct evsel *evsel = iter->evsel;
tools/perf/util/hist.c
1225
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
1226
struct hist_entry **he_cache = iter->he_cache;
tools/perf/util/hist.c
1240
.parent = iter->parent,
tools/perf/util/hist.c
1259
for (i = 0; i < iter->curr; i++) {
tools/perf/util/hist.c
1270
iter->he = NULL;
tools/perf/util/hist.c
1275
he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
tools/perf/util/hist.c
1280
iter->he = he;
tools/perf/util/hist.c
1281
he_cache[iter->curr++] = he;
tools/perf/util/hist.c
1289
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
1292
mem_info__zput(iter->mi);
tools/perf/util/hist.c
1293
zfree(&iter->bi);
tools/perf/util/hist.c
1294
zfree(&iter->he_cache);
tools/perf/util/hist.c
1295
iter->he = NULL;
tools/perf/util/hist.c
1332
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
tools/perf/util/hist.c
1341
err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
tools/perf/util/hist.c
1342
iter->evsel, al, max_stack_depth);
tools/perf/util/hist.c
1348
err = iter->ops->prepare_entry(iter, al);
tools/perf/util/hist.c
1352
err = iter->ops->add_single_entry(iter, al);
tools/perf/util/hist.c
1356
if (iter->he && iter->add_entry_cb) {
tools/perf/util/hist.c
1357
err = iter->add_entry_cb(iter, al, true, arg);
tools/perf/util/hist.c
1362
while (iter->ops->next_entry(iter, al)) {
tools/perf/util/hist.c
1363
err = iter->ops->add_next_entry(iter, al);
tools/perf/util/hist.c
1367
if (iter->he && iter->add_entry_cb) {
tools/perf/util/hist.c
1368
err = iter->add_entry_cb(iter, al, false, arg);
tools/perf/util/hist.c
1375
err2 = iter->ops->finish_entry(iter, al);
tools/perf/util/hist.c
1631
struct hist_entry *iter, *new;
tools/perf/util/hist.c
1638
iter = rb_entry(parent, struct hist_entry, rb_node_in);
tools/perf/util/hist.c
1639
cmp = hist_entry__collapse_hierarchy(hpp_list, iter, he);
tools/perf/util/hist.c
1641
he_stat__add_stat(&iter->stat, &he->stat);
tools/perf/util/hist.c
1642
hists__add_mem_stat(hists, iter, he);
tools/perf/util/hist.c
1643
return iter;
tools/perf/util/hist.c
1752
struct hist_entry *iter;
tools/perf/util/hist.c
1761
iter = rb_entry(parent, struct hist_entry, rb_node_in);
tools/perf/util/hist.c
1763
cmp = hist_entry__collapse(iter, he);
tools/perf/util/hist.c
1768
he_stat__add_stat(&iter->stat, &he->stat);
tools/perf/util/hist.c
1770
he_stat__add_stat(iter->stat_acc, he->stat_acc);
tools/perf/util/hist.c
1771
hists__add_mem_stat(hists, iter, he);
tools/perf/util/hist.c
1778
if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
tools/perf/util/hist.c
1935
struct hist_entry *iter;
tools/perf/util/hist.c
1941
iter = rb_entry(parent, struct hist_entry, rb_node);
tools/perf/util/hist.c
1943
if (hist_entry__sort(he, iter) > 0)
tools/perf/util/hist.c
2022
struct hist_entry *iter;
tools/perf/util/hist.c
2041
iter = rb_entry(parent, struct hist_entry, rb_node);
tools/perf/util/hist.c
2043
if (hist_entry__sort(he, iter) > 0)
tools/perf/util/hist.c
2355
struct hist_entry *iter;
tools/perf/util/hist.c
2362
iter = rb_entry(parent, struct hist_entry, rb_node);
tools/perf/util/hist.c
2364
if (hist_entry__sort(he, iter) > 0)
tools/perf/util/hist.c
2642
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
tools/perf/util/hist.c
2643
int64_t cmp = hist_entry__collapse(iter, he);
tools/perf/util/hist.c
2650
return iter;
tools/perf/util/hist.c
2662
struct hist_entry *iter;
tools/perf/util/hist.c
2665
iter = rb_entry(n, struct hist_entry, rb_node_in);
tools/perf/util/hist.c
2666
cmp = hist_entry__collapse_hierarchy(he->hpp_list, iter, he);
tools/perf/util/hist.c
2672
return iter;
tools/perf/util/hist.c
903
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
tools/perf/util/hist.c
910
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
tools/perf/util/hist.c
917
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
tools/perf/util/hist.c
919
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
926
iter->mi = mi;
tools/perf/util/hist.c
931
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
tools/perf/util/hist.c
934
struct mem_info *mi = iter->mi;
tools/perf/util/hist.c
935
struct hists *hists = evsel__hists(iter->evsel);
tools/perf/util/hist.c
936
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
955
he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
tools/perf/util/hist.c
960
iter->he = he;
tools/perf/util/hist.c
965
iter_finish_mem_entry(struct hist_entry_iter *iter,
tools/perf/util/hist.c
968
struct evsel *evsel = iter->evsel;
tools/perf/util/hist.c
970
struct hist_entry *he = iter->he;
tools/perf/util/hist.c
978
err = hist_entry__append_callchain(he, iter->sample);
tools/perf/util/hist.c
981
mem_info__zput(iter->mi);
tools/perf/util/hist.c
983
iter->he = NULL;
tools/perf/util/hist.c
988
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
tools/perf/util/hist.c
991
struct perf_sample *sample = iter->sample;
tools/perf/util/hist.c
997
iter->curr = 0;
tools/perf/util/hist.c
998
iter->total = sample->branch_stack->nr;
tools/perf/util/hist.h
169
int (*add_entry_cb)(struct hist_entry_iter *iter,
tools/perf/util/hist.h
367
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
tools/perf/util/machine.c
2154
struct iterations *iter,
tools/perf/util/machine.c
2218
if (iter) {
tools/perf/util/machine.c
2219
nr_loop_iter = iter->nr_loop_iter;
tools/perf/util/machine.c
2220
iter_cycles = iter->cycles;
tools/perf/util/machine.c
2263
static void save_iterations(struct iterations *iter,
tools/perf/util/machine.c
2268
iter->nr_loop_iter++;
tools/perf/util/machine.c
2269
iter->cycles = 0;
tools/perf/util/machine.c
2272
iter->cycles += be[i].flags.cycles;
tools/perf/util/machine.c
2283
struct iterations *iter)
tools/perf/util/machine.c
2310
save_iterations(iter + i + off,
tools/perf/util/machine.c
2313
memmove(iter + i, iter + i + off,
tools/perf/util/machine.c
2314
j * sizeof(*iter));
tools/perf/util/machine.c
2833
struct iterations iter[nr];
tools/perf/util/machine.c
2864
memset(iter, 0, sizeof(struct iterations) * nr);
tools/perf/util/machine.c
2865
nr = remove_loops(be, nr, iter);
tools/perf/util/machine.c
2878
&iter[i], 0, symbols);
tools/perf/util/ordered-events.c
227
struct ordered_event *tmp, *iter;
tools/perf/util/ordered-events.c
239
list_for_each_entry_safe(iter, tmp, head, list) {
tools/perf/util/ordered-events.c
243
if (iter->timestamp > limit)
tools/perf/util/ordered-events.c
245
ret = oe->deliver(oe, iter);
tools/perf/util/ordered-events.c
249
ordered_events__delete(oe, iter);
tools/perf/util/ordered-events.c
250
oe->last_flush = iter->timestamp;
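
ordered-events.c needs list_for_each_entry_safe() because it deletes the node it just delivered; the second cursor keeps the walk valid across the deletion. A generic kernel-style sketch of the flush-until-timestamp pattern; deliver() is hypothetical.

    #include <linux/list.h>
    #include <linux/slab.h>

    struct event {
            u64 timestamp;
            struct list_head list;
    };

    static void flush_until(struct list_head *head, u64 limit)
    {
            struct event *iter, *tmp;

            list_for_each_entry_safe(iter, tmp, head, list) {
                    if (iter->timestamp > limit)
                            break;          /* list is timestamp-ordered */
                    /* deliver(iter);  -- hypothetical consumer */
                    list_del(&iter->list);
                    kfree(iter);
            }
    }
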
tools/power/x86/turbostat/turbostat.c
2008
const struct dirent *pmt_diriter_next(struct pmt_diriter_t *iter)
tools/power/x86/turbostat/turbostat.c
2012
if (!iter->dir)
tools/power/x86/turbostat/turbostat.c
2015
if (iter->current_name_idx >= iter->num_names)
tools/power/x86/turbostat/turbostat.c
2018
ret = iter->namelist[iter->current_name_idx];
tools/power/x86/turbostat/turbostat.c
2019
++iter->current_name_idx;
tools/power/x86/turbostat/turbostat.c
2024
const struct dirent *pmt_diriter_begin(struct pmt_diriter_t *iter, const char *pmt_root_path)
tools/power/x86/turbostat/turbostat.c
2026
int num_names = iter->num_names;
tools/power/x86/turbostat/turbostat.c
2028
if (!iter->dir) {
tools/power/x86/turbostat/turbostat.c
2029
iter->dir = opendir(pmt_root_path);
tools/power/x86/turbostat/turbostat.c
2030
if (iter->dir == NULL)
tools/power/x86/turbostat/turbostat.c
2033
num_names = scandir(pmt_root_path, &iter->namelist, pmt_telemdir_filter, pmt_telemdir_sort);
tools/power/x86/turbostat/turbostat.c
2038
iter->current_name_idx = 0;
tools/power/x86/turbostat/turbostat.c
2039
iter->num_names = num_names;
tools/power/x86/turbostat/turbostat.c
2041
return pmt_diriter_next(iter);
tools/power/x86/turbostat/turbostat.c
2044
void pmt_diriter_init(struct pmt_diriter_t *iter)
tools/power/x86/turbostat/turbostat.c
2046
memset(iter, 0, sizeof(*iter));
tools/power/x86/turbostat/turbostat.c
2049
void pmt_diriter_remove(struct pmt_diriter_t *iter)
tools/power/x86/turbostat/turbostat.c
2051
if (iter->namelist) {
tools/power/x86/turbostat/turbostat.c
2052
for (unsigned int i = 0; i < iter->num_names; i++) {
tools/power/x86/turbostat/turbostat.c
2053
free(iter->namelist[i]);
tools/power/x86/turbostat/turbostat.c
2054
iter->namelist[i] = NULL;
tools/power/x86/turbostat/turbostat.c
2058
free(iter->namelist);
tools/power/x86/turbostat/turbostat.c
2059
iter->namelist = NULL;
tools/power/x86/turbostat/turbostat.c
2060
iter->num_names = 0;
tools/power/x86/turbostat/turbostat.c
2061
iter->current_name_idx = 0;
tools/power/x86/turbostat/turbostat.c
2063
closedir(iter->dir);
tools/power/x86/turbostat/turbostat.c
2064
iter->dir = NULL;
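
turbostat's pmt_diriter wraps scandir(): begin() populates a sorted namelist once, next() merely indexes into it, and teardown frees each entry and then the array. A condensed user-space sketch of the same shape, using alphasort and no filter:

    #include <dirent.h>
    #include <stdlib.h>

    struct diriter {
            struct dirent **namelist;
            int num_names, idx;
    };

    static const struct dirent *diriter_next(struct diriter *it)
    {
            return it->idx < it->num_names ? it->namelist[it->idx++] : NULL;
    }

    static const struct dirent *diriter_begin(struct diriter *it,
                                              const char *path)
    {
            it->idx = 0;
            it->num_names = scandir(path, &it->namelist, NULL, alphasort);
            if (it->num_names < 0)          /* scandir failed */
                    it->num_names = 0;
            return diriter_next(it);
    }

    static void diriter_remove(struct diriter *it)
    {
            for (int i = 0; i < it->num_names; i++)
                    free(it->namelist[i]);
            free(it->namelist);
            it->namelist = NULL;
            it->num_names = it->idx = 0;
    }
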
tools/testing/cxl/test/cxl.c
780
struct cxl_port *port, *iter;
tools/testing/cxl/test/cxl.c
845
iter = port;
tools/testing/cxl/test/cxl.c
847
dport = iter->parent_dport;
tools/testing/cxl/test/cxl.c
848
iter = dport->port;
tools/testing/cxl/test/cxl.c
849
dev = device_find_child(&iter->dev, NULL, first_decoder);
tools/testing/cxl/test/cxl.c
874
iter->commit_end = 0;
tools/testing/radix-tree/benchmark.c
17
struct radix_tree_iter iter;
tools/testing/radix-tree/benchmark.c
29
radix_tree_for_each_tagged(slot, root, &iter, 0, 0)
tools/testing/radix-tree/benchmark.c
32
radix_tree_for_each_slot(slot, root, &iter, 0)
tools/testing/radix-tree/regression3.c
34
struct radix_tree_iter iter;
tools/testing/radix-tree/regression3.c
44
radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
tools/testing/radix-tree/regression3.c
45
printv(2, "tagged %ld %p\n", iter.index, *slot);
tools/testing/radix-tree/regression3.c
52
printv(2, "retry at %ld\n", iter.index);
tools/testing/radix-tree/regression3.c
53
slot = radix_tree_iter_retry(&iter);
tools/testing/radix-tree/regression3.c
60
radix_tree_for_each_slot(slot, &root, &iter, 0) {
tools/testing/radix-tree/regression3.c
61
printv(2, "slot %ld %p\n", iter.index, *slot);
tools/testing/radix-tree/regression3.c
67
printv(2, "retry at %ld\n", iter.index);
tools/testing/radix-tree/regression3.c
68
slot = radix_tree_iter_retry(&iter);
tools/testing/radix-tree/regression3.c
73
radix_tree_for_each_slot(slot, &root, &iter, 0) {
tools/testing/radix-tree/regression3.c
74
printv(2, "slot %ld %p\n", iter.index, *slot);
tools/testing/radix-tree/regression3.c
75
if (!iter.index) {
tools/testing/radix-tree/regression3.c
76
printv(2, "next at %ld\n", iter.index);
tools/testing/radix-tree/regression3.c
77
slot = radix_tree_iter_resume(slot, &iter);
tools/testing/radix-tree/regression3.c
83
radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
tools/testing/radix-tree/regression3.c
84
printv(2, "tagged %ld %p\n", iter.index, *slot);
tools/testing/radix-tree/regression3.c
85
if (!iter.index) {
tools/testing/radix-tree/regression3.c
86
printv(2, "next at %ld\n", iter.index);
tools/testing/radix-tree/regression3.c
87
slot = radix_tree_iter_resume(slot, &iter);
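
regression3.c exercises the iterator's two mid-walk control points: radix_tree_iter_retry() restarts the current index after the slot is invalidated, and radix_tree_iter_resume() revalidates the position before the next chunk. A minimal kernel-side sketch of the retry pattern, assuming the caller holds rcu_read_lock():

    #include <linux/radix-tree.h>

    static void walk_entries(struct radix_tree_root *root)
    {
            struct radix_tree_iter iter;
            void __rcu **slot;

            radix_tree_for_each_slot(slot, root, &iter, 0) {
                    void *entry = radix_tree_deref_slot(slot);

                    if (radix_tree_deref_retry(entry)) {
                            /* slot changed under us: redo this index */
                            slot = radix_tree_iter_retry(&iter);
                            continue;
                    }
                    /* use entry; iter.index gives the current index */
            }
    }
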
tools/testing/selftests/alsa/conf.c
199
int iter = 0, ret;
tools/testing/selftests/alsa/conf.c
211
iter++;
tools/testing/selftests/alsa/conf.c
222
return iter > 0;
tools/testing/selftests/arm64/fp/kernel-test.c
313
printf("Failed to compute digest, iter=%d\n", iter);
tools/testing/selftests/arm64/fp/kernel-test.c
318
printf("Digest mismatch, iter=%d\n", iter);
tools/testing/selftests/arm64/fp/kernel-test.c
322
iter++;
tools/testing/selftests/arm64/fp/kernel-test.c
34
static int iter;
tools/testing/selftests/arm64/fp/kernel-test.c
39
sig, iter, sigs);
tools/testing/selftests/bpf/bench.c
159
void ops_report_progress(int iter, struct bench_res *res, long delta_ns)
tools/testing/selftests/bpf/bench.c
166
printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
tools/testing/selftests/bpf/bench.c
192
void local_storage_report_progress(int iter, struct bench_res *res,
tools/testing/selftests/bpf/bench.c
201
printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
tools/testing/selftests/bpf/bench.c
39
void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns)
tools/testing/selftests/bpf/bench.c
44
iter, (delta_ns - 1000000000) / 1000.0);
tools/testing/selftests/bpf/bench.c
68
void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
tools/testing/selftests/bpf/bench.c
741
int iter = state.res_cnt++;
tools/testing/selftests/bpf/bench.c
742
struct bench_res *res = &state.results[iter];
tools/testing/selftests/bpf/bench.c
747
bench->report_progress(iter, res, delta_ns);
tools/testing/selftests/bpf/bench.c
749
if (iter == env.duration_sec + env.warmup_sec) {
tools/testing/selftests/bpf/bench.c
78
iter, (delta_ns - 1000000000) / 1000.0);
tools/testing/selftests/bpf/bench.h
61
void (*report_progress)(int iter, struct bench_res* res, long delta_ns);
tools/testing/selftests/bpf/bench.h
73
void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns);
tools/testing/selftests/bpf/bench.h
75
void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns);
tools/testing/selftests/bpf/bench.h
77
void ops_report_progress(int iter, struct bench_res *res, long delta_ns);
tools/testing/selftests/bpf/bench.h
79
void local_storage_report_progress(int iter, struct bench_res *res,
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
298
static void htab_mem_report_progress(int iter, struct bench_res *res, long delta_ns)
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
304
printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
204
static void report_progress(int iter, struct bench_res *res, long delta_ns)
tools/testing/selftests/bpf/benchs/bench_local_storage_create.c
212
iter, (delta_ns - 1000000000) / 1000.0);
tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
207
static void report_progress(int iter, struct bench_res *res, long delta_ns)
tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
219
iter, res->gp_ns / (double)res->gp_ct);
tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
221
iter, res->stime / (double)res->gp_ct);
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
364
static void frac_second_report_progress(int iter, struct bench_res *res,
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
374
printf("Iter %3d (%7.3lfus): ", iter,
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
413
static void insert_ops_report_progress(int iter, struct bench_res *res,
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
419
frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
422
static void delete_ops_report_progress(int iter, struct bench_res *res,
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
428
frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
431
static void free_ops_report_progress(int iter, struct bench_res *res,
tools/testing/selftests/bpf/benchs/bench_lpm_trie_map.c
437
frac_second_report_progress(iter, res, delta_ns, rate_divisor, rate);
tools/testing/selftests/bpf/benchs/bench_sockmap.c
502
static void report_progress(int iter, struct bench_res *res, long delta_ns)
tools/testing/selftests/bpf/benchs/bench_sockmap.c
513
iter, (delta_ns - 1000000000) / 1000.0);
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
702
int iter;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
712
int i, j, ret, iter, key_size;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
718
for (iter = 0; iter < info->iter; iter++)
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
723
j = (iter < (info->iter / 2)) ? i : MAX_TEST_KEYS - i - 1;
tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c
750
info->iter = 2000;
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
198
ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
32
struct kallsym_iter *iter = ctx->ksym;
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
37
if (!iter)
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
45
BPF_SEQ_PRINTF(seq, "0x%x\n", iter->value - last_sym_value);
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
49
value = iter->show_value ? iter->value : 0;
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
53
type = iter->type;
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
55
if (iter->module_name[0]) {
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
56
type = iter->exported ? to_upper(type) : to_lower(type);
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
58
value, type, iter->name, iter->module_name);
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
60
BPF_SEQ_PRINTF(seq, "0x%llx %c %s ", value, type, iter->name);
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
62
if (!iter->pos_mod_end || iter->pos_mod_end > iter->pos)
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
64
else if (!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > iter->pos)
tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
66
else if (!iter->pos_bpf_end || iter->pos_bpf_end > iter->pos)
tools/testing/selftests/bpf/progs/iters_looping.c
120
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_looping.c
132
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_looping.c
158
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_looping.c
168
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_looping.c
190
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_looping.c
35
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_looping.c
55
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_looping.c
66
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_looping.c
86
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_looping.c
99
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
113
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
124
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
138
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
149
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
169
: __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel)
tools/testing/selftests/bpf/progs/iters_state_safety.c
178
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
180
bpf_iter_num_new(&iter, 0, 1);
tools/testing/selftests/bpf/progs/iters_state_safety.c
202
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
225
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
236
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
253
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
264
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
279
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
290
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
300
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
311
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
326
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
36
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
375
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
417
: __imm_ptr(iter),
tools/testing/selftests/bpf/progs/iters_state_safety.c
48
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
59
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
68
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
80
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/iters_state_safety.c
86
: __imm_ptr(iter), ITER_HELPERS
tools/testing/selftests/bpf/progs/iters_state_safety.c
97
struct bpf_iter_num iter;
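
The iters_* selftests drive the verifier with the open-coded numeric iterator triple — new, next, destroy — where next returns a pointer to the current value or NULL at the end, and destroy is mandatory on every path. A minimal BPF-side sketch; the __ksym declarations mirror what the selftests' headers provide.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    extern int bpf_iter_num_new(struct bpf_iter_num *it,
                                int start, int end) __ksym;
    extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
    extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

    char LICENSE[] SEC("license") = "GPL";

    SEC("raw_tp")
    int sum_demo(void *ctx)
    {
            struct bpf_iter_num iter;
            int *v, sum = 0;

            bpf_iter_num_new(&iter, 0, 10);         /* half-open [0, 10) */
            while ((v = bpf_iter_num_next(&iter)))
                    sum += *v;
            bpf_iter_num_destroy(&iter);            /* verifier-enforced */

            return sum;
    }
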
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
204
struct bpf_iter_bits iter;
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
209
err = bpf_iter_bits_new(&iter, bad_addr, 1);
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
210
bpf_iter_bits_destroy(&iter);
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
220
err = bpf_iter_bits_new(&iter, bad_addr, 4);
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
221
bpf_iter_bits_destroy(&iter);
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
60
struct bpf_iter_bits iter;
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
64
err = bpf_iter_bits_new(&iter, NULL, 1);
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
65
bpf_iter_bits_destroy(&iter);
tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
486
struct bpf_iter_num iter;
tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
527
: __imm_ptr(iter),
tools/testing/selftests/bpf/trace_helpers.c
481
int read_trace_pipe_iter(void (*cb)(const char *str, void *data), void *data, int iter)
tools/testing/selftests/bpf/trace_helpers.c
495
if (iter)
tools/testing/selftests/bpf/trace_helpers.c
501
if (iter && !(--iter))
tools/testing/selftests/bpf/trace_helpers.h
56
void *data, int iter);
tools/testing/selftests/kvm/lib/arm64/vgic.c
83
struct list_head *iter;
tools/testing/selftests/kvm/lib/arm64/vgic.c
92
list_for_each(iter, &vm->vcpus)
tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
89
static inline void dummy_func_loop(uint64_t iter)
tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
93
while (i < iter) {
tools/testing/selftests/kvm/x86/amx_test.c
278
int iter = 0;
tools/testing/selftests/kvm/x86/amx_test.c
288
++iter;
tools/testing/selftests/kvm/x86/amx_test.c
290
fprintf(stderr, "GUEST_SYNC #%d, save tiledata\n", iter);
tools/testing/selftests/kvm/x86/amx_test.c
294
fprintf(stderr, "GUEST_SYNC #%d, check TMM0 contents\n", iter);
tools/testing/selftests/kvm/x86/amx_test.c
307
fprintf(stderr, "GUEST_SYNC #%d, before KVM_SET_XSAVE\n", iter);
tools/testing/selftests/kvm/x86/amx_test.c
309
fprintf(stderr, "GUEST_SYNC #%d, after KVM_SET_XSAVE\n", iter);
tools/testing/selftests/kvm/x86/amx_test.c
312
fprintf(stderr, "GUEST_SYNC #%d, save/restore VM state\n", iter);
tools/testing/selftests/mm/hugetlb-read-hwpoison.c
44
char iter = 0;
tools/testing/selftests/mm/hugetlb-read-hwpoison.c
48
iter++;
tools/testing/selftests/mm/hugetlb-read-hwpoison.c
49
memset(filemap + offset, iter, wr_chunk_size);
tools/testing/selftests/net/nettest.c
1316
if (iter != -1) {
tools/testing/selftests/net/nettest.c
1317
--iter;
tools/testing/selftests/net/nettest.c
1318
if (iter == 0)
tools/testing/selftests/net/nettest.c
141
static int iter = 1;
tools/testing/selftests/net/nettest.c
2098
iter = atoi(optarg);
tools/testing/selftests/net/nettest.c
2201
if (iter == 0) {
tools/testing/selftests/net/tcp_fastopen_backup_key.c
213
static int iter;
tools/testing/selftests/net/tcp_fastopen_backup_key.c
219
if (iter < N_LISTEN) {
tools/testing/selftests/net/tcp_fastopen_backup_key.c
221
if (iter == 0) {
tools/testing/selftests/net/tcp_fastopen_backup_key.c
236
if (++iter >= (N_LISTEN * 2))
tools/testing/selftests/net/tcp_fastopen_backup_key.c
237
iter = 0;
tools/testing/selftests/perf_events/sigtrap_threads.c
101
ctx.iterate_on = iter; /* idempotent write */
tools/testing/selftests/perf_events/sigtrap_threads.c
91
int iter;
tools/testing/selftests/perf_events/sigtrap_threads.c
97
iter = ctx.iterate_on; /* read */
tools/testing/selftests/perf_events/sigtrap_threads.c
98
if (iter >= 0) {
tools/testing/selftests/perf_events/sigtrap_threads.c
99
for (i = 0; i < iter - 1; i++) {
tools/testing/selftests/pidfd/pidfd_poll_test.c
29
int iter, nevents;
tools/testing/selftests/pidfd/pidfd_poll_test.c
47
for (iter = 0; iter < nr_iterations; iter++) {
tools/testing/selftests/pidfd/pidfd_poll_test.c
53
iter--;
tools/testing/selftests/powerpc/security/entry_flush.c
112
iter = repetitions;
tools/testing/selftests/powerpc/security/entry_flush.c
24
int fd, passes = 0, iter, rc = 0;
tools/testing/selftests/powerpc/security/entry_flush.c
66
iter = repetitions;
tools/testing/selftests/powerpc/security/entry_flush.c
88
while (--iter)
tools/testing/selftests/powerpc/security/rfi_flush.c
112
iter = repetitions;
tools/testing/selftests/powerpc/security/rfi_flush.c
24
int fd, passes = 0, iter, rc = 0;
tools/testing/selftests/powerpc/security/rfi_flush.c
67
iter = repetitions;
tools/testing/selftests/powerpc/security/rfi_flush.c
89
while (--iter)
tools/testing/selftests/powerpc/security/uaccess_flush.c
102
while (--iter)
tools/testing/selftests/powerpc/security/uaccess_flush.c
126
iter = repetitions;
tools/testing/selftests/powerpc/security/uaccess_flush.c
25
int fd, passes = 0, iter, rc = 0;
tools/testing/selftests/powerpc/security/uaccess_flush.c
80
iter = repetitions;
virt/kvm/kvm_main.c
1991
struct kvm_memslot_iter iter;
virt/kvm/kvm_main.c
1993
kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
virt/kvm/kvm_main.c
1994
if (iter.slot->id != id)
virt/kvm/kvm_main.c
2478
struct kvm_memslot_iter iter;
virt/kvm/kvm_main.c
2497
kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
virt/kvm/kvm_main.c
2498
slot = iter.slot;
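
The listing closes with KVM's gfn-range memslot iterator, which visits only slots overlapping [start, end). A condensed sketch mirroring the kvm_main.c call sites above (kernel-internal API, fragment only):

    struct kvm_memslot_iter iter;

    kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
            struct kvm_memory_slot *slot = iter.slot;

            /* operate on the slot's overlap with [start, end) */
    }
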